Mirror of https://github.com/typeable/bloodhound.git (synced 2024-11-25 23:46:40 +03:00)
Merge pull request #216 from andrewthad/best_compression
Add the ability to change the compression codec. This allows us to use DEFLATE instead of LZ4.
Commit ce723c2089
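For context, a minimal usage sketch of the new setting, assuming the Database.V5.Bloodhound API exercised by the tests in this PR (createIndexWith, CompressionSetting, CompressionBest); the server URL and index name are placeholders, not part of the change:

{-# LANGUAGE OverloadedStrings #-}
module Main where

import Control.Monad.IO.Class (liftIO)
import Database.V5.Bloodhound
import Network.HTTP.Client (defaultManagerSettings)

main :: IO ()
main = withBH defaultManagerSettings (Server "http://localhost:9200") $ do
  -- The codec is supplied at index creation, as in the tests below:
  -- CompressionBest selects DEFLATE ("best_compression"),
  -- CompressionDefault selects LZ4 ("default").
  resp <- createIndexWith
            [ CompressionSetting CompressionBest
            , NumberOfReplicas (ReplicaCount 0) ]
            1                        -- number of shards
            (IndexName "compressed-index")
  liftIO (print resp)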
@@ -74,6 +74,7 @@ module Database.V5.Bloodhound.Types
        , UpdatableIndexSetting(..)
        , IndexSettingsSummary(..)
        , AllocationPolicy(..)
+       , Compression(..)
        , ReplicaBounds(..)
        , Bytes(..)
        , gigabytes
@@ -586,6 +587,7 @@ data UpdatableIndexSetting = NumberOfReplicas ReplicaCount
                            | TTLDisablePurge Bool
                            -- ^ Disables temporarily the purge of expired docs.
                            | TranslogFSType FSType
+                           | CompressionSetting Compression
                            | IndexCompoundFormat CompoundFormat
                            | IndexCompoundOnFlush Bool
                            | WarmerEnabled Bool
@@ -688,6 +690,26 @@ data ReplicaBounds = ReplicasBounded Int Int
                    | ReplicasUnbounded
                    deriving (Eq, Read, Show, Generic, Typeable)
 
+data Compression
+  = CompressionDefault
+  -- ^ Compress with LZ4
+  | CompressionBest
+  -- ^ Compress with DEFLATE. Elastic
+  -- <https://www.elastic.co/blog/elasticsearch-storage-the-true-story-2.0 blogs>
+  -- that this can reduce disk use by 15%-25%.
+  deriving (Eq,Show,Generic,Typeable)
+
+instance ToJSON Compression where
+  toJSON x = case x of
+    CompressionDefault -> toJSON ("default" :: Text)
+    CompressionBest -> toJSON ("best_compression" :: Text)
+
+instance FromJSON Compression where
+  parseJSON = withText "Compression" $ \t -> case t of
+    "default" -> return CompressionDefault
+    "best_compression" -> return CompressionBest
+    _ -> fail "invalid compression codec"
+
 -- | A measure of bytes used for various configurations. You may want
 -- to use smart constructors like 'gigabytes' for larger values.
 --
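The two instances above fix the on-the-wire strings for the codec: "default" for LZ4 and "best_compression" for DEFLATE. A quick round-trip check, as a sketch that assumes only aeson and the exported Compression type:

import Data.Aeson (eitherDecode, encode)
import Database.V5.Bloodhound (Compression (..))

-- Both constructors should survive an encode/decode round trip.
compressionRoundTrips :: Bool
compressionRoundTrips =
     eitherDecode (encode CompressionDefault) == Right CompressionDefault
  && eitherDecode (encode CompressionBest)    == Right CompressionBest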
@@ -3079,6 +3101,7 @@ instance ToJSON UpdatableIndexSetting where
   toJSON (GCDeletes x) = oPath ("index" :| ["gc_deletes"]) (NominalDiffTimeJSON x)
   toJSON (TTLDisablePurge x) = oPath ("index" :| ["ttl", "disable_purge"]) x
   toJSON (TranslogFSType x) = oPath ("index" :| ["translog", "fs", "type"]) x
+  toJSON (CompressionSetting x) = oPath ("index" :| ["codec"]) x
   toJSON (IndexCompoundFormat x) = oPath ("index" :| ["compound_format"]) x
   toJSON (IndexCompoundOnFlush x) = oPath ("index" :| ["compound_on_flush"]) x
   toJSON (WarmerEnabled x) = oPath ("index" :| ["warmer", "enabled"]) x
@@ -3112,6 +3135,7 @@ instance FromJSON UpdatableIndexSetting where
         <|> gcDeletes `taggedAt` ["index", "gc_deletes"]
         <|> ttlDisablePurge `taggedAt` ["index", "ttl", "disable_purge"]
         <|> translogFSType `taggedAt` ["index", "translog", "fs", "type"]
+        <|> compressionSetting `taggedAt` ["index", "codec"]
         <|> compoundFormat `taggedAt` ["index", "compound_format"]
         <|> compoundOnFlush `taggedAt` ["index", "compound_on_flush"]
         <|> warmerEnabled `taggedAt` ["index", "warmer", "enabled"]
@@ -3146,6 +3170,7 @@ instance FromJSON UpdatableIndexSetting where
      gcDeletes = pure . GCDeletes . ndtJSON
      ttlDisablePurge = pure . TTLDisablePurge
      translogFSType = pure . TranslogFSType
+     compressionSetting = pure . CompressionSetting
      compoundFormat = pure . IndexCompoundFormat
      compoundOnFlush = pure . IndexCompoundOnFlush
      warmerEnabled = pure . WarmerEnabled
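In terms of the request body, the CompressionSetting branch above nests the value under index and codec via oPath, so the setting should serialize to the shape Elasticsearch expects for index.codec. A small sketch assuming aeson and this module's exports:

import Data.Aeson (encode)
import qualified Data.ByteString.Lazy.Char8 as BL8
import Database.V5.Bloodhound (Compression (..), UpdatableIndexSetting (..))

-- Expected output: {"index":{"codec":"best_compression"}}
printCodecSetting :: IO ()
printCodecSetting = BL8.putStrLn (encode (CompressionSetting CompressionBest))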
@@ -914,6 +914,7 @@ instance Arbitrary AnalyzerDefinition where arbitrary = sopArbitrary; shrink = genericShrink
 instance Arbitrary Analysis where arbitrary = sopArbitrary; shrink = genericShrink
 instance Arbitrary Tokenizer where arbitrary = sopArbitrary; shrink = genericShrink
 instance Arbitrary UpdatableIndexSetting where arbitrary = sopArbitrary; shrink = genericShrink
+instance Arbitrary Compression where arbitrary = sopArbitrary; shrink = genericShrink
 instance Arbitrary Bytes where arbitrary = sopArbitrary; shrink = genericShrink
 instance Arbitrary AllocationPolicy where arbitrary = sopArbitrary; shrink = genericShrink
 instance Arbitrary InitialShardCount where arbitrary = sopArbitrary; shrink = genericShrink
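With an Arbitrary instance for Compression in the test suite, a JSON round-trip property is easy to state. The sketch below uses an explicit generator so it does not depend on the sopArbitrary machinery, and the property name is illustrative rather than part of this PR:

import Data.Aeson (eitherDecode, encode)
import Database.V5.Bloodhound (Compression (..))
import Test.QuickCheck

-- Encoding then decoding any Compression value should give it back.
prop_compressionJSONRoundTrip :: Property
prop_compressionJSONRoundTrip =
  forAll (elements [CompressionDefault, CompressionBest]) $ \c ->
    eitherDecode (encode c) === Right c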
@@ -1648,6 +1649,25 @@ main = hspec $ do
                                            updates
                                           )
 
+    it "accepts default compression codec" $ when' (atleast es50) $ withTestEnv $ do
+      _ <- deleteExampleIndex
+      let updates = [CompressionSetting CompressionDefault]
+      createResp <- createIndexWith (updates ++ [NumberOfReplicas (ReplicaCount 0)]) 1 testIndex
+      liftIO $ validateStatus createResp 200
+      getResp <- getIndexSettings testIndex
+      liftIO $ getResp `shouldBe` Right
+        (IndexSettingsSummary testIndex (IndexSettings (ShardCount 1) (ReplicaCount 0)) updates)
+
+    it "accepts best compression codec" $ when' (atleast es50) $ withTestEnv $ do
+      _ <- deleteExampleIndex
+      let updates = [CompressionSetting CompressionBest]
+      createResp <- createIndexWith (updates ++ [NumberOfReplicas (ReplicaCount 0)]) 1 testIndex
+      liftIO $ validateStatus createResp 200
+      getResp <- getIndexSettings testIndex
+      liftIO $ getResp `shouldBe` Right
+        (IndexSettingsSummary testIndex (IndexSettings (ShardCount 1) (ReplicaCount 0)) updates)
+
+
   describe "Index Optimization" $ do
     it "returns a successful response upon completion" $ withTestEnv $ do
       _ <- createExampleIndex