Elastic 5.0 compatibility

This commit moves the bloodhound library to Elastic 5.0.
The Filter type was changed to be a wrapper around queries, and some
new APIs were added.

It is *not* feature-complete; however, all tests pass.
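For illustration, a minimal sketch of the new Filter usage, using only names that appear in this diff (the index/field values are placeholders):

    {-# LANGUAGE OverloadedStrings #-}
    import Database.Bloodhound

    -- A Filter is now a newtype over Query; it is rendered inside a bool
    -- query's (non-scoring) filter context.
    userFilter :: Filter
    userFilter = Filter (TermQuery (Term "user" "bitemyapp") Nothing)

    searchWithFilter :: Search
    searchWithFilter = mkSearch (Just (MatchAllQuery Nothing)) (Just userFilter)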
Josh Berman 2016-11-29 13:18:01 +02:00
parent 0027a45506
commit 437bc3d16f
6 changed files with 260 additions and 725 deletions

View File

@ -1,4 +1,4 @@
name: bloodhound
name: bloodhound5
version: 0.12.0.0
synopsis: ElasticSearch client library for Haskell
description: ElasticSearch made awesome for Haskell hackers
@ -57,7 +57,7 @@ test-suite tests
main-is: tests.hs
hs-source-dirs: tests
build-depends: base,
bloodhound,
bloodhound5,
bytestring,
http-client,
http-types,
@ -76,7 +76,7 @@ test-suite tests
errors,
exceptions,
temporary,
unix,
unix-compat,
network-uri
default-language: Haskell2010
@ -89,7 +89,7 @@ test-suite doctests
if impl(ghc >= 7.8)
build-depends: base,
aeson,
bloodhound,
bloodhound5,
directory,
doctest >= 0.10.1,
filepath

View File

@ -28,7 +28,7 @@ module Database.Bloodhound.Client
, deleteIndex
, updateIndexSettings
, getIndexSettings
, optimizeIndex
, forceMergeIndex
, indexExists
, openIndex
, closeIndex
@ -43,7 +43,6 @@ module Database.Bloodhound.Client
, deleteTemplate
-- ** Mapping
, putMapping
, deleteMapping
-- ** Documents
, indexDocument
, updateDocument
@ -133,7 +132,8 @@ import Database.Bloodhound.Types
-- >>> let testMapping = MappingName "tweet"
-- >>> let defaultIndexSettings = IndexSettings (ShardCount 1) (ReplicaCount 0)
-- >>> data TweetMapping = TweetMapping deriving (Eq, Show)
-- >>> _ <- runBH' $ deleteIndex testIndex >> deleteMapping testIndex testMapping
-- >>> _ <- runBH' $ deleteIndex testIndex
-- >>> _ <- runBH' $ deleteIndex (IndexName "didimakeanindex")
-- >>> import GHC.Generics
-- >>> import Data.Time.Calendar (Day (..))
-- >>> import Data.Time.Clock (UTCTime (..), secondsToDiffTime)
@ -224,12 +224,8 @@ appendSearchTypeParam originalUrl st = addQuery params originalUrl
where stText = "search_type"
params
| st == SearchTypeDfsQueryThenFetch = [(stText, Just "dfs_query_then_fetch")]
| st == SearchTypeCount = [(stText, Just "count")]
| st == SearchTypeScan = [(stText, Just "scan"), ("scroll", Just "1m")]
| st == SearchTypeQueryAndFetch = [(stText, Just "query_and_fetch")]
| st == SearchTypeDfsQueryAndFetch = [(stText, Just "dfs_query_and_fetch")]
-- used to catch 'SearchTypeQueryThenFetch', which is also the default
| otherwise = [(stText, Just "query_then_fetch")]
| otherwise = []
-- | Severely dumbed down query renderer. Assumes your data doesn't
-- need any encoding
@ -273,8 +269,8 @@ post = dispatch NHTM.methodPost
-- | 'getStatus' fetches the 'Status' of a 'Server'
--
-- >>> serverStatus <- runBH' getStatus
-- >>> fmap status (serverStatus)
-- Just 200
-- >>> fmap tagline (serverStatus)
-- Just "You Know, for Search"
getStatus :: MonadBH m => m (Maybe Status)
getStatus = do
response <- get =<< url
@ -513,7 +509,7 @@ createIndex indexSettings (IndexName indexName) =
-- >>> response <- runBH' $ deleteIndex (IndexName "didimakeanindex")
-- >>> respIsTwoHunna response
-- True
-- >>> runBH' $ indexExists testIndex
-- >>> runBH' $ indexExists (IndexName "didimakeanindex")
-- False
deleteIndex :: MonadBH m => IndexName -> m Reply
deleteIndex (IndexName indexName) =
@ -540,33 +536,36 @@ getIndexSettings (IndexName indexName) = do
where url = joinPath [indexName, "_settings"]
-- | 'optimizeIndex' will optimize a single index, list of indexes or
-- all indexes. Note that this call will block until finishing but
-- will continue even if the request times out. Concurrent requests to
-- optimize an index while another is performing will block until the
-- previous one finishes. For more information see
-- <https://www.elastic.co/guide/en/elasticsearch/reference/1.7/indices-optimize.html>. Nothing
-- | 'forceMergeIndex'
--
-- The force merge API forces a merge of one or more indices. The merge
-- relates to the number of segments a Lucene index holds within each
-- shard; the force merge operation reduces the number of segments by
-- merging them.
--
-- This call will block until the merge is complete. If the HTTP connection
-- is lost, the request will continue in the background, and any new
-- requests will block until the previous force merge is complete.
-- For more information see
-- <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html#indices-forcemerge>.
-- Nothing
-- worthwhile comes back in the reply body, so matching on the status
-- should suffice.
--
-- 'optimizeIndex' with a maxNumSegments of 1 and onlyExpungeDeletes
-- 'forceMergeIndex' with a maxNumSegments of 1 and onlyExpungeDeletes
-- set to True is the main way to release disk space held by deleted
-- documents back to the OS.
--
-- Note that this API was deprecated in ElasticSearch 2.1 for the
-- almost completely identical forcemerge API. Adding support to that
-- API would be trivial but due to the significant breaking changes,
-- this library cannot currently be used with >= 2.0, so that feature was omitted.
--
-- >>> let ixn = IndexName "unoptimizedindex"
-- >>> _ <- runBH' $ deleteIndex ixn >> createIndex defaultIndexSettings ixn
-- >>> response <- runBH' $ optimizeIndex (IndexList (ixn :| [])) (defaultIndexOptimizationSettings { maxNumSegments = Just 1, onlyExpungeDeletes = True })
-- >>> response <- runBH' $ forceMergeIndex (IndexList (ixn :| [])) (defaultForceMergeIndexSettings { maxNumSegments = Just 1, onlyExpungeDeletes = True })
-- >>> respIsTwoHunna response
-- True
optimizeIndex :: MonadBH m => IndexSelection -> IndexOptimizationSettings -> m Reply
optimizeIndex ixs IndexOptimizationSettings {..} =
forceMergeIndex :: MonadBH m => IndexSelection -> ForceMergeIndexSettings -> m Reply
forceMergeIndex ixs ForceMergeIndexSettings {..} =
bindM2 post url (return body)
where url = addQuery params <$> joinPath [indexName, "_optimize"]
where url = addQuery params <$> joinPath [indexName, "_forcemerge"]
params = catMaybes [ ("max_num_segments",) . Just . showText <$> maxNumSegments
, Just ("only_expunge_deletes", Just (boolQP onlyExpungeDeletes))
, Just ("flush", Just (boolQP flushAfterOptimize))
@ -745,7 +744,7 @@ deleteTemplate (TemplateName templateName) =
-- >>> _ <- runBH' $ createIndex defaultIndexSettings testIndex
-- >>> resp <- runBH' $ putMapping testIndex testMapping TweetMapping
-- >>> print resp
-- Response {responseStatus = Status {statusCode = 200, statusMessage = "OK"}, responseVersion = HTTP/1.1, responseHeaders = [("Content-Type","application/json; charset=UTF-8"),("Content-Length","21")], responseBody = "{\"acknowledged\":true}", responseCookieJar = CJ {expose = []}, responseClose' = ResponseClose}
-- Response {responseStatus = Status {statusCode = 200, statusMessage = "OK"}, responseVersion = HTTP/1.1, responseHeaders = [("content-type","application/json; charset=UTF-8"),("content-encoding","gzip"),("transfer-encoding","chunked")], responseBody = "{\"acknowledged\":true}", responseCookieJar = CJ {expose = []}, responseClose' = ResponseClose}
putMapping :: (MonadBH m, ToJSON a) => IndexName
-> MappingName -> a -> m Reply
putMapping (IndexName indexName) (MappingName mappingName) mapping =
@ -755,21 +754,6 @@ putMapping (IndexName indexName) (MappingName mappingName) mapping =
-- erroneously. The correct API call is: "/INDEX/_mapping/MAPPING_NAME"
body = Just $ encode mapping
-- | 'deleteMapping' is an HTTP DELETE and deletes a mapping for a given index.
-- Mappings are schemas for documents in indexes.
--
-- >>> _ <- runBH' $ createIndex defaultIndexSettings testIndex
-- >>> _ <- runBH' $ putMapping testIndex testMapping TweetMapping
-- >>> resp <- runBH' $ deleteMapping testIndex testMapping
-- >>> print resp
-- Response {responseStatus = Status {statusCode = 200, statusMessage = "OK"}, responseVersion = HTTP/1.1, responseHeaders = [("Content-Type","application/json; charset=UTF-8"),("Content-Length","21")], responseBody = "{\"acknowledged\":true}", responseCookieJar = CJ {expose = []}, responseClose' = ResponseClose}
deleteMapping :: MonadBH m => IndexName -> MappingName -> m Reply
deleteMapping (IndexName indexName)
(MappingName mappingName) =
-- "_mapping" and mappingName below were originally transposed
-- erroneously. The correct API call is: "/INDEX/_mapping/MAPPING_NAME"
delete =<< joinPath [indexName, "_mapping", mappingName]
versionCtlParams :: IndexDocumentSettings -> [(Text, Maybe Text)]
versionCtlParams cfg =
case idsVersionControl cfg of
@ -791,7 +775,7 @@ versionCtlParams cfg =
--
-- >>> resp <- runBH' $ indexDocument testIndex testMapping defaultIndexDocumentSettings exampleTweet (DocId "1")
-- >>> print resp
-- Response {responseStatus = Status {statusCode = 201, statusMessage = "Created"}, responseVersion = HTTP/1.1, responseHeaders = [("Content-Type","application/json; charset=UTF-8"),("Content-Length","74")], responseBody = "{\"_index\":\"twitter\",\"_type\":\"tweet\",\"_id\":\"1\",\"_version\":1,\"created\":true}", responseCookieJar = CJ {expose = []}, responseClose' = ResponseClose}
-- Response {responseStatus = Status {statusCode = 201, statusMessage = "Created"}, responseVersion = HTTP/1.1, responseHeaders = [("Location","/twitter/tweet/1"),("content-type","application/json; charset=UTF-8"),("content-encoding","gzip"),("transfer-encoding","chunked")], responseBody = "{\"_index\":\"twitter\",\"_type\":\"tweet\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"_shards\":{\"total\":2,\"successful\":1,\"failed\":0},\"created\":true}", responseCookieJar = CJ {expose = []}, responseClose' = ResponseClose}
indexDocument :: (ToJSON doc, MonadBH m) => IndexName -> MappingName
-> IndexDocumentSettings -> doc -> DocId -> m Reply
indexDocument (IndexName indexName)
@ -958,16 +942,21 @@ searchByType (IndexName indexName)
-- search results. Note that the search is forced to sort by '_doc' for
-- efficient scrolling, so results will not be relevance-sorted. Combine
-- this with 'advanceScroll' to efficiently stream through the full result set
getInitialScroll :: MonadBH m => IndexName -> MappingName -> Search -> m (Maybe ScrollId)
getInitialScroll (IndexName indexName) (MappingName mappingName) search = do
let url = joinPath [indexName, mappingName, "_search"]
search' = search { searchType = SearchTypeScan }
resp' <- bindM2 dispatchSearch url (return search')
let msr = decode' $ responseBody resp' :: Maybe (SearchResult ())
msid = maybe Nothing scrollId msr
return msid
getInitialScroll ::
(FromJSON a, MonadThrow m, MonadBH m) => IndexName ->
MappingName ->
Search ->
m (Either EsError (SearchResult a))
getInitialScroll (IndexName indexName) (MappingName mappingName) search' = do
let url = addQuery params <$> joinPath [indexName, mappingName, "_search"]
params = [("scroll", Just "1m")]
sorting = Just [DefaultSortSpec $ mkSort (FieldName "_doc") Descending]
search = search' { sortBody = sorting }
resp' <- bindM2 dispatchSearch url (return search)
parseEsResponse resp'
scroll' :: (FromJSON a, MonadBH m, MonadThrow m) => Maybe ScrollId -> m ([Hit a], Maybe ScrollId)
scroll' :: (FromJSON a, MonadBH m, MonadThrow m) => Maybe ScrollId ->
m ([Hit a], Maybe ScrollId)
scroll' Nothing = return ([], Nothing)
scroll' (Just sid) = do
res <- advanceScroll sid 60
@ -987,20 +976,29 @@ advanceScroll
-- ^ How long should the snapshot of data be kept around? This timeout is updated every time 'advanceScroll' is used, so don't feel the need to set it to the entire duration of your search processing. Note that durations < 1s will be rounded up. Also note that 'NominalDiffTime' is an instance of Num so literals like 60 will be interpreted as seconds. 60s is a reasonable default.
-> m (Either EsError (SearchResult a))
advanceScroll (ScrollId sid) scroll = do
url <- joinPath ["_search/scroll?scroll=" <> scrollTime]
parseEsResponse =<< post url (Just . L.fromStrict $ T.encodeUtf8 sid)
url <- joinPath ["_search", "scroll"]
resp <- post url (Just $ encode scrollObject)
parseEsResponse resp
where scrollTime = showText secs <> "s"
secs :: Integer
secs = round scroll
scrollObject = object [ "scroll" .= scrollTime
, "scroll_id" .= sid
]
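For reference, the body built from scrollObject above renders roughly as (scroll id elided):

    {"scroll":"60s","scroll_id":"<sid>"}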
simpleAccumulator :: (FromJSON a, MonadBH m, MonadThrow m) => [Hit a] -> ([Hit a], Maybe ScrollId) -> m ([Hit a], Maybe ScrollId)
simpleAccumulator ::
(FromJSON a, MonadBH m, MonadThrow m) =>
[Hit a] ->
([Hit a], Maybe ScrollId) ->
m ([Hit a], Maybe ScrollId)
simpleAccumulator oldHits (newHits, Nothing) = return (oldHits ++ newHits, Nothing)
simpleAccumulator oldHits ([], _) = return (oldHits, Nothing)
simpleAccumulator oldHits (newHits, msid) = do
(newHits', msid') <- scroll' msid
simpleAccumulator (oldHits ++ newHits) (newHits', msid')
-- | 'scanSearch' uses the 'scan&scroll' API of elastic,
-- | 'scanSearch' uses the 'scroll' API of elastic,
-- for a given 'IndexName' and 'MappingName'. Note that this will
-- consume the entire search result set and will be doing O(n) list
-- appends so this may not be suitable for large result sets. In that
@ -1009,11 +1007,16 @@ simpleAccumulator oldHits (newHits, msid) = do
-- pipes, or your favorite streaming IO abstraction of choice. Note
-- that ordering on the search would destroy performance and thus is
-- ignored.
scanSearch :: (FromJSON a, MonadBH m, MonadThrow m) => IndexName -> MappingName -> Search -> m [Hit a]
scanSearch :: (FromJSON a, MonadBH m, MonadThrow m) => IndexName
-> MappingName
-> Search
-> m [Hit a]
scanSearch indexName mappingName search = do
msid <- getInitialScroll indexName mappingName search
(hits, msid') <- scroll' msid
(totalHits, _) <- simpleAccumulator [] (hits, msid')
initialSearchResult <- getInitialScroll indexName mappingName search
let (hits', msid) = case initialSearchResult of
      Right SearchResult {..} -> (hits searchHits, scrollId)
      Left _ -> ([], Nothing)
(totalHits, _) <- simpleAccumulator [] (hits', msid)
return totalHits
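A hedged usage sketch (the index and mapping names are placeholders; any FromJSON document type works):

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Aeson (FromJSON)
    import Database.Bloodhound

    -- Pull every matching hit into memory; fine for small result sets,
    -- use a streaming abstraction for anything large (see caveat above).
    allHits :: FromJSON a => BH IO [Hit a]
    allHits = scanSearch (IndexName "twitter") (MappingName "tweet")
                         (mkSearch (Just (MatchAllQuery Nothing)) Nothing)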
-- | 'mkSearch' is a helper function for defaulting additional fields of a 'Search'

View File

@ -152,8 +152,8 @@ module Database.Bloodhound.Types
, IndexSelection(..)
, NodeSelection(..)
, NodeSelector(..)
, IndexOptimizationSettings(..)
, defaultIndexOptimizationSettings
, ForceMergeIndexSettings(..)
, defaultForceMergeIndexSettings
, TemplateName(..)
, TemplatePattern(..)
, MappingName(..)
@ -178,7 +178,6 @@ module Database.Bloodhound.Types
, BoostingQuery(..)
, CommonTermsQuery(..)
, DisMaxQuery(..)
, FilteredQuery(..)
, FuzzyLikeThisQuery(..)
, FuzzyLikeFieldQuery(..)
, FuzzyQuery(..)
@ -452,7 +451,7 @@ runBH e f = runReaderT (unBH f) e
{-| 'Version' is embedded in 'Status' -}
data Version = Version { number :: VersionNumber
, build_hash :: BuildHash
, build_timestamp :: UTCTime
, build_date :: UTCTime
, build_snapshot :: Bool
, lucene_version :: VersionNumber } deriving (Eq, Read, Show, Generic, Typeable)
@ -466,11 +465,11 @@ newtype VersionNumber = VersionNumber { versionNumber :: Vers.Version
<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-status.html#indices-status>
-}
data Status = Status { ok :: Maybe Bool
, status :: Int
, name :: Text
, version :: Version
, tagline :: Text } deriving (Eq, Read, Show, Generic)
data Status = Status { name :: Text
, cluster_name :: Text
, cluster_uuid :: Text
, version :: Version
, tagline :: Text } deriving (Eq, Read, Show, Generic)
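For reference, the new record mirrors the shape of the ES 5.x root endpoint response, roughly (values elided):

    {"name":"...","cluster_name":"...","cluster_uuid":"...",
     "version":{"number":"5.0.0",...},"tagline":"You Know, for Search"}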
{-| 'IndexSettings' is used to configure the shards and replicas when you create
an Elasticsearch Index.
@ -487,12 +486,12 @@ defaultIndexSettings :: IndexSettings
defaultIndexSettings = IndexSettings (ShardCount 3) (ReplicaCount 2)
{-| 'IndexOptimizationSettings' is used to configure index optimization. See
<https://www.elastic.co/guide/en/elasticsearch/reference/1.7/indices-optimize.html>
{-| 'ForceMergeIndexSettings' is used to configure index optimization. See
<https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html>
for more info.
-}
data IndexOptimizationSettings =
IndexOptimizationSettings { maxNumSegments :: Maybe Int
data ForceMergeIndexSettings =
ForceMergeIndexSettings { maxNumSegments :: Maybe Int
-- ^ Number of segments to optimize to. 1 will fully optimize the index. If omitted, the default behavior is to only optimize if the server deems it necessary.
, onlyExpungeDeletes :: Bool
-- ^ Should the optimize process only expunge segments with deletes in them? If the purpose of the optimization is to free disk space, this should be set to True.
@ -501,12 +500,12 @@ data IndexOptimizationSettings =
} deriving (Eq, Show, Generic, Typeable)
{-| 'defaultIndexOptimizationSettings' implements the default settings that
{-| 'defaultForceMergeIndexSettings' implements the default settings that
ElasticSearch uses for index optimization. 'maxNumSegments' is Nothing,
'onlyExpungeDeletes' is False, and flushAfterOptimize is True.
-}
defaultIndexOptimizationSettings :: IndexOptimizationSettings
defaultIndexOptimizationSettings = IndexOptimizationSettings Nothing False True
defaultForceMergeIndexSettings :: ForceMergeIndexSettings
defaultForceMergeIndexSettings = ForceMergeIndexSettings Nothing False True
{-| 'UpdatableIndexSetting' are settings which may be updated after an index is created.
@ -845,6 +844,8 @@ data SortSpec = DefaultSortSpec DefaultSort
{-| 'DefaultSort' is usually the kind of 'SortSpec' you'll want. There's a
'mkSort' convenience function for when you want to specify only the most
common parameters.
The `ignoreUnmapped` field, when `Just`, is used to set Elasticsearch's 'unmapped_type' parameter:
<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html#search-request-sort>
-}
@ -852,7 +853,7 @@ data DefaultSort =
DefaultSort { sortFieldName :: FieldName
, sortOrder :: SortOrder
-- default False
, ignoreUnmapped :: Bool
, ignoreUnmapped :: Maybe Text
, sortMode :: Maybe SortMode
, missingSort :: Maybe Missing
, nestedFilter :: Maybe Filter } deriving (Eq, Read, Show, Generic, Typeable)
@ -887,7 +888,7 @@ data SortMode = SortMin
that you can concisely describe the usual kind of 'SortSpec's you want.
-}
mkSort :: FieldName -> SortOrder -> DefaultSort
mkSort fieldName sOrder = DefaultSort fieldName sOrder False Nothing Nothing Nothing
mkSort fieldName sOrder = DefaultSort fieldName sOrder Nothing Nothing Nothing Nothing
{-| 'Cache' is for telling ES whether it should cache a 'Filter' or not.
'Query's cannot be cached.
@ -1100,10 +1101,6 @@ data Search = Search { queryBody :: Maybe Query
data SearchType = SearchTypeQueryThenFetch
| SearchTypeDfsQueryThenFetch
| SearchTypeCount
| SearchTypeScan
| SearchTypeQueryAndFetch
| SearchTypeDfsQueryAndFetch
deriving (Eq, Read, Show, Generic, Typeable)
data Source =
@ -1183,10 +1180,8 @@ data Query =
| QueryBoolQuery BoolQuery
| QueryBoostingQuery BoostingQuery
| QueryCommonTermsQuery CommonTermsQuery
| ConstantScoreFilter Filter Boost
| ConstantScoreQuery Query Boost
| QueryDisMaxQuery DisMaxQuery
| QueryFilteredQuery FilteredQuery
| QueryFuzzyLikeThisQuery FuzzyLikeThisQuery
| QueryFuzzyLikeFieldQuery FuzzyLikeFieldQuery
| QueryFuzzyQuery FuzzyQuery
@ -1203,8 +1198,21 @@ data Query =
| QuerySimpleQueryStringQuery SimpleQueryStringQuery
| QueryRangeQuery RangeQuery
| QueryRegexpQuery RegexpQuery
| QueryExistsQuery FieldName
| QueryMatchNoneQuery
deriving (Eq, Read, Show, Generic, Typeable)
-- | As of Elastic 2.0, 'Filter's are just 'Query's housed in a Bool Query and
-- executed in a non-scoring filter context.
newtype Filter = Filter { unFilter :: Query }
deriving (Eq, Read, Show, Generic, Typeable)
instance ToJSON Filter where
toJSON = toJSON . unFilter
instance FromJSON Filter where
parseJSON v = Filter <$> parseJSON v
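Since both instances above delegate to the wrapped Query, a Filter serializes exactly as its inner query would, e.g. (a sketch; the exact rendering may differ):

    encode (Filter (TermQuery (Term "user" "bitemyapp") Nothing))
    -- ~ "{\"term\":{\"user\":{\"value\":\"bitemyapp\"}}}"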
data RegexpQuery =
RegexpQuery { regexpQueryField :: FieldName
, regexpQuery :: Regexp
@ -1385,11 +1393,6 @@ data FuzzyLikeThisQuery =
, fuzzyLikeAnalyzer :: Maybe Analyzer
} deriving (Eq, Read, Show, Generic, Typeable)
data FilteredQuery =
FilteredQuery
{ filteredQuery :: Query
, filteredFilter :: Filter } deriving (Eq, Read, Show, Generic, Typeable)
data DisMaxQuery =
DisMaxQuery { disMaxQueries :: [Query]
-- default 0.0
@ -1450,6 +1453,7 @@ data MultiMatchQueryType =
data BoolQuery =
BoolQuery { boolQueryMustMatch :: [Query]
, boolQueryFilter :: [Filter]
, boolQueryMustNotMatch :: [Query]
, boolQueryShouldMatch :: [Query]
, boolQueryMinimumShouldMatch :: Maybe MinimumMatch
@ -1457,9 +1461,9 @@ data BoolQuery =
, boolQueryDisableCoord :: Maybe DisableCoord
} deriving (Eq, Read, Show, Generic, Typeable)
mkBoolQuery :: [Query] -> [Query] -> [Query] -> BoolQuery
mkBoolQuery must mustNot should =
BoolQuery must mustNot should Nothing Nothing Nothing
mkBoolQuery :: [Query] -> [Filter] -> [Query] -> [Query] -> BoolQuery
mkBoolQuery must filter mustNot should =
BoolQuery must filter mustNot should Nothing Nothing Nothing
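A hedged construction sketch against the new signature (the term value is a placeholder):

    filteredBool :: Query
    filteredBool = QueryBoolQuery $
      mkBoolQuery [MatchAllQuery Nothing]                                -- must
                  [Filter (TermQuery (Term "user" "bitemyapp") Nothing)] -- filter
                  []                                                     -- must_not
                  []                                                     -- should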
data BoostingQuery =
BoostingQuery { positiveQuery :: Query
@ -1487,26 +1491,6 @@ data MinimumMatchHighLow =
MinimumMatchHighLow { lowFreq :: MinimumMatch
, highFreq :: MinimumMatch } deriving (Eq, Read, Show, Generic, Typeable)
data Filter = AndFilter [Filter] Cache
| OrFilter [Filter] Cache
| NotFilter Filter Cache
| IdentityFilter
| BoolFilter BoolMatch
| ExistsFilter FieldName -- always cached
| GeoBoundingBoxFilter GeoBoundingBoxConstraint
| GeoDistanceFilter GeoPoint Distance DistanceType OptimizeBbox Cache
| GeoDistanceRangeFilter GeoPoint DistanceRange
| GeoPolygonFilter FieldName [LatLon]
| IdsFilter MappingName [DocId]
| LimitFilter Int
| MissingFilter FieldName Existence NullValue
| PrefixFilter FieldName PrefixValue Cache
| QueryFilter Query Cache
| RangeFilter FieldName RangeValue RangeExecution Cache
| RegexpFilter FieldName Regexp RegexpFlags CacheName Cache CacheKey
| TermFilter Term Cache
deriving (Eq, Read, Show, Generic, Typeable)
data ZeroTermsQuery = ZeroTermsNone
| ZeroTermsAll deriving (Eq, Read, Show, Generic, Typeable)
@ -1713,8 +1697,7 @@ data Interval = Year
| Day
| Hour
| Minute
| Second
| FractionalInterval Float TimeInterval deriving (Eq, Read, Show, Generic, Typeable)
| Second deriving (Eq, Read, Show, Generic, Typeable)
data Aggregation = TermsAgg TermsAggregation
| CardinalityAgg CardinalityAggregation
@ -1808,7 +1791,7 @@ mkCardinalityAggregation t = CardinalityAggregation t Nothing
instance ToJSON Version where
toJSON Version {..} = object ["number" .= number
,"build_hash" .= build_hash
,"build_timestamp" .= build_timestamp
,"build_date" .= build_date
,"build_snapshot" .= build_snapshot
,"lucene_version" .= lucene_version]
@ -1817,7 +1800,7 @@ instance FromJSON Version where
where parse o = Version
<$> o .: "number"
<*> o .: "build_hash"
<*> o .: "build_timestamp"
<*> o .: "build_date"
<*> o .: "build_snapshot"
<*> o .: "lucene_version"
@ -1860,7 +1843,6 @@ instance ToJSON Interval where
toJSON Hour = "hour"
toJSON Minute = "minute"
toJSON Second = "second"
toJSON (FractionalInterval fraction interval) = toJSON $ show fraction ++ show interval
instance Show TimeInterval where
show Weeks = "w"
@ -2058,193 +2040,6 @@ instance FromJSON POSIXMS where
where parse n = let n' = truncate n :: Integer
in POSIXMS (posixSecondsToUTCTime (fromInteger (n' `div` 1000)))
instance Monoid Filter where
mempty = IdentityFilter
mappend a b = AndFilter [a, b] defaultCache
instance Seminearring Filter where
a <||> b = OrFilter [a, b] defaultCache
instance ToJSON Filter where
toJSON (AndFilter filters cache) =
object ["and" .=
object [ "filters" .= fmap toJSON filters
, "_cache" .= cache]]
toJSON (OrFilter filters cache) =
object ["or" .=
object [ "filters" .= fmap toJSON filters
, "_cache" .= cache]]
toJSON (NotFilter notFilter cache) =
object ["not" .=
object ["filter" .= notFilter
, "_cache" .= cache]]
toJSON (IdentityFilter) =
object ["match_all" .= object []]
toJSON (TermFilter (Term termFilterField termFilterValue) cache) =
object ["term" .= object base]
where base = [termFilterField .= termFilterValue,
"_cache" .= cache]
toJSON (ExistsFilter (FieldName fieldName)) =
object ["exists" .= object
["field" .= fieldName]]
toJSON (BoolFilter boolMatch) =
object ["bool" .= boolMatch]
toJSON (GeoBoundingBoxFilter bbConstraint) =
object ["geo_bounding_box" .= bbConstraint]
toJSON (GeoDistanceFilter (GeoPoint (FieldName distanceGeoField) geoDistLatLon)
distance distanceType optimizeBbox cache) =
object ["geo_distance" .=
object ["distance" .= distance
, "distance_type" .= distanceType
, "optimize_bbox" .= optimizeBbox
, distanceGeoField .= geoDistLatLon
, "_cache" .= cache]]
toJSON (GeoDistanceRangeFilter (GeoPoint (FieldName gddrField) drLatLon)
(DistanceRange geoDistRangeDistFrom drDistanceTo)) =
object ["geo_distance_range" .=
object ["from" .= geoDistRangeDistFrom
, "to" .= drDistanceTo
, gddrField .= drLatLon]]
toJSON (GeoPolygonFilter (FieldName geoPolygonFilterField) latLons) =
object ["geo_polygon" .=
object [geoPolygonFilterField .=
object ["points" .= fmap toJSON latLons]]]
toJSON (IdsFilter (MappingName mappingName) values) =
object ["ids" .=
object ["type" .= mappingName
, "values" .= fmap unpackId values]]
toJSON (LimitFilter limit) =
object ["limit" .= object ["value" .= limit]]
toJSON (MissingFilter (FieldName fieldName) (Existence existence) (NullValue nullValue)) =
object ["missing" .=
object ["field" .= fieldName
, "existence" .= existence
, "null_value" .= nullValue]]
toJSON (PrefixFilter (FieldName fieldName) fieldValue cache) =
object ["prefix" .=
object [fieldName .= fieldValue
, "_cache" .= cache]]
toJSON (QueryFilter query False) =
object ["query" .= toJSON query ]
toJSON (QueryFilter query True) =
object ["fquery" .=
object [ "query" .= toJSON query
, "_cache" .= True ]]
toJSON (RangeFilter (FieldName fieldName) rangeValue rangeExecution cache) =
object ["range" .=
object [ fieldName .= object (rangeValueToPair rangeValue)
, "execution" .= rangeExecution
, "_cache" .= cache]]
toJSON (RegexpFilter (FieldName fieldName)
(Regexp regexText) flags (CacheName cacheName) cache (CacheKey cacheKey)) =
object ["regexp" .=
object [fieldName .=
object ["value" .= regexText
, "flags" .= flags]
, "_name" .= cacheName
, "_cache" .= cache
, "_cache_key" .= cacheKey]]
instance FromJSON Filter where
parseJSON = withObject "Filter" parse
where parse o = andFilter `taggedWith` "and"
<|> orFilter `taggedWith` "or"
<|> notFilter `taggedWith` "not"
<|> identityFilter `taggedWith` "match_all"
<|> boolFilter `taggedWith` "bool"
<|> existsFilter `taggedWith` "exists"
<|> geoBoundingBoxFilter `taggedWith` "geo_bounding_box"
<|> geoDistanceFilter `taggedWith` "geo_distance"
<|> geoDistanceRangeFilter `taggedWith` "geo_distance_range"
<|> geoPolygonFilter `taggedWith` "geo_polygon"
<|> idsFilter `taggedWith` "ids"
<|> limitFilter `taggedWith` "limit"
<|> missingFilter `taggedWith` "missing"
<|> prefixFilter `taggedWith` "prefix"
<|> queryFilter `taggedWith` "query"
<|> fqueryFilter `taggedWith` "fquery"
<|> rangeFilter `taggedWith` "range"
<|> regexpFilter `taggedWith` "regexp"
<|> termFilter `taggedWith` "term"
where taggedWith parser k = parser =<< o .: k
andFilter o = AndFilter <$> o .: "filters"
<*> o .:? "_cache" .!= defaultCache
orFilter o = OrFilter <$> o .: "filters"
<*> o .:? "_cache" .!= defaultCache
notFilter o = NotFilter <$> o .: "filter"
<*> o .: "_cache" .!= defaultCache
identityFilter :: Object -> Parser Filter
identityFilter m
| HM.null m = pure IdentityFilter
| otherwise = fail ("Identityfilter expected empty object but got " <> show m)
boolFilter = pure . BoolFilter
existsFilter o = ExistsFilter <$> o .: "field"
geoBoundingBoxFilter = pure . GeoBoundingBoxFilter
geoDistanceFilter o = do
case HM.toList (deleteSeveral ["distance", "distance_type", "optimize_bbox", "_cache"] o) of
[(fn, v)] -> do
gp <- GeoPoint (FieldName fn) <$> parseJSON v
GeoDistanceFilter gp <$> o .: "distance"
<*> o .: "distance_type"
<*> o .: "optimize_bbox"
<*> o .:? "_cache" .!= defaultCache
_ -> fail "Could not find GeoDistanceFilter field name"
geoDistanceRangeFilter o = do
case HM.toList (deleteSeveral ["from", "to"] o) of
[(fn, v)] -> do
gp <- GeoPoint (FieldName fn) <$> parseJSON v
rng <- DistanceRange <$> o .: "from" <*> o .: "to"
return (GeoDistanceRangeFilter gp rng)
_ -> fail "Could not find GeoDistanceRangeFilter field name"
geoPolygonFilter = fieldTagged $ \fn o -> GeoPolygonFilter fn <$> o .: "points"
idsFilter o = IdsFilter <$> o .: "type"
<*> o .: "values"
limitFilter o = LimitFilter <$> o .: "value"
missingFilter o = MissingFilter <$> o .: "field"
<*> o .: "existence"
<*> o .: "null_value"
prefixFilter o = case HM.toList (HM.delete "_cache" o) of
[(fn, String v)] -> PrefixFilter (FieldName fn) v <$> o .:? "_cache" .!= defaultCache
_ -> fail "Could not parse PrefixFilter"
queryFilter q = pure (QueryFilter q False)
fqueryFilter o = QueryFilter <$> o .: "query" <*> pure True
rangeFilter o = case HM.toList (deleteSeveral ["execution", "_cache"] o) of
[(fn, v)] -> RangeFilter (FieldName fn)
<$> parseJSON v
<*> o .: "execution"
<*> o .:? "_cache" .!= defaultCache
_ -> fail "Could not find field name for RangeFilter"
regexpFilter o = case HM.toList (deleteSeveral ["_name", "_cache", "_cache_key"] o) of
[(fn, Object o')] -> RegexpFilter (FieldName fn)
<$> o' .: "value"
<*> o' .: "flags"
<*> o .: "_name"
<*> o .:? "_cache" .!= defaultCache
<*> o .: "_cache_key"
_ -> fail "Could not find field name for RegexpFilter"
termFilter o = case HM.toList (HM.delete "_cache" o) of
[(termField, String termVal)] -> TermFilter (Term termField termVal)
<$> o .:? "_cache" .!= defaultCache
_ -> fail "Could not find term field for TermFilter"
fieldTagged :: Monad m => (FieldName -> Object -> m a) -> Object -> m a
fieldTagged f o = case HM.toList o of
[(k, Object o')] -> f (FieldName k) o'
@ -2291,10 +2086,6 @@ instance ToJSON Query where
toJSON (QueryCommonTermsQuery commonTermsQuery) =
object [ "common" .= commonTermsQuery ]
toJSON (ConstantScoreFilter csFilter boost) =
object ["constant_score" .= object ["filter" .= csFilter
, "boost" .= boost]]
toJSON (ConstantScoreQuery query boost) =
object ["constant_score" .= object ["query" .= query
, "boost" .= boost]]
@ -2302,9 +2093,6 @@ instance ToJSON Query where
toJSON (QueryDisMaxQuery disMaxQuery) =
object [ "dis_max" .= disMaxQuery ]
toJSON (QueryFilteredQuery qFilteredQuery) =
object [ "filtered" .= qFilteredQuery ]
toJSON (QueryFuzzyLikeThisQuery fuzzyQuery) =
object [ "fuzzy_like_this" .= fuzzyQuery ]
@ -2347,6 +2135,13 @@ instance ToJSON Query where
toJSON (QuerySimpleQueryStringQuery query) =
object [ "simple_query_string" .= query ]
toJSON (QueryExistsQuery (FieldName fieldName)) =
object ["exists" .= object
["field" .= fieldName]
]
toJSON QueryMatchNoneQuery =
object ["match_none" .= object []]
instance FromJSON Query where
parseJSON v = withObject "Query" parse v
where parse o = termQuery `taggedWith` "term"
@ -2358,10 +2153,8 @@ instance FromJSON Query where
<|> queryBoolQuery `taggedWith` "bool"
<|> queryBoostingQuery `taggedWith` "boosting"
<|> queryCommonTermsQuery `taggedWith` "common"
<|> constantScoreFilter `taggedWith` "constant_score"
<|> constantScoreQuery `taggedWith` "constant_score"
<|> queryDisMaxQuery `taggedWith` "dis_max"
<|> queryFilteredQuery `taggedWith` "filtered"
<|> queryFuzzyLikeThisQuery `taggedWith` "fuzzy_like_this"
<|> queryFuzzyLikeFieldQuery `taggedWith` "fuzzy_like_this_field"
<|> queryFuzzyQuery `taggedWith` "fuzzy"
@ -2393,16 +2186,11 @@ instance FromJSON Query where
queryBoolQuery = pure . QueryBoolQuery
queryBoostingQuery = pure . QueryBoostingQuery
queryCommonTermsQuery = pure . QueryCommonTermsQuery
constantScoreFilter o = case HM.lookup "filter" o of
Just x -> ConstantScoreFilter <$> parseJSON x
<*> o .: "boost"
_ -> fail "Does not appear to be a ConstantScoreFilter"
constantScoreQuery o = case HM.lookup "query" o of
Just x -> ConstantScoreQuery <$> parseJSON x
<*> o .: "boost"
_ -> fail "Does not appear to be a ConstantScoreQuery"
queryDisMaxQuery = pure . QueryDisMaxQuery
queryFilteredQuery = pure . QueryFilteredQuery
queryFuzzyLikeThisQuery = pure . QueryFuzzyLikeThisQuery
queryFuzzyLikeFieldQuery = pure . QueryFuzzyLikeFieldQuery
queryFuzzyQuery = pure . QueryFuzzyQuery
@ -2417,6 +2205,7 @@ instance FromJSON Query where
queryRangeQuery = pure . QueryRangeQuery
queryRegexpQuery = pure . QueryRegexpQuery
querySimpleQueryStringQuery = pure . QuerySimpleQueryStringQuery
queryExistsQuery o = QueryExistsQuery <$> o .: "field"
omitNulls :: [(Text, Value)] -> Value
@ -2425,7 +2214,6 @@ omitNulls = object . filter notNull where
notNull (_, Array a) = (not . V.null) a
notNull _ = True
instance ToJSON SimpleQueryStringQuery where
toJSON SimpleQueryStringQuery {..} =
omitNulls (base ++ maybeAdd)
@ -2814,17 +2602,6 @@ instance FromJSON FuzzyLikeThisQuery where
<*> o .: "boost"
<*> o .:? "analyzer"
instance ToJSON FilteredQuery where
toJSON (FilteredQuery query fFilter) =
object [ "query" .= query
, "filter" .= fFilter ]
instance FromJSON FilteredQuery where
parseJSON = withObject "FilteredQuery" parse
where parse o = FilteredQuery
<$> o .: "query"
<*> o .: "filter"
instance ToJSON DisMaxQuery where
toJSON (DisMaxQuery queries tiebreaker boost) =
omitNulls base
@ -2896,9 +2673,10 @@ instance FromJSON BoostingQuery where
<*> o .: "negative_boost"
instance ToJSON BoolQuery where
toJSON (BoolQuery mustM notM shouldM bqMin boost disableCoord) =
toJSON (BoolQuery mustM filterM notM shouldM bqMin boost disableCoord) =
omitNulls base
where base = [ "must" .= mustM
, "filter" .= filterM
, "must_not" .= notM
, "should" .= shouldM
, "minimum_should_match" .= bqMin
@ -2909,6 +2687,7 @@ instance FromJSON BoolQuery where
parseJSON = withObject "BoolQuery" parse
where parse o = BoolQuery
<$> o .:? "must" .!= []
<*> o .:? "filter" .!= []
<*> o .:? "must_not" .!= []
<*> o .:? "should" .!= []
<*> o .:? "minimum_should_match"
@ -3023,9 +2802,9 @@ instance FromJSON MatchQueryType where
instance FromJSON Status where
parseJSON (Object v) = Status <$>
v .:? "ok" <*>
(v .:? "status" .!= 200) <*>
v .: "name" <*>
v .: "cluster_name" <*>
v .: "cluster_uuid" <*>
v .: "version" <*>
v .: "tagline"
parseJSON _ = empty
@ -3355,9 +3134,8 @@ instance FromJSON SearchAliasRouting where
where parse t = SearchAliasRouting <$> parseNEJSON (String <$> T.splitOn "," t)
instance ToJSON Search where
toJSON (Search query sFilter sort searchAggs highlight sTrackSortScores sFrom sSize _ sFields sSource) =
omitNulls [ "query" .= query
, "filter" .= sFilter
toJSON (Search mquery sFilter sort searchAggs highlight sTrackSortScores sFrom sSize _ sFields sSource) =
omitNulls [ "query" .= query'
, "sort" .= sort
, "aggregations" .= searchAggs
, "highlight" .= highlight
@ -3366,12 +3144,15 @@ instance ToJSON Search where
, "track_scores" .= sTrackSortScores
, "fields" .= sFields
, "_source" .= sSource]
where query' = case sFilter of
Nothing -> mquery
Just x -> Just . QueryBoolQuery $ mkBoolQuery (maybeToList mquery) [x] [] []
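The net effect of query' above: a Search carrying both a query and a filter now renders them as one bool query, roughly

    {"query":{"bool":{"must":[<query>],"filter":[<filter>]}}}

rather than the pre-5.0 top-level "filter" key.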
instance ToJSON Source where
toJSON NoSource = toJSON False
toJSON (SourcePatterns patterns) = toJSON patterns
toJSON (SourceIncludeExclude incl excl) = object [ "include" .= incl, "exclude" .= excl ]
toJSON (SourceIncludeExclude incl excl) = object [ "includes" .= incl, "excludes" .= excl ]
instance ToJSON PatternOrPatterns where
toJSON (PopPattern pattern) = toJSON pattern
@ -3480,7 +3261,7 @@ instance ToJSON SortSpec where
dsSortMode dsMissingSort dsNestedFilter)) =
object [dsSortFieldName .= omitNulls base] where
base = [ "order" .= dsSortOrder
, "ignore_unmapped" .= dsIgnoreUnmapped
, "unmapped_type" .= dsIgnoreUnmapped
, "mode" .= dsSortMode
, "missing" .= dsMissingSort
, "nested_filter" .= dsNestedFilter ]
@ -3887,7 +3668,7 @@ data NodeStats = NodeStats {
, nodeStatsHTTP :: NodeHTTPStats
, nodeStatsTransport :: NodeTransportStats
, nodeStatsFS :: NodeFSStats
, nodeStatsNetwork :: NodeNetworkStats
, nodeStatsNetwork :: Maybe NodeNetworkStats
, nodeStatsThreadPool :: NodeThreadPoolsStats
, nodeStatsJVM :: NodeJVMStats
, nodeStatsProcess :: NodeProcessStats
@ -3940,7 +3721,7 @@ data NodeDataPathStats = NodeDataPathStats {
, nodeDataPathFree :: Bytes
, nodeDataPathTotal :: Bytes
, nodeDataPathType :: Maybe Text
, nodeDataPathDevice :: Text
, nodeDataPathDevice :: Maybe Text
, nodeDataPathMount :: Text
, nodeDataPathPath :: Text
} deriving (Eq, Show, Generic, Typeable)
@ -3979,17 +3760,17 @@ data NodeThreadPoolsStats = NodeThreadPoolsStats {
, nodeThreadPoolsStatsGet :: NodeThreadPoolStats
, nodeThreadPoolsStatsManagement :: NodeThreadPoolStats
, nodeThreadPoolsStatsFetchShardStore :: Maybe NodeThreadPoolStats
, nodeThreadPoolsStatsOptimize :: NodeThreadPoolStats
, nodeThreadPoolsStatsOptimize :: Maybe NodeThreadPoolStats
, nodeThreadPoolsStatsFlush :: NodeThreadPoolStats
, nodeThreadPoolsStatsSearch :: NodeThreadPoolStats
, nodeThreadPoolsStatsWarmer :: NodeThreadPoolStats
, nodeThreadPoolsStatsGeneric :: NodeThreadPoolStats
, nodeThreadPoolsStatsSuggest :: NodeThreadPoolStats
, nodeThreadPoolsStatsSuggest :: Maybe NodeThreadPoolStats
, nodeThreadPoolsStatsRefresh :: NodeThreadPoolStats
, nodeThreadPoolsStatsIndex :: NodeThreadPoolStats
, nodeThreadPoolsStatsListener :: Maybe NodeThreadPoolStats
, nodeThreadPoolsStatsFetchShardStarted :: Maybe NodeThreadPoolStats
, nodeThreadPoolsStatsPercolate :: NodeThreadPoolStats
, nodeThreadPoolsStatsPercolate :: Maybe NodeThreadPoolStats
} deriving (Eq, Show, Generic, Typeable)
data NodeThreadPoolStats = NodeThreadPoolStats {
@ -4040,34 +3821,26 @@ data JVMPoolStats = JVMPoolStats {
} deriving (Eq, Show, Generic, Typeable)
data NodeProcessStats = NodeProcessStats {
nodeProcessMemTotalVirtual :: Bytes
, nodeProcessMemShare :: Bytes
, nodeProcessMemResident :: Bytes
, nodeProcessCPUTotal :: NominalDiffTime
, nodeProcessCPUUser :: NominalDiffTime
, nodeProcessCPUSys :: NominalDiffTime
, nodeProcessCPUPercent :: Int
nodeProcessTimestamp :: UTCTime
, nodeProcessOpenFDs :: Int
, nodeProcessTimestamp :: UTCTime
, nodeProcessMaxFDs :: Int
, nodeProcessCPUPercent :: Int
, nodeProcessCPUTotal :: NominalDiffTime
, nodeProcessMemTotalVirtual :: Bytes
} deriving (Eq, Show, Generic, Typeable)
data NodeOSStats = NodeOSStats {
nodeOSSwapFree :: Bytes
, nodeOSSwapUsed :: Bytes
, nodeOSMemActualUsed :: Bytes
, nodeOSMemActualFree :: Bytes
, nodeOSMemUsedPercent :: Int
nodeOSTimestamp :: UTCTime
, nodeOSCPUPercent :: Int
, nodeOSLoad :: Maybe LoadAvgs
, nodeOSMemTotal :: Bytes
, nodeOSMemFree :: Bytes
, nodeOSMemFreePercent :: Int
, nodeOSMemUsed :: Bytes
, nodeOSMemFree :: Bytes
, nodeOSCPUStolen :: Int
, nodeOSCPUUsage :: Int
, nodeOSCPUIdle :: Int
, nodeOSCPUUser :: Int
, nodeOSCPUSys :: Int
, nodeOSLoad :: Maybe LoadAvgs
, nodeOSUptime :: NominalDiffTime
, nodeOSTimestamp :: UTCTime
, nodeOSMemUsedPercent :: Int
, nodeOSSwapTotal :: Bytes
, nodeOSSwapFree :: Bytes
, nodeOSSwapUsed :: Bytes
} deriving (Eq, Show, Generic, Typeable)
data LoadAvgs = LoadAvgs {
@ -4084,9 +3857,9 @@ data NodeIndicesStats = NodeIndicesStats {
, nodeIndicesStatsQueryCacheHits :: Maybe Int
, nodeIndicesStatsQueryCacheEvictions :: Maybe Int
, nodeIndicesStatsQueryCacheSize :: Maybe Bytes
, nodeIndicesStatsSuggestCurrent :: Int
, nodeIndicesStatsSuggestTime :: NominalDiffTime
, nodeIndicesStatsSuggestTotal :: Int
, nodeIndicesStatsSuggestCurrent :: Maybe Int
, nodeIndicesStatsSuggestTime :: Maybe NominalDiffTime
, nodeIndicesStatsSuggestTotal :: Maybe Int
, nodeIndicesStatsTranslogSize :: Bytes
, nodeIndicesStatsTranslogOps :: Int
, nodeIndicesStatsSegFixedBitSetMemory :: Maybe Bytes
@ -4096,16 +3869,13 @@ data NodeIndicesStats = NodeIndicesStats {
, nodeIndicesStatsSegMemory :: Bytes
, nodeIndicesStatsSegCount :: Int
, nodeIndicesStatsCompletionSize :: Bytes
, nodeIndicesStatsPercolateQueries :: Int
, nodeIndicesStatsPercolateMemory :: Bytes
, nodeIndicesStatsPercolateCurrent :: Int
, nodeIndicesStatsPercolateTime :: NominalDiffTime
, nodeIndicesStatsPercolateTotal :: Int
, nodeIndicesStatsPercolateQueries :: Maybe Int
, nodeIndicesStatsPercolateMemory :: Maybe Bytes
, nodeIndicesStatsPercolateCurrent :: Maybe Int
, nodeIndicesStatsPercolateTime :: Maybe NominalDiffTime
, nodeIndicesStatsPercolateTotal :: Maybe Int
, nodeIndicesStatsFieldDataEvictions :: Int
, nodeIndicesStatsFieldDataMemory :: Bytes
, nodeIndicesStatsIDCacheMemory :: Bytes
, nodeIndicesStatsFilterCacheEvictions :: Int
, nodeIndicesStatsFilterCacheMemory :: Bytes
, nodeIndicesStatsWarmerTotalTime :: NominalDiffTime
, nodeIndicesStatsWarmerTotal :: Int
, nodeIndicesStatsWarmerCurrent :: Int
@ -4163,7 +3933,7 @@ newtype PluginName = PluginName { pluginName :: Text }
deriving (Eq, Ord, Generic, Show, Typeable, FromJSON)
data NodeInfo = NodeInfo {
nodeInfoHTTPAddress :: EsAddress
nodeInfoHTTPAddress :: Maybe EsAddress
, nodeInfoBuild :: BuildHash
, nodeInfoESVersion :: VersionNumber
, nodeInfoIP :: Server
@ -4174,7 +3944,7 @@ data NodeInfo = NodeInfo {
, nodeInfoPlugins :: [NodePluginInfo]
, nodeInfoHTTP :: NodeHTTPInfo
, nodeInfoTransport :: NodeTransportInfo
, nodeInfoNetwork :: NodeNetworkInfo
, nodeInfoNetwork :: Maybe NodeNetworkInfo
, nodeInfoThreadPool :: NodeThreadPoolsInfo
, nodeInfoJVM :: NodeJVMInfo
, nodeInfoProcess :: NodeProcessInfo
@ -4206,7 +3976,7 @@ data NodeTransportInfo = NodeTransportInfo {
data BoundTransportAddress = BoundTransportAddress {
publishAddress :: EsAddress
, boundAddress :: EsAddress
, boundAddress :: [EsAddress]
} deriving (Eq, Show, Generic, Typeable)
data NodeNetworkInfo = NodeNetworkInfo {
@ -4229,15 +3999,15 @@ data NodeNetworkInterface = NodeNetworkInterface {
data NodeThreadPoolsInfo = NodeThreadPoolsInfo {
nodeThreadPoolsRefresh :: NodeThreadPoolInfo
, nodeThreadPoolsManagement :: NodeThreadPoolInfo
, nodeThreadPoolsPercolate :: NodeThreadPoolInfo
, nodeThreadPoolsPercolate :: Maybe NodeThreadPoolInfo
, nodeThreadPoolsListener :: Maybe NodeThreadPoolInfo
, nodeThreadPoolsFetchShardStarted :: Maybe NodeThreadPoolInfo
, nodeThreadPoolsSearch :: NodeThreadPoolInfo
, nodeThreadPoolsFlush :: NodeThreadPoolInfo
, nodeThreadPoolsWarmer :: NodeThreadPoolInfo
, nodeThreadPoolsOptimize :: NodeThreadPoolInfo
, nodeThreadPoolsOptimize :: Maybe NodeThreadPoolInfo
, nodeThreadPoolsBulk :: NodeThreadPoolInfo
, nodeThreadPoolsSuggest :: NodeThreadPoolInfo
, nodeThreadPoolsSuggest :: Maybe NodeThreadPoolInfo
, nodeThreadPoolsMerge :: NodeThreadPoolInfo
, nodeThreadPoolsSnapshot :: NodeThreadPoolInfo
, nodeThreadPoolsGet :: NodeThreadPoolInfo
@ -4300,11 +4070,12 @@ newtype PID = PID {
} deriving (Eq, Show, Generic, Typeable, FromJSON)
data NodeOSInfo = NodeOSInfo {
nodeOSSwap :: Bytes
, nodeOSMem :: Bytes
, nodeOSCPUInfo :: CPUInfo
nodeOSRefreshInterval :: NominalDiffTime
, nodeOSName :: Text
, nodeOSArch :: Text
, nodeOSVersion :: VersionNumber
, nodeOSAvailableProcessors :: Int
, nodeOSRefreshInterval :: NominalDiffTime
, nodeOSAllocatedProcessors :: Int
} deriving (Eq, Show, Generic, Typeable)
data CPUInfo = CPUInfo {
@ -4320,7 +4091,7 @@ data CPUInfo = CPUInfo {
data NodeProcessInfo = NodeProcessInfo {
nodeProcessMLockAll :: Bool
-- ^ See <https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration.html>
, nodeProcessMaxFileDescriptors :: Int
, nodeProcessMaxFileDescriptors :: Maybe Int
, nodeProcessId :: PID
, nodeProcessRefreshInterval :: NominalDiffTime
} deriving (Eq, Show, Generic, Typeable)
@ -4725,7 +4496,7 @@ instance FromJSON NodeDataPathStats where
<*> o .: "free_in_bytes"
<*> o .: "total_in_bytes"
<*> o .:? "type"
<*> o .: "dev"
<*> o .:? "dev"
<*> o .: "mount"
<*> o .: "path"
@ -4772,21 +4543,21 @@ instance FromJSON NodeThreadPoolsStats where
where
parse o = NodeThreadPoolsStats <$> o .: "snapshot"
<*> o .: "bulk"
<*> o .: "merge"
<*> o .: "force_merge"
<*> o .: "get"
<*> o .: "management"
<*> o .:? "fetch_shard_store"
<*> o .: "optimize"
<*> o .:? "optimize"
<*> o .: "flush"
<*> o .: "search"
<*> o .: "warmer"
<*> o .: "generic"
<*> o .: "suggest"
<*> o .:? "suggest"
<*> o .: "refresh"
<*> o .: "index"
<*> o .:? "listener"
<*> o .:? "fetch_shard_started"
<*> o .: "percolate"
<*> o .:? "percolate"
instance FromJSON NodeThreadPoolStats where
parseJSON = withObject "NodeThreadPoolStats" parse
where
@ -4859,15 +4630,12 @@ instance FromJSON NodeProcessStats where
parse o = do
mem <- o .: "mem"
cpu <- o .: "cpu"
NodeProcessStats <$> mem .: "total_virtual_in_bytes"
<*> mem .: "share_in_bytes"
<*> mem .: "resident_in_bytes"
<*> (unMS <$> cpu .: "total_in_millis")
<*> (unMS <$> cpu .: "user_in_millis")
<*> (unMS <$> cpu .: "sys_in_millis")
<*> cpu .: "percent"
NodeProcessStats <$> (posixMS <$> o .: "timestamp")
<*> o .: "open_file_descriptors"
<*> (posixMS <$> o .: "timestamp")
<*> o .: "max_file_descriptors"
<*> cpu .: "percent"
<*> (unMS <$> cpu .: "total_in_millis")
<*> mem .: "total_virtual_in_bytes"
instance FromJSON NodeOSStats where
parseJSON = withObject "NodeOSStats" parse
@ -4877,22 +4645,17 @@ instance FromJSON NodeOSStats where
mem <- o .: "mem"
cpu <- o .: "cpu"
load <- o .:? "load_average"
NodeOSStats <$> swap .: "free_in_bytes"
<*> swap .: "used_in_bytes"
<*> mem .: "actual_used_in_bytes"
<*> mem .: "actual_free_in_bytes"
<*> mem .: "used_percent"
NodeOSStats <$> (posixMS <$> o .: "timestamp")
<*> cpu .: "percent"
<*> pure load
<*> mem .: "total_in_bytes"
<*> mem .: "free_in_bytes"
<*> mem .: "free_percent"
<*> mem .: "used_in_bytes"
<*> mem .: "free_in_bytes"
<*> cpu .: "stolen"
<*> cpu .: "usage"
<*> cpu .: "idle"
<*> cpu .: "user"
<*> cpu .: "sys"
<*> pure load
<*> (unMS <$> o .: "uptime_in_millis")
<*> (posixMS <$> o .: "timestamp")
<*> mem .: "used_percent"
<*> swap .: "total_in_bytes"
<*> swap .: "free_in_bytes"
<*> swap .: "used_in_bytes"
instance FromJSON LoadAvgs where
parseJSON = withArray "LoadAvgs" parse
@ -4912,14 +4675,12 @@ instance FromJSON NodeIndicesStats where
Nothing -> pure Nothing
mRecovery <- o .:? "recovery"
mQueryCache <- o .:? "query_cache"
suggest <- o .: "suggest"
mSuggest <- o .:? "suggest"
translog <- o .: "translog"
segments <- o .: "segments"
completion <- o .: "completion"
percolate <- o .: "percolate"
mPercolate <- o .:? "percolate"
fielddata <- o .: "fielddata"
idCache <- o .: "id_cache"
filterCache <- o .: "filter_cache"
warmer <- o .: "warmer"
flush <- o .: "flush"
refresh <- o .: "refresh"
@ -4936,9 +4697,9 @@ instance FromJSON NodeIndicesStats where
<*> mQueryCache .:: "hit_count"
<*> mQueryCache .:: "evictions"
<*> mQueryCache .:: "memory_size_in_bytes"
<*> suggest .: "current"
<*> (unMS <$> suggest .: "time_in_millis")
<*> suggest .: "total"
<*> mSuggest .:: "current"
<*> (fmap unMS <$> mSuggest .:: "time_in_millis")
<*> mSuggest .:: "total"
<*> translog .: "size_in_bytes"
<*> translog .: "operations"
<*> segments .:? "fixed_bit_set_memory_in_bytes"
@ -4948,16 +4709,13 @@ instance FromJSON NodeIndicesStats where
<*> segments .: "memory_in_bytes"
<*> segments .: "count"
<*> completion .: "size_in_bytes"
<*> percolate .: "queries"
<*> percolate .: "memory_size_in_bytes"
<*> percolate .: "current"
<*> (unMS <$> percolate .: "time_in_millis")
<*> percolate .: "total"
<*> mPercolate .:: "queries"
<*> mPercolate .:: "memory_size_in_bytes"
<*> mPercolate .:: "current"
<*> (fmap unMS <$> mPercolate .:: "time_in_millis")
<*> mPercolate .:: "total"
<*> fielddata .: "evictions"
<*> fielddata .: "memory_size_in_bytes"
<*> idCache .: "memory_size_in_bytes"
<*> filterCache .: "evictions"
<*> filterCache .: "memory_size_in_bytes"
<*> (unMS <$> warmer .: "total_time_in_millis")
<*> warmer .: "total"
<*> warmer .: "current"
@ -5015,7 +4773,7 @@ parseNodeStats fnid o = do
<*> o .: "http"
<*> o .: "transport"
<*> o .: "fs"
<*> o .: "network"
<*> o .:? "network"
<*> o .: "thread_pool"
<*> o .: "jvm"
<*> o .: "process"
@ -5024,8 +4782,8 @@ parseNodeStats fnid o = do
parseNodeInfo :: FullNodeId -> Object -> Parser NodeInfo
parseNodeInfo nid o =
NodeInfo <$> o .: "http_address"
<*> o .: "build"
NodeInfo <$> o .:? "http_address"
<*> o .: "build_hash"
<*> o .: "version"
<*> o .: "ip"
<*> o .: "host"
@ -5035,7 +4793,7 @@ parseNodeInfo nid o =
<*> o .: "plugins"
<*> o .: "http"
<*> o .: "transport"
<*> o .: "network"
<*> o .:? "network"
<*> o .: "thread_pool"
<*> o .: "jvm"
<*> o .: "process"
@ -5067,13 +4825,12 @@ instance FromJSON NodeOSInfo where
parseJSON = withObject "NodeOSInfo" parse
where
parse o = do
swap <- o .: "swap"
mem <- o .: "mem"
NodeOSInfo <$> swap .: "total_in_bytes"
<*> mem .: "total_in_bytes"
<*> o .: "cpu"
NodeOSInfo <$> (unMS <$> o .: "refresh_interval_in_millis")
<*> o .: "name"
<*> o .: "arch"
<*> o .: "version"
<*> o .: "available_processors"
<*> (unMS <$> o .: "refresh_interval_in_millis")
<*> o .: "allocated_processors"
instance FromJSON CPUInfo where
@ -5091,7 +4848,7 @@ instance FromJSON NodeProcessInfo where
parseJSON = withObject "NodeProcessInfo" parse
where
parse o = NodeProcessInfo <$> o .: "mlockall"
<*> o .: "max_file_descriptors"
<*> o .:? "max_file_descriptors"
<*> o .: "id"
<*> (unMS <$> o .: "refresh_interval_in_millis")
@ -5127,16 +4884,16 @@ instance FromJSON NodeThreadPoolsInfo where
where
parse o = NodeThreadPoolsInfo <$> o .: "refresh"
<*> o .: "management"
<*> o .: "percolate"
<*> o .:? "percolate"
<*> o .:? "listener"
<*> o .:? "fetch_shard_started"
<*> o .: "search"
<*> o .: "flush"
<*> o .: "warmer"
<*> o .: "optimize"
<*> o .:? "optimize"
<*> o .: "bulk"
<*> o .: "suggest"
<*> o .: "merge"
<*> o .:? "suggest"
<*> o .: "force_merge"
<*> o .: "snapshot"
<*> o .: "get"
<*> o .:? "fetch_shard_store"

View File

@ -1,15 +0,0 @@
flags: {}
packages:
- '.'
extra-deps:
- quickcheck-properties-0.1
# - http-client-0.5.0
# - fail-4.9.0.0
# - http-types-0.9
# - attoparsec-0.13.0.1
# - doctest-0.10.1
# - semigroups-0.18.0.1
# - uri-bytestring-0.1.9
# - temporary-resourcet-0.1.0.0
resolver: nightly-2016-08-20

View File

@ -1 +1,15 @@
stack-7.10.yaml
flags: {}
packages:
- '.'
extra-deps:
- quickcheck-properties-0.1
# - http-client-0.5.0
# - fail-4.9.0.0
# - http-types-0.9
# - attoparsec-0.13.0.1
# - doctest-0.10.1
# - semigroups-0.18.0.1
# - uri-bytestring-0.1.9
# - temporary-resourcet-0.1.0.0
resolver: nightly-2016-08-20

View File

@ -47,7 +47,7 @@ import qualified Network.HTTP.Types.Status as NHTS
import qualified Network.URI as URI
import Prelude hiding (filter)
import System.IO.Temp
import System.Posix.Files
import System.PosixCompat.Files
import Test.Hspec
import Test.QuickCheck.Property.Monoid (T (..), eq, prop_Monoid)
@ -100,6 +100,9 @@ es16 = Vers.Version [1, 6, 0] []
es20 :: Vers.Version
es20 = Vers.Version [2, 0, 0] []
es50 :: Vers.Version
es50 = Vers.Version [5, 0, 0] []
getServerVersion :: IO (Maybe Vers.Version)
getServerVersion = fmap extractVersion <$> withTestEnv getStatus
where
@ -179,13 +182,16 @@ data ParentMapping = ParentMapping deriving (Eq, Show)
instance ToJSON ParentMapping where
toJSON ParentMapping =
object ["properties" .=
object [ "user" .= object ["type" .= ("string" :: Text)]
object [ "user" .= object ["type" .= ("string" :: Text)
, "fielddata" .= True
]
-- Serializing the date as a date is breaking other tests, mysteriously.
-- , "postDate" .= object [ "type" .= ("date" :: Text)
-- , "format" .= ("YYYY-MM-dd`T`HH:mm:ss.SSSZZ" :: Text)]
, "message" .= object ["type" .= ("string" :: Text)]
, "age" .= object ["type" .= ("integer" :: Text)]
, "location" .= object ["type" .= ("geo_point" :: Text)]
, "extra" .= object ["type" .= ("keyword" :: Text)]
]]
data ChildMapping = ChildMapping deriving (Eq, Show)
@ -194,28 +200,35 @@ instance ToJSON ChildMapping where
toJSON ChildMapping =
object ["_parent" .= object ["type" .= ("parent" :: Text)]
, "properties" .=
object [ "user" .= object ["type" .= ("string" :: Text)]
object [ "user" .= object ["type" .= ("string" :: Text)
, "fielddata" .= True
]
-- Serializing the date as a date is breaking other tests, mysteriously.
-- , "postDate" .= object [ "type" .= ("date" :: Text)
-- , "format" .= ("YYYY-MM-dd`T`HH:mm:ss.SSSZZ" :: Text)]
, "message" .= object ["type" .= ("string" :: Text)]
, "age" .= object ["type" .= ("integer" :: Text)]
, "location" .= object ["type" .= ("geo_point" :: Text)]
, "extra" .= object ["type" .= ("keyword" :: Text)]
]]
data TweetMapping = TweetMapping deriving (Eq, Show)
instance ToJSON TweetMapping where
toJSON TweetMapping =
object ["properties" .=
object [ "user" .= object ["type" .= ("string" :: Text)]
-- Serializing the date as a date is breaking other tests, mysteriously.
-- , "postDate" .= object [ "type" .= ("date" :: Text)
-- , "format" .= ("YYYY-MM-dd`T`HH:mm:ss.SSSZZ" :: Text)]
, "message" .= object ["type" .= ("string" :: Text)]
, "age" .= object ["type" .= ("integer" :: Text)]
, "location" .= object ["type" .= ("geo_point" :: Text)]
]]
object ["tweet" .=
object ["properties" .=
object [ "user" .= object [ "type" .= ("string" :: Text)
, "fielddata" .= True
]
-- Serializing the date as a date is breaking other tests, mysteriously.
-- , "postDate" .= object [ "type" .= ("date" :: Text)
-- , "format" .= ("YYYY-MM-dd`T`HH:mm:ss.SSSZZ" :: Text)]
, "message" .= object ["type" .= ("string" :: Text)]
, "age" .= object ["type" .= ("integer" :: Text)]
, "location" .= object ["type" .= ("geo_point" :: Text)]
, "extra" .= object ["type" .= ("keyword" :: Text)]
]]]
exampleTweet :: Tweet
exampleTweet = Tweet { user = "bitemyapp"
@ -543,7 +556,6 @@ instance ApproxEq HasChildQuery
instance ApproxEq FuzzyQuery
instance ApproxEq FuzzyLikeFieldQuery
instance ApproxEq FuzzyLikeThisQuery
instance ApproxEq FilteredQuery
instance ApproxEq DisMaxQuery
instance ApproxEq CommonTermsQuery
instance ApproxEq CommonMinimumMatch
@ -577,7 +589,7 @@ instance ApproxEq BoolMatch
instance ApproxEq MultiMatchQuery
instance ApproxEq IndexSettings
instance ApproxEq AllocationPolicy
instance ApproxEq Char
instance ApproxEq Char where (=~) = (==)
instance ApproxEq Vers.Version where
(=~) = (==)
instance (ApproxEq a, Show a) => ApproxEq [a] where
@ -722,10 +734,8 @@ instance Arbitrary Query where
, QueryBoolQuery <$> arbitrary
, QueryBoostingQuery <$> arbitrary
, QueryCommonTermsQuery <$> arbitrary
, ConstantScoreFilter <$> arbitrary <*> arbitrary
, ConstantScoreQuery <$> arbitrary <*> arbitrary
, QueryDisMaxQuery <$> arbitrary
, QueryFilteredQuery <$> arbitrary
, QueryFuzzyLikeThisQuery <$> arbitrary
, QueryFuzzyLikeFieldQuery <$> arbitrary
, QueryFuzzyQuery <$> arbitrary
@ -746,24 +756,7 @@ instance Arbitrary Query where
shrink = genericShrink
instance Arbitrary Filter where
arbitrary = reduceSize $ oneof [ AndFilter <$> arbitrary <*> arbitrary
, OrFilter <$> arbitrary <*> arbitrary
, NotFilter <$> arbitrary <*> arbitrary
, pure IdentityFilter
, BoolFilter <$> arbitrary
, ExistsFilter <$> arbitrary
, GeoBoundingBoxFilter <$> arbitrary
, GeoDistanceFilter <$> arbitrary <*> arbitrary <*> arbitrary <*> arbitrary <*> arbitrary
, GeoDistanceRangeFilter <$> arbitrary <*> arbitrary
, GeoPolygonFilter <$> arbitrary <*> arbitrary
, IdsFilter <$> arbitrary <*> arbitrary
, LimitFilter <$> arbitrary
, MissingFilter <$> arbitrary <*> arbitrary <*> arbitrary
, PrefixFilter <$> arbitrary <*> arbitrary <*> arbitrary
, QueryFilter <$> arbitrary <*> arbitrary
, RangeFilter <$> arbitrary <*> arbitrary <*> arbitrary <*> arbitrary
, RegexpFilter <$> arbitrary <*> arbitrary <*> arbitrary <*> arbitrary <*> arbitrary <*> arbitrary
, TermFilter <$> arbitrary <*> arbitrary]
arbitrary = Filter <$> arbitrary
shrink = genericShrink
instance Arbitrary ReplicaBounds where
@ -855,7 +848,6 @@ $(derive makeArbitrary ''HasChildQuery)
$(derive makeArbitrary ''FuzzyQuery)
$(derive makeArbitrary ''FuzzyLikeFieldQuery)
$(derive makeArbitrary ''FuzzyLikeThisQuery)
$(derive makeArbitrary ''FilteredQuery)
$(derive makeArbitrary ''DisMaxQuery)
$(derive makeArbitrary ''CommonTermsQuery)
$(derive makeArbitrary ''DistanceRange)
@ -1028,7 +1020,7 @@ main = hspec $ do
it "returns document for term query and identity filter" $ withTestEnv $ do
_ <- insertData
let query = TermQuery (Term "user" "bitemyapp") Nothing
let filter = IdentityFilter <&&> IdentityFilter
let filter = Filter $ MatchAllQuery Nothing
let search = mkSearch (Just query) (Just filter)
myTweet <- searchTweet search
liftIO $
@ -1038,26 +1030,16 @@ main = hspec $ do
_ <- insertData
let query = TermsQuery "user" ("bitemyapp" :| [])
let cfQuery = ConstantScoreQuery query (Boost 1.0)
let filter = IdentityFilter
let filter = Filter $ MatchAllQuery Nothing
let search = mkSearch (Just cfQuery) (Just filter)
myTweet <- searchTweet search
liftIO $
myTweet `shouldBe` Right exampleTweet
it "handles constant score filters" $ withTestEnv $ do
_ <- insertData
let query = TermsQuery "user" ("bitemyapp" :| [])
let cfFilter = ConstantScoreFilter IdentityFilter (Boost 1.0)
let boolQuery = mkBoolQuery [query, cfFilter] [] []
let search = mkSearch (Just (QueryBoolQuery boolQuery)) Nothing
myTweet <- searchTweet search
liftIO $
myTweet `shouldBe` Right exampleTweet
it "returns document for terms query and identity filter" $ withTestEnv $ do
_ <- insertData
let query = TermsQuery "user" ("bitemyapp" :| [])
let filter = IdentityFilter <&&> IdentityFilter
let filter = Filter $ MatchAllQuery Nothing
let search = mkSearch (Just query) (Just filter)
myTweet <- searchTweet search
liftIO $
@ -1085,7 +1067,7 @@ main = hspec $ do
let innerQuery = QueryMatchQuery $
mkMatchQuery (FieldName "user") (QueryString "bitemyapp")
let query = QueryBoolQuery $
mkBoolQuery [innerQuery] [] []
mkBoolQuery [innerQuery] [] [] []
let search = mkSearch (Just query) Nothing
myTweet <- searchTweet search
liftIO $
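mkBoolQuery grows a fourth list because 5.0 bool queries carry must / filter / should / must_not clauses. A sketch of a call that uses the new slot; the position of the filter-context list is inferred, not confirmed by this hunk:

    -- Presumed signature; only the [Filter] argument is new:
    -- mkBoolQuery :: [Query] -> [Filter] -> [Query] -> [Query] -> BoolQuery
    boolWithFilter :: Query
    boolWithFilter = QueryBoolQuery $
      mkBoolQuery [innerQuery] [Filter (MatchAllQuery Nothing)] [] []
      where
        innerQuery = QueryMatchQuery $
          mkMatchQuery (FieldName "user") (QueryString "bitemyapp")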
@ -1120,173 +1102,13 @@ main = hspec $ do
_ <- insertOther
let sortSpec = DefaultSortSpec $ mkSort (FieldName "age") Ascending
let search = Search Nothing
(Just IdentityFilter) (Just [sortSpec]) Nothing Nothing
Nothing (Just [sortSpec]) Nothing Nothing
False (From 0) (Size 10) SearchTypeQueryThenFetch Nothing Nothing
result <- searchTweets search
let myTweet = grabFirst result
liftIO $
myTweet `shouldBe` Right otherTweet
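The Search record keeps its Maybe Filter slot; the test simply passes Nothing now that IdentityFilter is gone. The same sorted search with an explicit (trivial) filter, field order copied verbatim from the line above:

    sortedSearch :: Search
    sortedSearch = Search Nothing (Just (Filter (MatchAllQuery Nothing)))
                     (Just [sortSpec]) Nothing Nothing
                     False (From 0) (Size 10)
                     SearchTypeQueryThenFetch Nothing Nothing
      where sortSpec = DefaultSortSpec $ mkSort (FieldName "age") Ascending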
describe "filtering API" $ do
it "returns document for composed boolmatch and identity" $ withTestEnv $ do
_ <- insertData
let queryFilter = BoolFilter (MustMatch (Term "user" "bitemyapp") False)
<&&> IdentityFilter
let search = mkSearch Nothing (Just queryFilter)
myTweet <- searchTweet search
liftIO $
myTweet `shouldBe` Right exampleTweet
it "returns document for term filter" $ withTestEnv $ do
_ <- insertData
let termFilter = TermFilter (Term "user" "bitemyapp") False
let search = mkSearch Nothing (Just termFilter)
myTweet <- searchTweet search
liftIO $
myTweet `shouldBe` Right exampleTweet
it "returns document for existential filter" $ withTestEnv $ do
_ <- insertData
let search = mkSearch Nothing (Just (ExistsFilter (FieldName "user")))
myTweet <- searchTweet search
liftIO $
myTweet `shouldBe` Right exampleTweet
it "returns document for geo boundingbox filter" $ withTestEnv $ do
_ <- insertData
let box = GeoBoundingBox (LatLon 40.73 (-74.1)) (LatLon 40.10 (-71.12))
let bbConstraint = GeoBoundingBoxConstraint (FieldName "tweet.location") box False GeoFilterMemory
let geoFilter = GeoBoundingBoxFilter bbConstraint
let search = mkSearch Nothing (Just geoFilter)
myTweet <- searchTweet search
liftIO $
myTweet `shouldBe` Right exampleTweet
it "doesn't return document for nonsensical boundingbox filter" $ withTestEnv $ do
_ <- insertData
let box = GeoBoundingBox (LatLon 0.73 (-4.1)) (LatLon 0.10 (-1.12))
let bbConstraint = GeoBoundingBoxConstraint (FieldName "tweet.location") box False GeoFilterMemory
let geoFilter = GeoBoundingBoxFilter bbConstraint
let search = mkSearch Nothing (Just geoFilter)
searchExpectNoResults search
it "returns document for geo distance filter" $ withTestEnv $ do
_ <- insertData
let geoPoint = GeoPoint (FieldName "tweet.location") (LatLon 40.12 (-71.34))
let distance = Distance 10.0 Miles
let optimizeBbox = OptimizeGeoFilterType GeoFilterMemory
let geoFilter = GeoDistanceFilter geoPoint distance SloppyArc optimizeBbox False
let search = mkSearch Nothing (Just geoFilter)
myTweet <- searchTweet search
liftIO $
myTweet `shouldBe` Right exampleTweet
it "returns document for geo distance range filter" $ withTestEnv $ do
_ <- insertData
let geoPoint = GeoPoint (FieldName "tweet.location") (LatLon 40.12 (-71.34))
let distanceRange = DistanceRange (Distance 0.0 Miles) (Distance 10.0 Miles)
let geoFilter = GeoDistanceRangeFilter geoPoint distanceRange
let search = mkSearch Nothing (Just geoFilter)
myTweet <- searchTweet search
liftIO $
myTweet `shouldBe` Right exampleTweet
it "doesn't return document for wild geo distance range filter" $ withTestEnv $ do
_ <- insertData
let geoPoint = GeoPoint (FieldName "tweet.location") (LatLon 40.12 (-71.34))
let distanceRange = DistanceRange (Distance 100.0 Miles) (Distance 1000.0 Miles)
let geoFilter = GeoDistanceRangeFilter geoPoint distanceRange
let search = mkSearch Nothing (Just geoFilter)
searchExpectNoResults search
it "returns document for geo polygon filter" $ withTestEnv $ do
_ <- insertData
let points = [LatLon 40.0 (-70.00),
LatLon 40.0 (-72.00),
LatLon 41.0 (-70.00),
LatLon 41.0 (-72.00)]
let geoFilter = GeoPolygonFilter (FieldName "tweet.location") points
let search = mkSearch Nothing (Just geoFilter)
myTweet <- searchTweet search
liftIO $
myTweet `shouldBe` Right exampleTweet
it "doesn't return document for bad geo polygon filter" $ withTestEnv $ do
_ <- insertData
let points = [LatLon 40.0 (-70.00),
LatLon 40.0 (-71.00),
LatLon 41.0 (-70.00),
LatLon 41.0 (-71.00)]
let geoFilter = GeoPolygonFilter (FieldName "tweet.location") points
let search = mkSearch Nothing (Just geoFilter)
searchExpectNoResults search
it "returns document for ids filter" $ withTestEnv $ do
_ <- insertData
let filter = IdsFilter (MappingName "tweet") [DocId "1"]
let search = mkSearch Nothing (Just filter)
myTweet <- searchTweet search
liftIO $
myTweet `shouldBe` Right exampleTweet
it "returns document for Double range filter" $ withTestEnv $ do
_ <- insertData
let filter = RangeFilter (FieldName "age")
(RangeDoubleGtLt (GreaterThan 1000.0) (LessThan 100000.0))
RangeExecutionIndex False
let search = mkSearch Nothing (Just filter)
myTweet <- searchTweet search
liftIO $
myTweet `shouldBe` Right exampleTweet
it "returns document for UTCTime date filter" $ withTestEnv $ do
_ <- insertData
let filter = RangeFilter (FieldName "postDate")
(RangeDateGtLt
(GreaterThanD (UTCTime
(ModifiedJulianDay 54000)
(secondsToDiffTime 0)))
(LessThanD (UTCTime
(ModifiedJulianDay 55000)
(secondsToDiffTime 11))))
RangeExecutionIndex False
let search = mkSearch Nothing (Just filter)
myTweet <- searchTweet search
liftIO $
myTweet `shouldBe` Right exampleTweet
it "returns document for regexp filter" $ withTestEnv $ do
_ <- insertData
let filter = RegexpFilter (FieldName "user") (Regexp "bite.*app")
AllRegexpFlags (CacheName "test") False (CacheKey "key")
let search = mkSearch Nothing (Just filter)
myTweet <- searchTweet search
liftIO $
myTweet `shouldBe` Right exampleTweet
it "doesn't return document for non-matching regexp filter" $ withTestEnv $ do
_ <- insertData
let filter = RegexpFilter (FieldName "user")
(Regexp "boy") AllRegexpFlags
(CacheName "test") False (CacheKey "key")
let search = mkSearch Nothing (Just filter)
searchExpectNoResults search
it "returns document for query filter, uncached" $ withTestEnv $ do
_ <- insertData
let filter = QueryFilter (TermQuery (Term "user" "bitemyapp") Nothing) True
search = mkSearch Nothing (Just filter)
myTweet <- searchTweet search
liftIO $ myTweet `shouldBe` Right exampleTweet
it "returns document for query filter, cached" $ withTestEnv $ do
_ <- insertData
let filter = QueryFilter (TermQuery (Term "user" "bitemyapp") Nothing) False
search = mkSearch Nothing (Just filter)
myTweet <- searchTweet search
liftIO $ myTweet `shouldBe` Right exampleTweet
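Everything this deleted block exercised (term, exists, geo, ids, range, regexp, and query filters plus the And/Or/Not combinators) now spells as the corresponding query wrapped in Filter, with composition moving into bool-query filter context. Two representative sketches built only from constructors visible in this diff:

    -- TermFilter (Term "user" "bitemyapp") False becomes:
    termF :: Filter
    termF = Filter (TermQuery (Term "user" "bitemyapp") Nothing)

    -- AndFilter f1 f2 becomes multiple entries in one filter list:
    andLike :: Query
    andLike = QueryBoolQuery $
      mkBoolQuery [] [termF, Filter (MatchAllQuery Nothing)] [] []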
describe "Aggregation API" $ do
it "returns term aggregation results" $ withTestEnv $ do
_ <- insertData
@ -1340,18 +1162,6 @@ main = hspec $ do
, docCountPair "bogus_count" 0
]))
it "can execute filter aggregations" $ withTestEnv $ do
_ <- insertData
_ <- insertOther
let ags = mkAggregations "bitemyapps" (FilterAgg (FilterAggregation (TermFilter (Term "user" "bitemyapp") defaultCache) Nothing)) <>
mkAggregations "notmyapps" (FilterAgg (FilterAggregation (TermFilter (Term "user" "notmyapp") defaultCache) Nothing))
let search = mkAggregateSearch Nothing ags
let docCountPair k n = (k, object ["doc_count" .= Number n])
res <- searchTweets search
liftIO $
fmap aggregations res `shouldBe` Right (Just (M.fromList [ docCountPair "bitemyapps" 1
, docCountPair "notmyapps" 1
]))
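The filter-aggregation test leaves with the old TermFilter. If FilterAggregation survives the port unchanged apart from its payload, the 5.0 spelling would presumably be (sketch, not exercised by the remaining tests):

    bitemyappAgg :: Aggregations
    bitemyappAgg = mkAggregations "bitemyapps" $
      FilterAgg (FilterAggregation
                   (Filter (TermQuery (Term "user" "bitemyapp") Nothing))
                   Nothing)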
it "can execute date_range aggregations" $ withTestEnv $ do
let now = fromGregorian 2015 3 14
let ltAMonthAgo = UTCTime (fromGregorian 2015 3 1) 0
@ -1397,18 +1207,6 @@ main = hspec $ do
searchExpectAggs search
searchValidBucketAgg search "byDate" toDateHistogram
it "returns date histogram using fractional date" $ withTestEnv $ do
_ <- insertData
let periods = [Year, Quarter, Month, Week, Day, Hour, Minute, Second]
let fractionals = map (FractionalInterval 1.5) [Weeks, Days, Hours, Minutes, Seconds]
let intervals = periods ++ fractionals
let histogram = mkDateHistogram (FieldName "postDate")
let search interval = mkAggregateSearch Nothing $ mkAggregations "byDate" $ DateHistogramAgg (histogram interval)
let expect interval = searchExpectAggs (search interval)
let valid interval = searchValidBucketAgg (search interval) "byDate" toDateHistogram
forM_ intervals expect
forM_ intervals valid
it "can execute missing aggregations" $ withTestEnv $ do
_ <- insertData
_ <- insertExtra
@ -1424,7 +1222,7 @@ main = hspec $ do
it "returns highlight from query when there should be one" $ withTestEnv $ do
_ <- insertData
_ <- insertOther
let query = QueryMatchQuery $ mkMatchQuery (FieldName "_all") (QueryString "haskell")
let query = QueryMatchQuery $ mkMatchQuery (FieldName "message") (QueryString "haskell")
let testHighlight = Highlights Nothing [FieldHighlight (FieldName "message") Nothing]
let search = mkHighlightSearch (Just query) testHighlight
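The highlight tests drop the _all meta-field in favour of the concrete message field. Callers who leaned on _all can approximate it with a multi_match across the fields they care about; QueryMultiMatchQuery and mkMultiMatchQuery are assumed names here, though the MultiMatchQuery type does appear in the ApproxEq instances above:

    allLike :: Query
    allLike = QueryMultiMatchQuery $
      mkMultiMatchQuery [FieldName "user", FieldName "message"]
                        (QueryString "haskell")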
@ -1435,7 +1233,7 @@ main = hspec $ do
it "doesn't return highlight from a query when it shouldn't" $ withTestEnv $ do
_ <- insertData
_ <- insertOther
let query = QueryMatchQuery $ mkMatchQuery (FieldName "_all") (QueryString "haskell")
let query = QueryMatchQuery $ mkMatchQuery (FieldName "message") (QueryString "haskell")
let testHighlight = Highlights Nothing [FieldHighlight (FieldName "user") Nothing]
let search = mkHighlightSearch (Just query) testHighlight
@ -1691,27 +1489,6 @@ main = hspec $ do
L.find ((== alias) . indexAliasSummaryAlias) summs `shouldBe` Just expected
Left e -> expectationFailure ("Expected an IndexAliasesSummary but got " <> show e)) `finally` cleanup
it "handles an alias with routing and a filter" $ do
let alias = IndexAlias (testIndex) (IndexAliasName (IndexName "bloodhound-tests-twitter-1-alias"))
let sar = SearchAliasRouting (RoutingValue "search val" :| [])
let iar = IndexAliasRouting (RoutingValue "index val")
let routing = GranularAliasRouting (Just sar) (Just iar)
let filter = LimitFilter 42
let create = IndexAliasCreate (Just routing) (Just filter)
let action = AddAlias alias create
withTestEnv $ do
resetIndex
resp <- updateIndexAliases (action :| [])
liftIO $ validateStatus resp 200
let cleanup = withTestEnv (updateIndexAliases (RemoveAlias alias :| []))
(do aliases <- withTestEnv getIndexAliases
let expected = IndexAliasSummary alias create
case aliases of
Right (IndexAliasesSummary summs) ->
L.find ((== alias) . indexAliasSummaryAlias) summs `shouldBe` Just expected
Left e -> expectationFailure ("Expected an IndexAliasesSummary but got " <> show e)) `finally` cleanup
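This alias test disappears because LimitFilter has no 5.0 counterpart; an alias filter is otherwise still expressible with the new wrapper. A sketch of the same create payload with a term filter swapped in:

    aliasCreate :: IndexAliasCreate
    aliasCreate = IndexAliasCreate (Just routing) (Just aliasFilter)
      where
        aliasFilter = Filter (TermQuery (Term "user" "bitemyapp") Nothing)
        routing = GranularAliasRouting (Just sar) (Just iar)
        sar = SearchAliasRouting (RoutingValue "search val" :| [])
        iar = IndexAliasRouting (RoutingValue "index val")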
describe "Index Listing" $ do
it "returns a list of index names" $ withTestEnv $ do
_ <- createExampleIndex
@ -1735,7 +1512,7 @@ main = hspec $ do
describe "Index Optimization" $ do
it "returns a successful response upon completion" $ withTestEnv $ do
_ <- createExampleIndex
resp <- optimizeIndex (IndexList (testIndex :| [])) defaultIndexOptimizationSettings
resp <- forceMergeIndex (IndexList (testIndex :| [])) defaultForceMergeIndexSettings
liftIO $ validateStatus resp 200
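ES 5.x renamed _optimize to _forcemerge, hence the rename here. A sketch of a merge-to-one-segment call; maxNumSegments is an assumed settings field, not confirmed by this diff:

    mergeToOne :: BH IO Reply
    mergeToOne = forceMergeIndex (IndexList (testIndex :| []))
                   (defaultForceMergeIndexSettings { maxNumSegments = Just 1 })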
describe "JSON instances" $ do
@ -1801,7 +1578,6 @@ main = hspec $ do
propJSON (Proxy :: Proxy FuzzyQuery)
propJSON (Proxy :: Proxy FuzzyLikeFieldQuery)
propJSON (Proxy :: Proxy FuzzyLikeThisQuery)
propJSON (Proxy :: Proxy FilteredQuery)
propJSON (Proxy :: Proxy DisMaxQuery)
propJSON (Proxy :: Proxy CommonTermsQuery)
propJSON (Proxy :: Proxy CommonMinimumMatch)