Mirror of https://github.com/hasura/graphql-engine.git
Bugfix to support 0-size HASURA_GRAPHQL_QUERY_PLAN_CACHE_SIZE
Also some minor refactoring of the bounded cache module:
- the maxBound check in `trim` was confusing and unnecessary
- consequently `trim` was unnecessary for `lookupPure`

Also add some basic tests.
parent 0ae5384115
commit 3a6b2ec744
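Note (illustration, not part of the commit): a small usage sketch of the behavior the fix is after, written against the Hasura.Cache.Bounded API as it appears in the diff below. A size of 0 now parses and yields a cache that accepts writes but never returns hits; the helper name zeroSizeDemo is invented here.

```haskell
import qualified Hasura.Cache.Bounded as Cache

-- Sketch only: HASURA_GRAPHQL_QUERY_PLAN_CACHE_SIZE=0 should now parse and
-- behave as a cache that retains nothing.
zeroSizeDemo :: IO ()
zeroSizeDemo =
  case Cache.parseCacheSize "0" of
    Left err   -> putStrLn ("unexpected parse failure: " ++ err)
    Right size -> do
      c <- Cache.initialise 1 size   -- one stripe, capacity 0
      Cache.insert 'k' 'v' c
      hit <- Cache.lookup 'k' c
      print hit                      -- expected: Nothing
```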
@@ -6,6 +6,7 @@
 (Add entries here in the order of: server, console, cli, docs, others)

+- server: bugfix to allow HASURA_GRAPHQL_QUERY_PLAN_CACHE_SIZE of 0 (#5363)
 - console: update sidebar icons for different action and trigger types

 ## `v1.3.0`
@@ -467,6 +467,7 @@ test-suite graphql-engine-tests
   Hasura.RQL.MetadataSpec
   Hasura.Server.MigrateSpec
   Hasura.Server.TelemetrySpec
+  Hasura.CacheBoundedSpec
   Hasura.Server.AuthSpec

 -- Benchmarks related to caching (e.g. the plan cache).
@@ -7,12 +7,15 @@ module Hasura.Cache
 import Hasura.Prelude hiding (lookup)

+import Control.Concurrent (getNumCapabilities)
+
 import Hasura.Cache.Types
 import qualified Hasura.Cache.Bounded as B
 import qualified Hasura.Cache.Unbounded as U

 initialise :: (Hashable k, Ord k) => Maybe B.CacheSize -> IO (Cache k v)
-initialise cacheSizeM =
+initialise cacheSizeM = do
+  stripes <- getNumCapabilities
   case cacheSizeM of
     Nothing -> Cache <$> U.initialise
-    Just cacheSize -> Cache <$> B.initialise cacheSize
+    Just cacheSize -> Cache <$> B.initialise stripes cacheSize
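Note (illustration, not part of the commit): a sketch of why the stripe count is now passed in explicitly rather than queried inside B.initialise. Production code can wire it to getNumCapabilities (as Hasura.Cache does above), while tests can pin it to 1 for deterministic assertions. This assumes the BoundedCache type is exported, which the partial export list shown here does not confirm.

```haskell
import Control.Concurrent (getNumCapabilities)
import qualified Hasura.Cache.Bounded as B

-- Production-style wiring: one stripe per capability, mirroring Hasura.Cache.
productionCache :: IO (B.BoundedCache Char Char)
productionCache = do
  stripes <- getNumCapabilities
  B.initialise stripes 100   -- capacity chosen arbitrarily for illustration

-- Test-style wiring: a single stripe, as in the new Hasura.CacheBoundedSpec below.
testCache :: IO (B.BoundedCache Char Char)
testCache = B.initialise 1 3
```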
@@ -1,3 +1,4 @@
+{-# LANGUAGE RecordWildCards #-}
 {-| An in-memory, Bounded by LRU strategy, capability-local cache implementation.
 By making the cache capability-local, data may be recomputed up to once per
 capability (which usually means up to once per OS thread), but write contention
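Note (illustration, not part of the commit): the "capability-local" idea described in the comment above can be pictured roughly as below. getLocal later in this module does something along these lines, but this function is a simplified, invented sketch.

```haskell
import Control.Concurrent (myThreadId, threadCapability)
import qualified Data.Vector as V

-- Pick the stripe belonging to the capability the current thread runs on.
-- Assumes a non-empty vector of stripes.
pickStripe :: V.Vector stripe -> IO stripe
pickStripe stripes = do
  (cap, _pinned) <- myThreadId >>= threadCapability
  -- modulo guards against having more capabilities than stripes
  pure (stripes V.! (cap `mod` V.length stripes))
```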
@@ -10,12 +11,17 @@ module Hasura.Cache.Bounded
   , initialise
   , insertAllStripes

+  -- * Exposed for testing
+  , checkInvariants
+  , getEntriesRecency
+  , CacheObj(..)
   ) where

 import Hasura.Prelude hiding (lookup)
 import Hasura.Cache.Types

-import Control.Concurrent (getNumCapabilities, myThreadId, threadCapability)
+import Control.Concurrent (myThreadId, threadCapability)
 import Data.Word (Word16)

 import qualified Data.Aeson as J
@@ -24,16 +30,23 @@ import qualified Data.IORef as IORef
 import qualified Data.Vector as V
 import GHC.Natural (Natural)

+-- MISC TODO:
+-- - benchmark and consider unsafeLookupIncreasePriority and unsafeInsertIncreasePriorityView
+-- - our own concurrent cache, that doesn't need redundant stripes
+-- - this would save significant memory
+-- - we could choose something more sophisticated than LRU if warranted
+-- - we could probably keep things simple by evicting on hash collisions
+
 newtype CacheSize
   = CacheSize { unCacheSize :: Word16 }
-  deriving (Show, Read, Eq, Ord, Bounded, Num, Enum, J.ToJSON, J.FromJSON)
+  deriving (Show, Read, Eq, Ord, Bounded, Num, Real, Integral, Enum, J.ToJSON, J.FromJSON)

 parseCacheSize :: String -> Either String CacheSize
 parseCacheSize v =
   -- NOTE: naively using readMaybe Word16 will silently wrap
   case readMaybe v :: Maybe Natural of
-    Just n | n <= max16 && n > 0 -> return (CacheSize $ fromIntegral n)
-    _ -> throwError "cache size must be given as a number between 1 and 65535"
+    Just n | n <= max16 && n >= 0 -> return (CacheSize $ fromIntegral n)
+    _ -> throwError "cache size must be given as a number between 0 and 65535"
   where
     max16 = fromIntegral (maxBound :: Word16) :: Natural
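Note (illustration, not part of the commit): a quick GHCi-style demonstration of the failure mode the NOTE above guards against. Reading directly at Word16 wraps out-of-range input silently, whereas parsing at Natural and bounds-checking (as parseCacheSize does) rejects it, and can now also accept 0. Expected results with GHC's standard Read instances are shown in comments.

```haskell
import Data.Word (Word16)
import Numeric.Natural (Natural)
import Text.Read (readMaybe)

wrapDemo :: IO ()
wrapDemo = do
  print (readMaybe "65536" :: Maybe Word16)   -- Just 0: silently wrapped
  print (readMaybe "70000" :: Maybe Word16)   -- Just 4464: silently wrapped
  print (readMaybe "65536" :: Maybe Natural)  -- Just 65536: then rejected by the <= 65535 bound
  print (readMaybe "0"     :: Maybe Natural)  -- Just 0: now accepted as a valid cache size
```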
@@ -47,10 +60,11 @@ data LruCache k v = LruCache
   { _lcCapacity :: !CacheSize
   -- ^ The maximum number of elements in the cache
   , _lcSize :: !CacheSize
-  -- ^ The current number of elements in the cache
+  -- ^ The current number of elements in the cache. We maintain this ourselves
+  -- since 'HashPSQ.size' is O(n).
   , _lcTick :: !Tick
   -- ^ The priority is drawn from this tick, it is incremented
-  -- after every access
+  -- after insert or successful lookup
   , _lcQueue :: !(HashPSQ.HashPSQ k Tick v)
   -- ^ Underlying priority queue
   } deriving (Show, Eq, Functor, Foldable, Traversable)
@@ -65,27 +79,22 @@ emptyCache capacity =
   , _lcQueue = HashPSQ.empty
   }

--- | Restore 'LruCache' invariants
-trim :: (Hashable k, Ord k) => LruCache k v -> LruCache k v
-trim c
-  | _lcTick c == maxBound = emptyCache (_lcCapacity c)
-  | _lcSize c > _lcCapacity c =
-      c { _lcSize = _lcSize c - 1
-        , _lcQueue = HashPSQ.deleteMin (_lcQueue c)
-        }
-  | otherwise = c
-
 -- | Insert an element into the 'LruCache'.
 insertPure :: (Hashable k, Ord k) => k -> v -> LruCache k v -> LruCache k v
 insertPure key val c =
-  trim $!
+  -- NOTE: we assume any rollover of 64-bit counters here to be impossible:
   let (mbOldVal,queue) = HashPSQ.insertView key (_lcTick c) val (_lcQueue c)
-   in c { _lcSize = if isNothing mbOldVal
-                    then _lcSize c + 1
-                    else _lcSize c
-        , _lcTick = _lcTick c + 1
-        , _lcQueue = queue
-        }
+      cTicked = c{ _lcTick = _lcTick c + 1 }
+   in case mbOldVal of
+        Nothing
+          -- at capacity; remove LRU to maintain _lcSize:
+          | _lcSize c == _lcCapacity c ->
+              cTicked{ _lcQueue = HashPSQ.deleteMin queue }
+          -- under capacity; just record new size after insert:
+          | otherwise ->
+              cTicked{ _lcQueue = queue, _lcSize = _lcSize c + 1 }
+        _valueAlreadyInserted -> -- nothing inserted; _lcSize stays the same
+          cTicked{ _lcQueue = queue }

 -- | Lookup an element in an 'LruCache' and mark it as the least
 -- recently accessed.
@@ -94,7 +103,7 @@ lookupPure k c =
   case HashPSQ.alter lookupAndBump k (_lcQueue c) of
     (Nothing, _) -> Nothing
     (Just x, q) ->
-      let !c' = trim $ c {_lcTick = _lcTick c + 1, _lcQueue = q}
+      let !c' = c {_lcTick = _lcTick c + 1, _lcQueue = q}
       in Just (x, c')
   where
     lookupAndBump Nothing = (Nothing, Nothing)
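Note (illustration, not part of the commit): with trim gone, the eviction rule lives entirely in insertPure above. Every insert and every successful lookup stamps a fresh, strictly growing tick, and an insert that would exceed capacity drops the minimum-tick (least recently used) entry; the tick-rollover case trim used to reset on is treated as unreachable for a 64-bit counter, per the NOTE in the diff. Below is a self-contained miniature of that rule, using Data.Map in place of HashPSQ; all names here are invented for illustration.

```haskell
import Data.List (minimumBy)
import Data.Ord (comparing)
import qualified Data.Map.Strict as M

type Tick = Word

-- | Capacity, next tick to hand out, and key -> (tick of last touch, value).
data Mini k v = Mini Int Tick (M.Map k (Tick, v)) deriving Show

insertMini :: Ord k => k -> v -> Mini k v -> Mini k v
insertMini k v (Mini cap t es)
  | M.member k es || M.size es < cap = Mini cap (t + 1) inserted                 -- overwrite, or room left
  | otherwise                        = Mini cap (t + 1) (M.delete lru inserted)  -- at capacity: drop lowest tick
  where
    inserted = M.insert k (t, v) es
    lru      = fst (minimumBy (comparing (fst . snd)) (M.toList inserted))

lookupMini :: Ord k => k -> Mini k v -> (Maybe v, Mini k v)
lookupMini k m@(Mini cap t es) =
  case M.lookup k es of
    Nothing     -> (Nothing, m)                                                  -- miss: nothing changes
    Just (_, v) -> (Just v, Mini cap (t + 1) (M.insert k (t, v) es))             -- hit: re-stamp with fresh tick
```

In particular, with capacity 0 the freshly inserted entry is itself the minimum and is removed immediately, which is what makes the zero-size case behave as a cache that never retains anything.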
@@ -149,12 +158,14 @@ instance (Hashable k, Ord k) => CacheObj (BoundedCache k v) k v where
   getEntries (BoundedCache localCaches) =
     mapM getLocalEntries $ V.toList localCaches

--- | Creates a new BoundedCache of the specified size,
--- with one stripe per capability
-initialise :: CacheSize -> IO (BoundedCache k v)
-initialise capacity = do
-  capabilities <- getNumCapabilities
-  BoundedCache <$> V.replicateM capabilities (initLocalCache capacity)
+-- | Creates a new BoundedCache of the specified size, for each stripe
+initialise
+  :: Int
+  -- ^ Stripes; to minimize contention this should probably match the number of capabilities.
+  -> CacheSize
+  -> IO (BoundedCache k v)
+initialise stripes capacity = do
+  BoundedCache <$> V.replicateM stripes (initLocalCache capacity)


 {-# INLINE getLocal #-}
@@ -175,3 +186,24 @@ insertAllStripes
 insertAllStripes k v (BoundedCache handles) = do
   forM_ handles $ \localHandle->
     insertLocal localHandle k v
+
+-- | Check internal invariants, throwing an error if things are off:
+checkInvariants :: (Hashable k, Ord k)=> BoundedCache k v -> IO ()
+checkInvariants (BoundedCache handles) =
+  forM_ handles $ \(LocalCacheRef ref) -> do
+    LruCache{..} <- IORef.readIORef ref
+    -- check internal invariants (in case we use unsafe* functions that rely on
+    -- us maintaining HashPSQ invariants):
+    unless (HashPSQ.valid _lcQueue) $
+      error "invalid HashPSQ!"
+    unless (HashPSQ.size _lcQueue == fromIntegral _lcSize) $
+      error "Size incorrect!"
+    unless (fromIntegral _lcSize <= _lcTick)$
+      error "Somehow tick wasn't incremented properly!"
+    when (_lcSize > _lcCapacity) $
+      error "Size > capacity!"
+
+getEntriesRecency :: (Hashable k, Ord k) => BoundedCache k v -> IO [[(k, Tick, v)]]
+getEntriesRecency (BoundedCache localCaches) =
+  forM (V.toList localCaches) $ \(LocalCacheRef ref) ->
+    HashPSQ.toList . _lcQueue <$> IORef.readIORef ref
server/src-test/Hasura/CacheBoundedSpec.hs (new file, 54 lines)
@@ -0,0 +1,54 @@
+module Hasura.CacheBoundedSpec (spec) where
+
+import Hasura.Prelude
+
+import qualified Hasura.Cache.Bounded as Cache
+import Test.Hspec
+
+spec :: Spec
+spec = describe "Bounded cache data structure" $ do
+  -- assume a single stripe here for simplicity:
+  let checkEntries c expected = do
+        Cache.checkInvariants c
+        Cache.getEntriesRecency c >>= \case
+          [es] -> do
+            sort es `shouldBe` expected
+          _ -> error "stripes wrong"
+
+  it "works for 0 size" $ do
+    c <- Cache.initialise 1 0
+    Cache.lookup 'X' c `shouldReturn` Nothing
+    Cache.insert 'Y' 'Y' c
+    Cache.lookup 'Y' c `shouldReturn` Nothing
+    checkEntries c []
+
+  -- basic functionality check:
+  it "seems to be working right" $ do
+    c <- Cache.initialise 1 3
+
+    Cache.insert 'A' 'A' c
+    checkEntries c [('A', 0, 'A')]
+
+    -- lookups of non-existing keys don't increment the ticket (though it
+    -- wouldn't hurt if it did):
+    Cache.lookup 'X' c `shouldReturn` Nothing
+
+    Cache.lookup 'A' c `shouldReturn` Just 'A'
+    checkEntries c [('A', 1, 'A')]
+
+    Cache.insert 'B' 'B' c
+    checkEntries c [('A', 1, 'A'), ('B', 2, 'B')]
+
+    Cache.lookup 'B' c `shouldReturn` Just 'B'
+    checkEntries c [('A', 1, 'A'), ('B', 3, 'B')]
+
+    Cache.lookup 'A' c `shouldReturn` Just 'A'
+    checkEntries c [('A', 4, 'A'), ('B', 3, 'B')]
+
+    Cache.insert 'C' 'C' c
+    checkEntries c [('A', 4, 'A'), ('B', 3, 'B'), ('C', 5, 'C')]
+
+    -- evict 'B':
+    Cache.insert 'D' 'D' c
+    checkEntries c [('A', 4, 'A'), ('C', 5, 'C'), ('D', 6, 'D')]
+    Cache.lookup 'B' c `shouldReturn` Nothing
@@ -35,6 +35,7 @@ import qualified Hasura.IncrementalSpec as IncrementalSpec
 -- import qualified Hasura.RQL.MetadataSpec as MetadataSpec
 import qualified Hasura.Server.MigrateSpec as MigrateSpec
 import qualified Hasura.Server.TelemetrySpec as TelemetrySpec
+import qualified Hasura.CacheBoundedSpec as CacheBoundedSpec

 data TestSuites
   = AllSuites !RawConnInfo
@@ -65,6 +66,7 @@ unitSpecs = do
   -- describe "Hasura.RQL.Metadata" MetadataSpec.spec -- Commenting until optimizing the test in CI
   describe "Data.Time" TimeSpec.spec
   describe "Hasura.Server.Telemetry" TelemetrySpec.spec
+  describe "Hasura.Cache.Bounded" CacheBoundedSpec.spec

 buildPostgresSpecs :: (HasVersion) => RawConnInfo -> IO Spec
 buildPostgresSpecs pgConnOptions = do