Mirror of https://github.com/hasura/graphql-engine.git
graphql-engine/server/src-lib/Hasura/Cache/Unbounded.hs
Auke Booij e17e47ef8c
server: avoid code duplication using type classes ()
There are two implementations of a cache, a bounded and an unbounded
variant, and their common interface can be elegantly captured in a type
class. Besides reducing the amount of error-prone code in the definition
of the cache, this version also reduces error-prone code at the cache's
usage sites: it makes the cache an abstract object, so a call site cannot
distinguish between cache types. Any decision about what should be cached
is made through the interface of the cache rather than at the call site,
and that is what this variant captures.
2020-05-13 11:17:32 +02:00
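The CacheObj class itself lives in Hasura.Cache.Types, which is not shown on this page. Judging from the instance defined below, it is plausibly a multi-parameter type class along the following lines; this is a sketch inferred from the usage sites, not the actual definition.

{-# LANGUAGE FunctionalDependencies #-}
{-# LANGUAGE MultiParamTypeClasses  #-}

-- Assumed shape of the class from Hasura.Cache.Types: the functional
-- dependency lets the key and value types be determined by the cache type.
class CacheObj c k v | c -> k v where
  lookup     :: k -> c -> IO (Maybe v)
  insert     :: k -> v -> c -> IO ()
  clear      :: c -> IO ()
  -- One association list per stripe, matching the instance below.
  getEntries :: c -> IO [[(k, v)]]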


{-| An in-memory, unbounded, capability-local cache implementation. By making the cache
capability-local, data may be recomputed up to once per capability (which usually means up to once
per OS thread), but write contention from multiple threads is unlikely. -}
module Hasura.Cache.Unbounded
  ( UnboundedCache
  , initialise
  , insertAllStripes
  ) where

import           Hasura.Prelude      hiding (lookup)

import           Hasura.Cache.Types

import           Control.Concurrent  (getNumCapabilities, myThreadId, threadCapability)

import qualified Data.HashMap.Strict as Map
import qualified Data.IORef          as IORef
import qualified Data.Vector         as V
newtype LocalCacheRef k v = LocalCacheRef (IORef.IORef (Map.HashMap k v))

getEntriesLocal :: LocalCacheRef k v -> IO [(k, v)]
getEntriesLocal (LocalCacheRef ioRef) =
  Map.toList <$> IORef.readIORef ioRef

-- | Create a new, empty local cache.
initialiseLocal :: IO (LocalCacheRef k v)
initialiseLocal = LocalCacheRef <$> IORef.newIORef Map.empty

clearLocal :: LocalCacheRef k v -> IO ()
clearLocal (LocalCacheRef ref) =
  IORef.atomicModifyIORef' ref $ const (Map.empty, ())

lookupLocal :: (Hashable k, Eq k) => LocalCacheRef k v -> k -> IO (Maybe v)
lookupLocal (LocalCacheRef ref) k =
  Map.lookup k <$> IORef.readIORef ref

insertLocal :: (Hashable k, Eq k) => LocalCacheRef k v -> k -> v -> IO ()
insertLocal (LocalCacheRef ref) k v =
  IORef.atomicModifyIORef' ref $ \c -> (Map.insert k v c, ())

-- | Striping the cache across multiple handles can improve performance
-- under concurrent access, since several handles can be used in parallel.
newtype UnboundedCache k v = UnboundedCache (V.Vector (LocalCacheRef k v))

instance (Hashable k, Ord k) => CacheObj (UnboundedCache k v) k v where
  lookup k striped = do
    localHandle <- getLocal striped
    lookupLocal localHandle k
  insert k v striped = do
    localHandle <- getLocal striped
    insertLocal localHandle k v
  clear (UnboundedCache caches) =
    V.mapM_ clearLocal caches
  getEntries (UnboundedCache localCaches) =
    mapM getEntriesLocal $ V.toList localCaches

-- | Create a new 'UnboundedCache' with one stripe per capability, so that
-- each capability normally operates on its own handle.
initialise :: IO (UnboundedCache k v)
initialise = do
  capabilities <- getNumCapabilities
  UnboundedCache <$> V.replicateM capabilities initialiseLocal

{-# INLINE getLocal #-}
getLocal :: UnboundedCache k v -> IO (LocalCacheRef k v)
getLocal (UnboundedCache handles) = do
  (i, _) <- myThreadId >>= threadCapability
  -- The number of capabilities can grow dynamically, so make sure we wrap
  -- around when indexing.
  let j = i `mod` V.length handles
  return $ handles V.! j

-- | Insert into all stripes (non-atomically).
insertAllStripes
  :: (Hashable k, Eq k) => k -> v -> UnboundedCache k v -> IO ()
insertAllStripes k v (UnboundedCache handles) =
  forM_ handles $ \localHandle ->
    insertLocal localHandle k v
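
For illustration, a caller that depends only on the abstract interface might look like the sketch below. Only initialise, insert, and lookup come from this module and the CacheObj class; cachedCompute, the String/Int instantiation, and the stand-in computation are hypothetical, and the import of CacheObj assumes Hasura.Cache.Types exports the class.

-- Hypothetical usage sketch (not part of the module above): memoise a
-- computation through the abstract cache interface.
import Prelude hiding (lookup)

import Hasura.Cache.Types (CacheObj (..))
import Hasura.Cache.Unbounded (UnboundedCache, initialise)

cachedCompute :: UnboundedCache String Int -> String -> IO Int
cachedCompute cache k = do
  mv <- lookup k cache
  case mv of
    Just v  -> pure v            -- hit: reuse the stripe-local value
    Nothing -> do
      let v = length k           -- stands in for a real computation
      insert k v cache           -- at worst recomputed once per stripe
      pure v

main :: IO ()
main = do
  cache <- initialise
  print =<< cachedCompute cache "hello"

Because stripes are capability-local, a value may be recomputed once per capability before every stripe has seen it, which is exactly the trade-off described in the module haddock.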