diff --git a/server/graphql-engine.cabal b/server/graphql-engine.cabal index b0bf43efae3..91bc8e3dcb4 100644 --- a/server/graphql-engine.cabal +++ b/server/graphql-engine.cabal @@ -174,11 +174,16 @@ library -- HTTP compression , zlib + -- caching + , psqueues >= 0.2 + exposed-modules: Hasura.Prelude , Hasura.Logging , Hasura.EncJSON , Hasura.Db , Hasura.Cache + , Hasura.Cache.Bounded + , Hasura.Cache.Unbounded , Hasura.Server.App , Hasura.Server.Auth diff --git a/server/src-exec/Main.hs b/server/src-exec/Main.hs index 457d50c6143..8d268ac071c 100644 --- a/server/src-exec/Main.hs +++ b/server/src-exec/Main.hs @@ -65,29 +65,6 @@ parseHGECommand = <> command "version" (info (pure HCVersion) (progDesc "Prints the version of GraphQL Engine")) ) - where - serveOpts = RawServeOptions - <$> parseServerPort - <*> parseServerHost - <*> parseConnParams - <*> parseTxIsolation - <*> (parseAdminSecret <|> parseAccessKey) - <*> parseWebHook - <*> parseJwtSecret - <*> parseUnAuthRole - <*> parseCorsConfig - <*> parseEnableConsole - <*> parseConsoleAssetsDir - <*> parseEnableTelemetry - <*> parseWsReadCookie - <*> parseStringifyNum - <*> parseEnabledAPIs - <*> parseMxRefetchInt - <*> parseMxBatchSize - <*> parseEnableAllowlist - <*> parseEnabledLogs - <*> parseLogLevel - parseArgs :: IO HGEOptions parseArgs = do @@ -123,7 +100,7 @@ main = do HCServe so@(ServeOptions port host cp isoL mAdminSecret mAuthHook mJwtSecret mUnAuthRole corsCfg enableConsole consoleAssetsDir enableTelemetry strfyNum enabledAPIs lqOpts enableAL - enabledLogs serverLogLevel) -> do + enabledLogs serverLogLevel planCacheOptions) -> do let sqlGenCtx = SQLGenCtx strfyNum @@ -152,7 +129,7 @@ main = do HasuraApp app cacheRef cacheInitTime shutdownApp <- mkWaiApp isoL loggerCtx sqlGenCtx enableAL pool ci httpManager am corsCfg enableConsole consoleAssetsDir enableTelemetry - instanceId enabledAPIs lqOpts + instanceId enabledAPIs lqOpts planCacheOptions -- log inconsistent schema objects inconsObjs <- scInconsistentObjs <$> getSCFromRef cacheRef diff --git a/server/src-lib/Hasura/Cache.hs b/server/src-lib/Hasura/Cache.hs index a33d609774f..316370b0cc3 100644 --- a/server/src-lib/Hasura/Cache.hs +++ b/server/src-lib/Hasura/Cache.hs @@ -1,91 +1,54 @@ -{-| An in-memory, unbounded, capability-local cache implementation. By making the cache -capability-local, data may be recomputed up to once per capability (which usually means up to once -per OS thread), but write contention from multiple threads is unlikely. -} module Hasura.Cache - ( UnboundedCache - , initCache - , clearCache - , mapCache - , insert + ( Cache + , CacheOptions + , mkCacheOptions + , B.CacheSize + , B.mkCacheSize + + , initialise , lookup + , insert + , clear + , getEntries ) where -import Control.Concurrent (getNumCapabilities, myThreadId, - threadCapability) -import qualified Data.HashMap.Strict as Map -import qualified Data.IORef as IORef -import qualified Data.Vector as V +import Hasura.Prelude hiding (lookup) -import Hasura.Prelude hiding (lookup) +import qualified Hasura.Cache.Bounded as B +import qualified Hasura.Cache.Unbounded as U -newtype LocalCacheRef k v = LocalCacheRef (IORef.IORef (Map.HashMap k v)) +data Cache k v + = CacheBounded !(B.BoundedCache k v) + | CacheUnbounded !(U.UnboundedCache k v) -mapLocalCacheRef - :: ((k, v) -> a) -> LocalCacheRef k v -> IO [a] -mapLocalCacheRef f (LocalCacheRef ioRef) = - map f . Map.toList <$> IORef.readIORef ioRef +newtype CacheOptions + = CacheOptions (Maybe B.CacheSize) --- | Create a new LC cache of the given size. 
-initLocalCache :: IO (LocalCacheRef k v) -initLocalCache = LocalCacheRef <$> IORef.newIORef Map.empty +mkCacheOptions :: Maybe B.CacheSize -> CacheOptions +mkCacheOptions = CacheOptions -clearIO :: LocalCacheRef k v -> IO () -clearIO (LocalCacheRef ref)= - IORef.atomicModifyIORef' ref $ const (Map.empty, ()) +initialise :: CacheOptions -> IO (Cache k v) +initialise (CacheOptions cacheSizeM) = + case cacheSizeM of + Nothing -> CacheUnbounded <$> U.initialise + Just cacheSize -> CacheBounded <$> B.initialise cacheSize --- | Return the cached result of the action or, in the case of a cache --- miss, execute the action and insertLocal it in the cache. -lookupIO :: (Hashable k, Eq k) => LocalCacheRef k v -> k -> IO (Maybe v) -lookupIO (LocalCacheRef ref) k = - Map.lookup k <$> IORef.readIORef ref +lookup :: (Hashable k, Ord k) => k -> Cache k v -> IO (Maybe v) +lookup k = \case + CacheBounded cache -> B.lookup k cache + CacheUnbounded cache -> U.lookup k cache -insertIO :: (Hashable k, Eq k) => LocalCacheRef k v -> k -> v -> IO () -insertIO (LocalCacheRef ref) k v = - IORef.atomicModifyIORef' ref $ \c -> (Map.insert k v c, ()) +insert :: (Hashable k, Ord k) => k -> v -> Cache k v -> IO () +insert k v = \case + CacheBounded cache -> B.insert k v cache + CacheUnbounded cache -> U.insert k v cache --- | Using a stripe of multiple handles can improve the performance in --- the case of concurrent accesses since several handles can be --- accessed in parallel. -newtype UnboundedCache k v = UnboundedCache (V.Vector (LocalCacheRef k v)) +clear :: Cache k v -> IO () +clear = \case + CacheBounded cache -> B.clear cache + CacheUnbounded cache -> U.clear cache -mapCache - :: ((k, v) -> a) -> UnboundedCache k v -> IO [[a]] -mapCache f (UnboundedCache localCaches) = - mapM (mapLocalCacheRef f) $ V.toList localCaches - --- | Create a new 'StripedHandle' with the given number of stripes and --- the given capacity for each stripe. -initCache :: IO (UnboundedCache k v) -initCache = do - capabilities <- getNumCapabilities - UnboundedCache <$> V.replicateM capabilities initLocalCache - -clearCache :: UnboundedCache k v -> IO () -clearCache (UnboundedCache caches) = - V.mapM_ clearIO caches - -{-# INLINE getLocal #-} -getLocal :: UnboundedCache k v -> IO (LocalCacheRef k v) -getLocal (UnboundedCache handles) = do - - (i, _) <- myThreadId >>= threadCapability - - -- The number of capability could be dynamically changed. - -- So, let's check the upper boundary of the vector - let lim = V.length handles - j | i < lim = i - | otherwise = i `mod` lim - - return $ handles V.! j - --- | Striped version of 'cached'. -insert - :: (Hashable k, Eq k) => UnboundedCache k v -> k -> v -> IO () -insert striped k v = do - localHandle <- getLocal striped - insertIO localHandle k v - -lookup :: (Hashable k, Eq k) => UnboundedCache k v -> k -> IO (Maybe v) -lookup striped k = do - localHandle <- getLocal striped - lookupIO localHandle k +getEntries :: (Hashable k, Ord k) => Cache k v -> IO [[(k, v)]] +getEntries = \case + CacheBounded cache -> B.getEntries cache + CacheUnbounded cache -> U.getEntries cache diff --git a/server/src-lib/Hasura/Cache/Bounded.hs b/server/src-lib/Hasura/Cache/Bounded.hs new file mode 100644 index 00000000000..378d44cf1b2 --- /dev/null +++ b/server/src-lib/Hasura/Cache/Bounded.hs @@ -0,0 +1,179 @@ +{-| An in-memory, Bounded by LRU strategy, capability-local cache implementation. 
+By making the cache capability-local, data may be recomputed up to once per +capability (which usually means up to once per OS thread), but write contention +from multiple threads is unlikely. +-} +module Hasura.Cache.Bounded + ( BoundedCache + , CacheSize + , mkCacheSize + + , initialise + , clear + , insert + , lookup + , getEntries + ) where + +import Hasura.Prelude hiding (lookup) + +import Control.Concurrent (getNumCapabilities, myThreadId, + threadCapability) +import Data.Word (Word16) + +import qualified Data.Aeson as J +import qualified Data.HashPSQ as HashPSQ +import qualified Data.IORef as IORef +import qualified Data.Vector as V + +newtype CacheSize + = CacheSize { unCacheSize :: Word16 } + deriving (Show, Read, Eq, Ord, Bounded, Num, Enum, J.ToJSON, J.FromJSON) + +mkCacheSize :: String -> Either String CacheSize +mkCacheSize v = + case readMaybe v of + Just w16 -> return (CacheSize w16) + Nothing -> fail "cache size should be between 1 - 65,535" + +newtype Tick + = Tick { unTick :: Word64 } + deriving (Show, Eq, Ord, Hashable, Bounded, Num, Enum) + +-- | LRU cache based on hashing +-- Based on https://hackage.haskell.org/package/lrucaching +data LruCache k v = LruCache + { _lcCapacity :: !CacheSize + -- ^ The maximum number of elements in the cache + , _lcSize :: !CacheSize + -- ^ The current number of elements in the cache + , _lcTick :: !Tick + -- ^ The priority is drawn from this tick, it is incremented + -- after every access + , _lcQueue :: !(HashPSQ.HashPSQ k Tick v) + -- ^ Underlying priority queue + } deriving (Show, Eq, Functor, Foldable, Traversable) + +-- | Create an empty 'LruCache' of the given size. +emptyCache :: CacheSize -> LruCache k v +emptyCache capacity = + LruCache + { _lcCapacity = capacity + , _lcSize = 0 + , _lcTick = Tick 0 + , _lcQueue = HashPSQ.empty + } + +-- | Restore 'LruCache' invariants +trim :: (Hashable k, Ord k) => LruCache k v -> LruCache k v +trim c + | _lcTick c == maxBound = emptyCache (_lcCapacity c) + | _lcSize c > _lcCapacity c = + c { _lcSize = _lcSize c - 1 + , _lcQueue = HashPSQ.deleteMin (_lcQueue c) + } + | otherwise = c + +-- | Insert an element into the 'LruCache'. +insertPure :: (Hashable k, Ord k) => k -> v -> LruCache k v -> LruCache k v +insertPure key val c = + trim $! + let (mbOldVal,queue) = HashPSQ.insertView key (_lcTick c) val (_lcQueue c) + in c { _lcSize = if isNothing mbOldVal + then _lcSize c + 1 + else _lcSize c + , _lcTick = _lcTick c + 1 + , _lcQueue = queue + } + +-- | Lookup an element in an 'LruCache' and mark it as the least +-- recently accessed. +lookupPure :: (Hashable k, Ord k) => k -> LruCache k v -> Maybe (v, LruCache k v) +lookupPure k c = + case HashPSQ.alter lookupAndBump k (_lcQueue c) of + (Nothing, _) -> Nothing + (Just x, q) -> + let !c' = trim $ c {_lcTick = _lcTick c + 1, _lcQueue = q} + in Just (x, c') + where + lookupAndBump Nothing = (Nothing, Nothing) + lookupAndBump (Just (_, x)) = (Just x, Just (_lcTick c, x)) + +newtype LocalCacheRef k v = LocalCacheRef (IORef.IORef (LruCache k v)) + +getLocalEntries + :: (Hashable k, Ord k) => LocalCacheRef k v -> IO [(k, v)] +getLocalEntries (LocalCacheRef ioRef) = + map (\(k, _, v) -> (k, v)) . HashPSQ.toList . _lcQueue + <$> IORef.readIORef ioRef + +-- | Create a new Local cache of the given size. 
+initLocalCache :: CacheSize -> IO (LocalCacheRef k v) +initLocalCache capacity = LocalCacheRef <$> IORef.newIORef (emptyCache capacity) + +-- | clear a local cache +clearLocal :: LocalCacheRef k v -> IO () +clearLocal (LocalCacheRef ref)= + IORef.atomicModifyIORef' ref $ + \currentCache -> (emptyCache (_lcCapacity currentCache), ()) + +-- | lookup for a key in the local cache +lookupLocal :: (Hashable k, Ord k) => LocalCacheRef k v -> k -> IO (Maybe v) +lookupLocal (LocalCacheRef ref) k = + -- | Return the result and replace the cache if needed + IORef.atomicModifyIORef' ref $ \currentCache -> + case lookupPure k currentCache of + Just (v, newCache) -> (newCache, Just v) + Nothing -> (currentCache, Nothing) + +-- | insert into a local cache +insertLocal :: (Hashable k, Ord k) => LocalCacheRef k v -> k -> v -> IO () +insertLocal (LocalCacheRef ref) k v = + IORef.atomicModifyIORef' ref $ \c -> (insertPure k v c, ()) + +-- | Using a stripe of multiple handles can improve the performance in +-- the case of concurrent accesses since several handles can be +-- accessed in parallel. +newtype BoundedCache k v = BoundedCache (V.Vector (LocalCacheRef k v)) + +getEntries + :: (Hashable k, Ord k) + => BoundedCache k v -> IO [[(k, v)]] +getEntries (BoundedCache localCaches) = + mapM getLocalEntries $ V.toList localCaches + +-- | Creates a new BoundedCache of the specified size, +-- with one stripe per capability +initialise :: CacheSize -> IO (BoundedCache k v) +initialise capacity = do + capabilities <- getNumCapabilities + BoundedCache <$> V.replicateM capabilities (initLocalCache capacity) + +clear :: BoundedCache k v -> IO () +clear (BoundedCache caches) = + V.mapM_ clearLocal caches + +{-# INLINE getLocal #-} +getLocal :: BoundedCache k v -> IO (LocalCacheRef k v) +getLocal (BoundedCache handles) = do + + (i, _) <- myThreadId >>= threadCapability + + -- The number of capability could be dynamically changed. + -- So, let's check the upper boundary of the vector + let lim = V.length handles + j | i < lim = i + | otherwise = i `mod` lim + + return $ handles V.! j + +insert + :: (Hashable k, Ord k) => k -> v -> BoundedCache k v -> IO () +insert k v striped = do + localHandle <- getLocal striped + insertLocal localHandle k v + +lookup :: (Hashable k, Ord k) => k -> BoundedCache k v -> IO (Maybe v) +lookup k striped = do + localHandle <- getLocal striped + lookupLocal localHandle k diff --git a/server/src-lib/Hasura/Cache/Unbounded.hs b/server/src-lib/Hasura/Cache/Unbounded.hs new file mode 100644 index 00000000000..f48d89a57ce --- /dev/null +++ b/server/src-lib/Hasura/Cache/Unbounded.hs @@ -0,0 +1,90 @@ +{-| An in-memory, unbounded, capability-local cache implementation. By making the cache +capability-local, data may be recomputed up to once per capability (which usually means up to once +per OS thread), but write contention from multiple threads is unlikely. -} +module Hasura.Cache.Unbounded + ( UnboundedCache + , initialise + , clear + , insert + , lookup + , getEntries + ) where + +import Hasura.Prelude hiding (lookup) + +import Control.Concurrent (getNumCapabilities, myThreadId, + threadCapability) + +import qualified Data.HashMap.Strict as Map +import qualified Data.IORef as IORef +import qualified Data.Vector as V + +newtype LocalCacheRef k v = LocalCacheRef (IORef.IORef (Map.HashMap k v)) + +getEntriesLocal + :: LocalCacheRef k v -> IO [(k, v)] +getEntriesLocal (LocalCacheRef ioRef) = + Map.toList <$> IORef.readIORef ioRef + +-- | Create a new LC cache of the given size. 
+initialiseLocal :: IO (LocalCacheRef k v) +initialiseLocal = LocalCacheRef <$> IORef.newIORef Map.empty + +clearLocal :: LocalCacheRef k v -> IO () +clearLocal (LocalCacheRef ref)= + IORef.atomicModifyIORef' ref $ const (Map.empty, ()) + +lookupLocal :: (Hashable k, Eq k) => LocalCacheRef k v -> k -> IO (Maybe v) +lookupLocal (LocalCacheRef ref) k = + Map.lookup k <$> IORef.readIORef ref + +insertLocal :: (Hashable k, Eq k) => LocalCacheRef k v -> k -> v -> IO () +insertLocal (LocalCacheRef ref) k v = + IORef.atomicModifyIORef' ref $ \c -> (Map.insert k v c, ()) + +-- | Using a stripe of multiple handles can improve the performance in +-- the case of concurrent accesses since several handles can be +-- accessed in parallel. +newtype UnboundedCache k v = UnboundedCache (V.Vector (LocalCacheRef k v)) + +getEntries + :: UnboundedCache k v -> IO [[(k, v)]] +getEntries (UnboundedCache localCaches) = + mapM getEntriesLocal $ V.toList localCaches + +-- | Create a new 'StripedHandle' with the given number of stripes and +-- the given capacity for each stripe. +initialise :: IO (UnboundedCache k v) +initialise = do + capabilities <- getNumCapabilities + UnboundedCache <$> V.replicateM capabilities initialiseLocal + +clear :: UnboundedCache k v -> IO () +clear (UnboundedCache caches) = + V.mapM_ clearLocal caches + +{-# INLINE getLocal #-} +getLocal :: UnboundedCache k v -> IO (LocalCacheRef k v) +getLocal (UnboundedCache handles) = do + + (i, _) <- myThreadId >>= threadCapability + + -- The number of capability could be dynamically changed. + -- So, let's check the upper boundary of the vector + let lim = V.length handles + j | i < lim = i + | otherwise = i `mod` lim + + return $ handles V.! j + +-- | Striped version of 'cached'. +insert + :: (Hashable k, Eq k) => k -> v -> UnboundedCache k v ->IO () +insert k v striped = do + localHandle <- getLocal striped + insertLocal localHandle k v + +lookup :: (Hashable k, Eq k) => k -> UnboundedCache k v ->IO (Maybe v) +lookup k striped = do + localHandle <- getLocal striped + lookupLocal localHandle k diff --git a/server/src-lib/Hasura/GraphQL/Execute.hs b/server/src-lib/Hasura/GraphQL/Execute.hs index a2cedfbb583..2f332b3f071 100644 --- a/server/src-lib/Hasura/GraphQL/Execute.hs +++ b/server/src-lib/Hasura/GraphQL/Execute.hs @@ -11,6 +11,8 @@ module Hasura.GraphQL.Execute , getSubsOp , EP.PlanCache + , EP.mkPlanCacheOptions + , EP.PlanCacheOptions , EP.initPlanCache , EP.clearPlanCache , EP.dumpPlanCache diff --git a/server/src-lib/Hasura/GraphQL/Execute/Plan.hs b/server/src-lib/Hasura/GraphQL/Execute/Plan.hs index 22bb7b515a8..5aed21d819f 100644 --- a/server/src-lib/Hasura/GraphQL/Execute/Plan.hs +++ b/server/src-lib/Hasura/GraphQL/Execute/Plan.hs @@ -1,6 +1,8 @@ module Hasura.GraphQL.Execute.Plan ( ReusablePlan(..) 
, PlanCache + , PlanCacheOptions + , mkPlanCacheOptions , getPlan , addPlan , initPlanCache @@ -8,11 +10,13 @@ module Hasura.GraphQL.Execute.Plan , dumpPlanCache ) where -import qualified Hasura.Cache as Cache import Hasura.Prelude import qualified Data.Aeson as J +import qualified Data.Aeson.Casing as J +import qualified Data.Aeson.TH as J +import qualified Hasura.Cache as Cache import qualified Hasura.GraphQL.Execute.LiveQuery as LQ import qualified Hasura.GraphQL.Execute.Query as EQ import qualified Hasura.GraphQL.Transport.HTTP.Protocol as GH @@ -24,7 +28,7 @@ data PlanId , _piRole :: !RoleName , _piOperationName :: !(Maybe GH.OperationName) , _piQuery :: !GH.GQLQueryText - } deriving (Show, Eq, Generic) + } deriving (Show, Eq, Ord, Generic) instance Hashable PlanId @@ -38,9 +42,7 @@ instance J.ToJSON PlanId where ] newtype PlanCache - = PlanCache - { _unPlanCache :: Cache.UnboundedCache PlanId ReusablePlan - } + = PlanCache {_unPlanCache :: Cache.Cache PlanId ReusablePlan} data ReusablePlan = RPQuery !EQ.ReusableQueryPlan @@ -51,14 +53,24 @@ instance J.ToJSON ReusablePlan where RPQuery queryPlan -> J.toJSON queryPlan RPSubs subsPlan -> J.toJSON subsPlan -initPlanCache :: IO PlanCache -initPlanCache = PlanCache <$> Cache.initCache +newtype PlanCacheOptions + = PlanCacheOptions { unPlanCacheSize :: Maybe Cache.CacheSize } + deriving (Show, Eq) +$(J.deriveJSON (J.aesonDrop 2 J.snakeCase) ''PlanCacheOptions) + +mkPlanCacheOptions :: Maybe Cache.CacheSize -> PlanCacheOptions +mkPlanCacheOptions = PlanCacheOptions + +initPlanCache :: PlanCacheOptions -> IO PlanCache +initPlanCache options = + PlanCache <$> + Cache.initialise (Cache.mkCacheOptions $ unPlanCacheSize options) getPlan :: SchemaCacheVer -> RoleName -> Maybe GH.OperationName -> GH.GQLQueryText -> PlanCache -> IO (Maybe ReusablePlan) getPlan schemaVer rn opNameM q (PlanCache planCache) = - Cache.lookup planCache planId + Cache.lookup planId planCache where planId = PlanId schemaVer rn opNameM q @@ -66,17 +78,17 @@ addPlan :: SchemaCacheVer -> RoleName -> Maybe GH.OperationName -> GH.GQLQueryText -> ReusablePlan -> PlanCache -> IO () addPlan schemaVer rn opNameM q queryPlan (PlanCache planCache) = - Cache.insert planCache planId queryPlan + Cache.insert planId queryPlan planCache where planId = PlanId schemaVer rn opNameM q clearPlanCache :: PlanCache -> IO () clearPlanCache (PlanCache planCache) = - Cache.clearCache planCache + Cache.clear planCache dumpPlanCache :: PlanCache -> IO J.Value dumpPlanCache (PlanCache cache) = - J.toJSON <$> Cache.mapCache dumpEntry cache + J.toJSON . 
map (map dumpEntry) <$> Cache.getEntries cache where dumpEntry (planId, plan) = J.object diff --git a/server/src-lib/Hasura/GraphQL/Transport/HTTP/Protocol.hs b/server/src-lib/Hasura/GraphQL/Transport/HTTP/Protocol.hs index 14cbe501acf..a29b31da1f6 100644 --- a/server/src-lib/Hasura/GraphQL/Transport/HTTP/Protocol.hs +++ b/server/src-lib/Hasura/GraphQL/Transport/HTTP/Protocol.hs @@ -67,7 +67,7 @@ instance (Hashable a) => Hashable (GQLReq a) newtype GQLQueryText = GQLQueryText { _unGQLQueryText :: Text - } deriving (Show, Eq, J.FromJSON, J.ToJSON, Hashable) + } deriving (Show, Eq, Ord, J.FromJSON, J.ToJSON, Hashable) type GQLReqUnparsed = GQLReq GQLQueryText type GQLReqParsed = GQLReq GQLExecDoc diff --git a/server/src-lib/Hasura/RQL/Types/Permission.hs b/server/src-lib/Hasura/RQL/Types/Permission.hs index dacc4af0251..d4dc0ee822d 100644 --- a/server/src-lib/Hasura/RQL/Types/Permission.hs +++ b/server/src-lib/Hasura/RQL/Types/Permission.hs @@ -44,7 +44,7 @@ import qualified PostgreSQL.Binary.Decoding as PD newtype RoleName = RoleName {getRoleTxt :: NonEmptyText} - deriving ( Show, Eq, Hashable, FromJSONKey, ToJSONKey, FromJSON + deriving ( Show, Eq, Ord, Hashable, FromJSONKey, ToJSONKey, FromJSON , ToJSON, Q.FromCol, Q.ToPrepArg, Lift) instance DQuote RoleName where diff --git a/server/src-lib/Hasura/RQL/Types/SchemaCache.hs b/server/src-lib/Hasura/RQL/Types/SchemaCache.hs index 6a80f28e394..28af86224ef 100644 --- a/server/src-lib/Hasura/RQL/Types/SchemaCache.hs +++ b/server/src-lib/Hasura/RQL/Types/SchemaCache.hs @@ -427,7 +427,7 @@ removeFromDepMap = newtype SchemaCacheVer = SchemaCacheVer { unSchemaCacheVer :: Word64 } - deriving (Show, Eq, Hashable, ToJSON, FromJSON) + deriving (Show, Eq, Ord, Hashable, ToJSON, FromJSON) initSchemaCacheVer :: SchemaCacheVer initSchemaCacheVer = SchemaCacheVer 0 diff --git a/server/src-lib/Hasura/Server/App.hs b/server/src-lib/Hasura/Server/App.hs index 24563f8b94f..44739eb9e9c 100644 --- a/server/src-lib/Hasura/Server/App.hs +++ b/server/src-lib/Hasura/Server/App.hs @@ -455,10 +455,11 @@ mkWaiApp -> InstanceId -> S.HashSet API -> EL.LiveQueriesOptions + -> E.PlanCacheOptions -> IO HasuraApp mkWaiApp isoLevel loggerCtx sqlGenCtx enableAL pool ci httpManager mode corsCfg enableConsole consoleAssetsDir enableTelemetry - instanceId apis lqOpts = do + instanceId apis lqOpts planCacheOptions = do let pgExecCtx = PGExecCtx pool isoLevel pgExecCtxSer = PGExecCtx pool Q.Serializable @@ -472,7 +473,7 @@ mkWaiApp isoLevel loggerCtx sqlGenCtx enableAL pool ci httpManager mode return (scRef, snd <$> time) cacheLock <- newMVar () - planCache <- E.initPlanCache + planCache <- E.initPlanCache planCacheOptions let corsPolicy = mkDefaultCorsPolicy corsCfg logger = L.mkLogger loggerCtx diff --git a/server/src-lib/Hasura/Server/Init.hs b/server/src-lib/Hasura/Server/Init.hs index 6a1641de5bb..38dbe90df67 100644 --- a/server/src-lib/Hasura/Server/Init.hs +++ b/server/src-lib/Hasura/Server/Init.hs @@ -18,6 +18,8 @@ import Data.Time.Clock.Units (milliseconds) import Network.Wai.Handler.Warp (HostPreference) import Options.Applicative +import qualified Hasura.Cache as Cache +import qualified Hasura.GraphQL.Execute as E import qualified Hasura.GraphQL.Execute.LiveQuery as LQ import qualified Hasura.Logging as L @@ -77,6 +79,7 @@ data RawServeOptions , rsoEnableAllowlist :: !Bool , rsoEnabledLogTypes :: !(Maybe [L.EngineLogType]) , rsoLogLevel :: !(Maybe L.LogLevel) + , rsoPlanCacheSize :: !(Maybe Cache.CacheSize) } deriving (Show, Eq) data ServeOptions @@ -99,6 +102,7 @@ data 
ServeOptions , soEnableAllowlist :: !Bool , soEnabledLogTypes :: !(Set.HashSet L.EngineLogType) , soLogLevel :: !L.LogLevel + , soPlanCacheOptions :: !E.PlanCacheOptions } deriving (Show, Eq) data RawConnInfo = @@ -200,6 +204,9 @@ instance FromEnv [L.EngineLogType] where instance FromEnv L.LogLevel where fromEnv = readLogLevel +instance FromEnv Cache.CacheSize where + fromEnv = Cache.mkCacheSize + parseStrAsBool :: String -> Either String Bool parseStrAsBool t | map toLower t `elem` truthVals = Right True @@ -312,10 +319,11 @@ mkServeOptions rso = do enabledLogs <- Set.fromList . fromMaybe (Set.toList L.defaultEnabledLogTypes) <$> withEnv (rsoEnabledLogTypes rso) (fst enabledLogsEnv) serverLogLevel <- fromMaybe L.LevelInfo <$> withEnv (rsoLogLevel rso) (fst logLevelEnv) + planCacheOptions <- E.mkPlanCacheOptions <$> withEnv (rsoPlanCacheSize rso) (fst planCacheSizeEnv) return $ ServeOptions port host connParams txIso adminScrt authHook jwtSecret unAuthRole corsCfg enableConsole consoleAssetsDir enableTelemetry strfyNum enabledAPIs lqOpts enableAL - enabledLogs serverLogLevel + enabledLogs serverLogLevel planCacheOptions where #ifdef DeveloperAPIs defaultAPIs = [METADATA,GRAPHQL,PGDUMP,CONFIG,DEVELOPER] @@ -926,13 +934,23 @@ enableAllowlistEnv = , "Only accept allowed GraphQL queries" ) -fallbackRefetchDelayEnv :: (String, String) -fallbackRefetchDelayEnv = - ( "HASURA_GRAPHQL_LIVE_QUERIES_FALLBACK_REFETCH_INTERVAL" - , "results will only be sent once in this interval (in milliseconds) for " - <> "live queries which cannot be multiplexed. Default: 1000 (1sec)" +planCacheSizeEnv :: (String, String) +planCacheSizeEnv = + ( "HASURA_GRAPHQL_QUERY_PLAN_CACHE_SIZE" + , "The maximum number of query plans that can be cached, allowed values: 0-65535, " <> + "0 disables the cache. If this value is not set, there is no limit on the number " <> + "of plans that are cached" ) +parsePlanCacheSize :: Parser (Maybe Cache.CacheSize) +parsePlanCacheSize = + optional $ + option (eitherReader Cache.mkCacheSize) + ( long "query-plan-cache-size" <> + metavar "QUERY_PLAN_CACHE_SIZE" <> + help (snd planCacheSizeEnv) + ) + parseEnabledLogs :: Parser (Maybe [L.EngineLogType]) parseEnabledLogs = optional $ option (eitherReader readLogTypes) @@ -976,26 +994,29 @@ serveOptsToLog :: ServeOptions -> StartupLog serveOptsToLog so = StartupLog L.LevelInfo "server_configuration" infoVal where - infoVal = J.object [ "port" J..= soPort so - , "server_host" J..= show (soHost so) - , "transaction_isolation" J..= show (soTxIso so) - , "admin_secret_set" J..= isJust (soAdminSecret so) - , "auth_hook" J..= (ahUrl <$> soAuthHook so) - , "auth_hook_mode" J..= (show . ahType <$> soAuthHook so) - , "jwt_secret" J..= (J.toJSON <$> soJwtSecret so) - , "unauth_role" J..= soUnAuthRole so - , "cors_config" J..= soCorsConfig so - , "enable_console" J..= soEnableConsole so - , "console_assets_dir" J..= soConsoleAssetsDir so - , "enable_telemetry" J..= soEnableTelemetry so - , "use_prepared_statements" J..= (Q.cpAllowPrepare . 
soConnParams) so - , "stringify_numeric_types" J..= soStringifyNum so - , "enabled_apis" J..= soEnabledAPIs so - , "live_query_options" J..= soLiveQueryOpts so - , "enable_allowlist" J..= soEnableAllowlist so - , "enabled_log_types" J..= soEnabledLogTypes so - , "log_level" J..= soLogLevel so - ] + infoVal = + J.object + [ "port" J..= soPort so + , "server_host" J..= show (soHost so) + , "transaction_isolation" J..= show (soTxIso so) + , "admin_secret_set" J..= isJust (soAdminSecret so) + , "auth_hook" J..= (ahUrl <$> soAuthHook so) + , "auth_hook_mode" J..= (show . ahType <$> soAuthHook so) + , "jwt_secret" J..= (J.toJSON <$> soJwtSecret so) + , "unauth_role" J..= soUnAuthRole so + , "cors_config" J..= soCorsConfig so + , "enable_console" J..= soEnableConsole so + , "console_assets_dir" J..= soConsoleAssetsDir so + , "enable_telemetry" J..= soEnableTelemetry so + , "use_prepared_statements" J..= (Q.cpAllowPrepare . soConnParams) so + , "stringify_numeric_types" J..= soStringifyNum so + , "enabled_apis" J..= soEnabledAPIs so + , "live_query_options" J..= soLiveQueryOpts so + , "enable_allowlist" J..= soEnableAllowlist so + , "enabled_log_types" J..= soEnabledLogTypes so + , "log_level" J..= soLogLevel so + , "plan_cache_options" J..= soPlanCacheOptions so + ] mkGenericStrLog :: L.LogLevel -> T.Text -> String -> StartupLog mkGenericStrLog logLevel k msg = @@ -1010,3 +1031,28 @@ inconsistentMetadataLog sc = StartupLog L.LevelWarn "inconsistent_metadata" infoVal where infoVal = J.object ["objects" J..= scInconsistentObjs sc] + +serveOpts :: Parser RawServeOptions +serveOpts = + RawServeOptions + <$> parseServerPort + <*> parseServerHost + <*> parseConnParams + <*> parseTxIsolation + <*> (parseAdminSecret <|> parseAccessKey) + <*> parseWebHook + <*> parseJwtSecret + <*> parseUnAuthRole + <*> parseCorsConfig + <*> parseEnableConsole + <*> parseConsoleAssetsDir + <*> parseEnableTelemetry + <*> parseWsReadCookie + <*> parseStringifyNum + <*> parseEnabledAPIs + <*> parseMxRefetchInt + <*> parseMxBatchSize + <*> parseEnableAllowlist + <*> parseEnabledLogs + <*> parseLogLevel + <*> parsePlanCacheSize
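
A minimal usage sketch (not part of the patch) for the new Hasura.Cache.Bounded module introduced above. The module name, mkCacheSize, initialise, insert and lookup all come from the diff; the String/Int key and value types and the expected eviction results are illustrative and assume every operation below runs on the same capability, since the cache keeps one LRU stripe per capability.

module Main where

import qualified Hasura.Cache.Bounded as B

main :: IO ()
main = do
  -- Parse the per-stripe capacity; mkCacheSize rejects anything that does
  -- not read as a Word16.
  capacity <- either fail return (B.mkCacheSize "2")
  cache    <- B.initialise capacity :: IO (B.BoundedCache String Int)

  -- Fill the stripe beyond its capacity of 2.
  B.insert "a" 1 cache
  B.insert "b" 2 cache
  B.insert "c" 3 cache

  -- "a" was the least recently used entry, so it should have been evicted
  -- (assuming the three inserts above landed in the same stripe).
  print =<< B.lookup "a" cache   -- expected: Nothing
  print =<< B.lookup "c" cache   -- expected: Just 3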
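
A second small sketch of the Hasura.Cache facade, again only illustrative: mkCacheOptions Nothing keeps the pre-existing unbounded backend, while Just a size selects the new bounded LRU backend. This mirrors what initPlanCache does with unPlanCacheSize, which in turn is fed from the new --query-plan-cache-size flag / HASURA_GRAPHQL_QUERY_PLAN_CACHE_SIZE env var. The planCacheDemo name and the String/Int instantiation are made up for the example; run it e.g. from GHCi.

import qualified Hasura.Cache as Cache

planCacheDemo :: IO ()
planCacheDemo = do
  -- Just a size picks the bounded backend: at most `size` entries are kept
  -- per capability stripe, evicted least-recently-used first.
  size  <- either fail return (Cache.mkCacheSize "4000")
  cache <- Cache.initialise (Cache.mkCacheOptions (Just size))
             :: IO (Cache.Cache String Int)

  -- Nothing keeps the previous behaviour: an unbounded, never-evicting cache.
  _unbounded <- Cache.initialise (Cache.mkCacheOptions Nothing)
                  :: IO (Cache.Cache String Int)

  Cache.insert "some-plan-key" 1 cache
  print =<< Cache.lookup "some-plan-key" cache  -- expected: Just 1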