server: refactor serverctx data types

## Description
This PR merges the data type `ServeCtx` into `ServerCtx` to create a single data type which has all the required context to run HGE.
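
For reference, the merged record now lives in `Hasura.Server.App`. The sketch below abridges its shape from the diff in this PR (field list shortened, strictness annotations dropped); the former `ServeCtx` fields — loggers, metadata DB pool, shutdown latch, metadata resource version ref — become ordinary `ServerCtx` fields:

```haskell
-- Abridged from the ServerCtx definition in this diff; not the full field list.
data ServerCtx = ServerCtx
  { scLoggers :: Loggers,
    scCacheRef :: SchemaCacheRef,
    scAuthMode :: AuthMode,
    scManager :: HTTP.Manager,
    scInstanceId :: InstanceId,
    scServerMetrics :: ServerMetrics,
    scEnabledLogTypes :: HashSet (L.EngineLogType L.Hasura),
    scMetadataDbPool :: PG.PGPool,
    scShutdownLatch :: ShutdownLatch,
    scMetaVersionRef :: STM.TMVar MetadataResourceVersion,
    scPrometheusMetrics :: PrometheusMetrics,
    scTraceSamplingPolicy :: Tracing.SamplingPolicy
    -- ... plus the existing serve-option fields (enabled APIs, allowlist,
    -- maintenance mode, naming convention, metadata defaults, ...)
  }

-- Loggers moves along with it, unchanged in shape.
data Loggers = Loggers
  { _lsLoggerCtx :: L.LoggerCtx L.Hasura,
    _lsLogger :: L.Logger L.Hasura,
    _lsPgLogger :: PG.PGLogger
  }
```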

## Motivation
A single consolidated data type will be easier to update and maintain when the user config changes.
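
Concretely, callers no longer thread loggers, metrics, and hooks around alongside a separate `ServeCtx`; everything is captured once at initialisation. Roughly, paraphrasing the call sites changed in this diff (argument names are illustrative):

```haskell
-- Before: a ServeCtx plus several loose arguments at every call site.
runManagedT (initialiseServeCtx env globalCtx serveOptions serverMetrics) $ \serveCtx ->
  runHGEServer setupHook env serveOptions serveCtx initTime
    postPollHook serverMetrics ekgStore startupStatusHook prometheusMetrics traceSamplingPolicy

-- After: the extra inputs are handed to initialiseServerCtx once, and
-- runHGEServer only needs the resulting ServerCtx.
runManagedT (initialiseServerCtx env globalCtx serveOptions postPollHook serverMetrics prometheusMetrics traceSamplingPolicy) $ \serverCtx ->
  runHGEServer setupHook env serveOptions serverCtx initTime startupStatusHook ekgStore
```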

### Related Issues
https://hasurahq.atlassian.net/browse/GS-301


PR-URL: https://github.com/hasura/graphql-engine-mono/pull/7396
GitOrigin-RevId: f37594d15855bb50e556a4b11a58609af3f9f312
Commit: bf201e534c (parent: f047b7dd17)
Author: Puru Gupta, 2023-01-06 15:03:13 +05:30 (committed by hasura-bot)
4 changed files with 175 additions and 263 deletions

View File

@@ -58,10 +58,10 @@ import Harness.Http qualified as Http
 import Harness.Logging
 import Harness.Quoter.Yaml (fromYaml, yaml)
 import Harness.TestEnvironment (Server (..), TestEnvironment (..), getServer, serverUrl, testLogMessage)
-import Hasura.App (Loggers (..), ServeCtx (..))
 import Hasura.App qualified as App
 import Hasura.Logging (Hasura)
 import Hasura.Prelude
+import Hasura.Server.App (Loggers (..), ServerCtx (..))
 import Hasura.Server.Init (PostgresConnInfo (..), ServeOptions (..), unsafePort)
 import Hasura.Server.Metrics (ServerMetricsSpec, createServerMetrics)
 import Hasura.Server.Prometheus (makeDummyPrometheusMetrics)
@@ -320,24 +320,20 @@ runApp serveOptions = do
         liftIO $ createServerMetrics $ EKG.subset ServerSubset store
       pure (EKG.subset EKG.emptyOf store, serverMetrics)
   prometheusMetrics <- makeDummyPrometheusMetrics
-  runManagedT (App.initialiseServeCtx env globalCtx serveOptions serverMetrics) $ \serveCtx ->
+  runManagedT (App.initialiseServerCtx env globalCtx serveOptions Nothing serverMetrics prometheusMetrics sampleAlways) $ \serverCtx@ServerCtx {..} ->
     do
-      let Loggers _ _logger pgLogger = _scLoggers serveCtx
-      flip App.runPGMetadataStorageAppT (_scMetadataDbPool serveCtx, pgLogger)
+      let Loggers _ _ pgLogger = scLoggers
+      flip App.runPGMetadataStorageAppT (scMetadataDbPool, pgLogger)
         . lowerManagedT
         $ do
           App.runHGEServer
             (const $ pure ())
             env
             serveOptions
-            serveCtx
+            serverCtx
             initTime
             Nothing
-            serverMetrics
             ekgStore
-            Nothing
-            prometheusMetrics
-            sampleAlways
 
 -- | Used only for 'runApp' above.
 data TestMetricsSpec name metricType tags

View File

@@ -22,6 +22,7 @@ import Hasura.GC qualified as GC
 import Hasura.Logging (Hasura, LogLevel (..), defaultEnabledEngineLogTypes)
 import Hasura.Prelude
 import Hasura.RQL.DDL.Schema
+import Hasura.Server.App (Loggers (..), ServerCtx (..))
 import Hasura.Server.Init
 import Hasura.Server.Metrics (ServerMetricsSpec, createServerMetrics)
 import Hasura.Server.Migrate (downgradeCatalog)
@@ -66,32 +67,26 @@ runApp env (HGEOptions rci metadataDbUrl hgeCmd) = do
       -- It'd be nice if we didn't have to call runManagedT twice here, but
       -- there is a data dependency problem since the call to runPGMetadataStorageApp
-      -- below depends on serveCtx.
-      runManagedT (initialiseServeCtx env globalCtx serveOptions serverMetrics) $ \serveCtx -> do
+      -- below depends on serverCtx.
+      runManagedT (initialiseServerCtx env globalCtx serveOptions Nothing serverMetrics prometheusMetrics sampleAlways) $ \serverCtx@ServerCtx {..} -> do
         -- Catches the SIGTERM signal and initiates a graceful shutdown.
         -- Graceful shutdown for regular HTTP requests is already implemented in
         -- Warp, and is triggered by invoking the 'closeSocket' callback.
         -- We only catch the SIGTERM signal once, that is, if the user hits CTRL-C
         -- once again, we terminate the process immediately.
-        -- The function is written in this style to avoid the shutdown
-        -- handler retaining a reference to the entire serveCtx (see #344)
-        -- If you modify this code then you should check the core to see
-        -- that serveCtx is not retained.
-        _ <- case serveCtx of
-          ServeCtx {_scShutdownLatch} ->
-            liftIO $ do
-              void $ Signals.installHandler Signals.sigTERM (Signals.CatchOnce (shutdownGracefully _scShutdownLatch)) Nothing
-              void $ Signals.installHandler Signals.sigINT (Signals.CatchOnce (shutdownGracefully _scShutdownLatch)) Nothing
+        liftIO $ do
+          void $ Signals.installHandler Signals.sigTERM (Signals.CatchOnce (shutdownGracefully scShutdownLatch)) Nothing
+          void $ Signals.installHandler Signals.sigINT (Signals.CatchOnce (shutdownGracefully scShutdownLatch)) Nothing
 
-        let Loggers _ logger pgLogger = _scLoggers serveCtx
+        let Loggers _ logger pgLogger = scLoggers
 
         _idleGCThread <-
           C.forkImmortal "ourIdleGC" logger $
             GC.ourIdleGC logger (seconds 0.3) (seconds 10) (seconds 60)
 
-        flip runPGMetadataStorageAppT (_scMetadataDbPool serveCtx, pgLogger) . lowerManagedT $ do
-          runHGEServer (const $ pure ()) env serveOptions serveCtx initTime Nothing serverMetrics ekgStore Nothing prometheusMetrics sampleAlways
+        flip runPGMetadataStorageAppT (scMetadataDbPool, pgLogger) . lowerManagedT $ do
+          runHGEServer (const $ pure ()) env serveOptions serverCtx initTime Nothing ekgStore
     HCExport -> do
       GlobalCtx {..} <- initGlobalCtx env metadataDbUrl rci
       res <- runTxWithMinimalPool _gcMetadataDbConnInfo fetchMetadataFromCatalog

View File

@ -5,17 +5,17 @@
-- | Imported by 'server/src-exec/Main.hs'. -- | Imported by 'server/src-exec/Main.hs'.
module Hasura.App module Hasura.App
( ExitCode (DatabaseMigrationError, DowngradeProcessError, MetadataCleanError, MetadataExportError, SchemaCacheInitError), ( ExitCode (AuthConfigurationError, DatabaseMigrationError, DowngradeProcessError, MetadataCleanError, MetadataExportError, SchemaCacheInitError),
ExitException (ExitException), ExitException (ExitException),
GlobalCtx (..), GlobalCtx (..),
Loggers (..),
PGMetadataStorageAppT (runPGMetadataStorageAppT), PGMetadataStorageAppT (runPGMetadataStorageAppT),
ServeCtx (ServeCtx, _scLoggers, _scMetadataDbPool, _scShutdownLatch),
accessDeniedErrMsg, accessDeniedErrMsg,
flushLogger, flushLogger,
getCatalogStateTx, getCatalogStateTx,
initGlobalCtx, initGlobalCtx,
initialiseServeCtx, initAuthMode,
initialiseServerCtx,
initSubscriptionsState,
migrateCatalogSchema, migrateCatalogSchema,
mkLoggers, mkLoggers,
mkPGLogger, mkPGLogger,
@ -86,6 +86,7 @@ import Hasura.GraphQL.Execute.Action
import Hasura.GraphQL.Execute.Action.Subscription import Hasura.GraphQL.Execute.Action.Subscription
import Hasura.GraphQL.Execute.Backend qualified as EB import Hasura.GraphQL.Execute.Backend qualified as EB
import Hasura.GraphQL.Execute.Subscription.Poll qualified as ES import Hasura.GraphQL.Execute.Subscription.Poll qualified as ES
import Hasura.GraphQL.Execute.Subscription.State qualified as ES
import Hasura.GraphQL.Logging (MonadQueryLog (..)) import Hasura.GraphQL.Logging (MonadQueryLog (..))
import Hasura.GraphQL.Schema.Options qualified as Options import Hasura.GraphQL.Schema.Options qualified as Options
import Hasura.GraphQL.Transport.HTTP import Hasura.GraphQL.Transport.HTTP
@ -142,10 +143,10 @@ import Hasura.Server.Version
import Hasura.Session import Hasura.Session
import Hasura.ShutdownLatch import Hasura.ShutdownLatch
import Hasura.Tracing qualified as Tracing import Hasura.Tracing qualified as Tracing
import Network.HTTP.Client qualified as HTTP
import Network.HTTP.Client.Blocklisting (Blocklist) import Network.HTTP.Client.Blocklisting (Blocklist)
import Network.HTTP.Client.CreateManager (mkHttpManager) import Network.HTTP.Client.CreateManager (mkHttpManager)
import Network.HTTP.Client.Manager (HasHttpManagerM (..)) import Network.HTTP.Client.Manager (HasHttpManagerM (..))
import Network.HTTP.Client.Transformable qualified as HTTP
import Network.Wai (Application) import Network.Wai (Application)
import Network.Wai.Handler.Warp qualified as Warp import Network.Wai.Handler.Warp qualified as Warp
import Options.Applicative import Options.Applicative
@ -268,26 +269,6 @@ initGlobalCtx env metadataDbUrl defaultPgConnInfo = do
let mdConnInfo = mkConnInfoFromMDb mdUrl let mdConnInfo = mkConnInfoFromMDb mdUrl
mkGlobalCtx mdConnInfo (Just (dbUrl, srcConnInfo)) mkGlobalCtx mdConnInfo (Just (dbUrl, srcConnInfo))
-- | Context required for the 'serve' CLI command.
data ServeCtx = ServeCtx
{ _scHttpManager :: HTTP.Manager,
_scInstanceId :: InstanceId,
_scLoggers :: Loggers,
_scEnabledLogTypes :: HashSet (EngineLogType Hasura),
_scMetadataDbPool :: PG.PGPool,
_scShutdownLatch :: ShutdownLatch,
_scSchemaCacheRef :: SchemaCacheRef,
_scMetaVersionRef :: STM.TMVar MetadataResourceVersion
}
-- | Collection of the LoggerCtx, the regular Logger and the PGLogger
-- TODO (from master): better naming?
data Loggers = Loggers
{ _lsLoggerCtx :: !(LoggerCtx Hasura),
_lsLogger :: !(Logger Hasura),
_lsPgLogger :: !PG.PGLogger
}
-- | An application with Postgres database as a metadata storage -- | An application with Postgres database as a metadata storage
newtype PGMetadataStorageAppT m a = PGMetadataStorageAppT {runPGMetadataStorageAppT :: (PG.PGPool, PG.PGLogger) -> m a} newtype PGMetadataStorageAppT m a = PGMetadataStorageAppT {runPGMetadataStorageAppT :: (PG.PGPool, PG.PGLogger) -> m a}
deriving deriving
@ -321,15 +302,51 @@ resolvePostgresConnInfo env dbUrlConf maybeRetries = do
where where
retries = fromMaybe 1 maybeRetries retries = fromMaybe 1 maybeRetries
initAuthMode ::
(C.ForkableMonadIO m, Tracing.HasReporter m) =>
ServeOptions impl ->
HTTP.Manager ->
Logger Hasura ->
m AuthMode
initAuthMode ServeOptions {..} httpManager logger = do
authModeRes <-
runExceptT $
setupAuthMode
soAdminSecret
soAuthHook
soJwtSecret
soUnAuthRole
logger
httpManager
authMode <- onLeft authModeRes (throwErrExit AuthConfigurationError . T.unpack)
-- forking a dedicated polling thread to dynamically get the latest JWK settings
-- set by the user and update the JWK accordingly. This will help in applying the
-- updates without restarting HGE.
_ <- C.forkImmortal "update JWK" logger $ updateJwkCtx authMode httpManager logger
return authMode
initSubscriptionsState ::
ServeOptions impl ->
Logger Hasura ->
Maybe ES.SubscriptionPostPollHook ->
IO ES.SubscriptionsState
initSubscriptionsState ServeOptions {..} logger liveQueryHook = ES.initSubscriptionsState soLiveQueryOpts soStreamingQueryOpts postPollHook
where
postPollHook = fromMaybe (ES.defaultSubscriptionPostPollHook logger) liveQueryHook
-- | Initializes or migrates the catalog and returns the context required to start the server. -- | Initializes or migrates the catalog and returns the context required to start the server.
initialiseServeCtx :: initialiseServerCtx ::
(C.ForkableMonadIO m, MonadCatch m) => (C.ForkableMonadIO m, MonadCatch m) =>
Env.Environment -> Env.Environment ->
GlobalCtx -> GlobalCtx ->
ServeOptions Hasura -> ServeOptions Hasura ->
Maybe ES.SubscriptionPostPollHook ->
ServerMetrics -> ServerMetrics ->
ManagedT m ServeCtx PrometheusMetrics ->
initialiseServeCtx env GlobalCtx {..} so@ServeOptions {..} serverMetrics = do Tracing.SamplingPolicy ->
ManagedT m ServerCtx
initialiseServerCtx env GlobalCtx {..} serveOptions@ServeOptions {..} liveQueryHook serverMetrics prometheusMetrics traceSamplingPolicy = do
instanceId <- liftIO generateInstanceId instanceId <- liftIO generateInstanceId
latch <- liftIO newShutdownLatch latch <- liftIO newShutdownLatch
loggers@(Loggers loggerCtx logger pgLogger) <- mkLoggers soEnabledLogTypes soLogLevel loggers@(Loggers loggerCtx logger pgLogger) <- mkLoggers soEnabledLogTypes soLogLevel
@ -343,7 +360,7 @@ initialiseServeCtx env GlobalCtx {..} so@ServeOptions {..} serverMetrics = do
slInfo = A.toJSON errMsg slInfo = A.toJSON errMsg
} }
-- log serve options -- log serve options
unLogger logger $ serveOptsToLog so unLogger logger $ serveOptsToLog serveOptions
-- log postgres connection info -- log postgres connection info
unLogger logger $ connInfoToLog _gcMetadataDbConnInfo unLogger logger $ connInfoToLog _gcMetadataDbConnInfo
@ -414,16 +431,40 @@ initialiseServeCtx env GlobalCtx {..} so@ServeOptions {..} serverMetrics = do
srvMgr <- liftIO $ mkHttpManager (readTlsAllowlist schemaCacheRef) mempty srvMgr <- liftIO $ mkHttpManager (readTlsAllowlist schemaCacheRef) mempty
authMode <- liftIO $ initAuthMode serveOptions srvMgr logger
subscriptionsState <- liftIO $ initSubscriptionsState serveOptions logger liveQueryHook
pure $ pure $
ServeCtx ServerCtx
srvMgr { scLoggers = loggers,
instanceId scCacheRef = schemaCacheRef,
loggers scAuthMode = authMode,
soEnabledLogTypes scManager = srvMgr,
metadataDbPool scSQLGenCtx = sqlGenCtx,
latch scEnabledAPIs = soEnabledAPIs,
schemaCacheRef scInstanceId = instanceId,
metaVersionRef scSubscriptionState = subscriptionsState,
scEnableAllowlist = soEnableAllowlist,
scEnvironment = env,
scResponseInternalErrorsConfig = soResponseInternalErrorsConfig,
scRemoteSchemaPermsCtx = soEnableRemoteSchemaPermissions,
scFunctionPermsCtx = soInferFunctionPermissions,
scEnableMaintenanceMode = soEnableMaintenanceMode,
scExperimentalFeatures = soExperimentalFeatures,
scLoggingSettings = LoggingSettings soEnabledLogTypes soEnableMetadataQueryLogging,
scEventingMode = soEventingMode,
scEnableReadOnlyMode = soReadOnlyMode,
scDefaultNamingConvention = soDefaultNamingConvention,
scServerMetrics = serverMetrics,
scMetadataDefaults = soMetadataDefaults,
scEnabledLogTypes = soEnabledLogTypes,
scMetadataDbPool = metadataDbPool,
scShutdownLatch = latch,
scMetaVersionRef = metaVersionRef,
scPrometheusMetrics = prometheusMetrics,
scTraceSamplingPolicy = traceSamplingPolicy
}
mkLoggers :: mkLoggers ::
(MonadIO m, MonadBaseControl IO m) => (MonadIO m, MonadBaseControl IO m) =>
@ -558,24 +599,18 @@ runHGEServer ::
(ServerCtx -> Spock.SpockT m ()) -> (ServerCtx -> Spock.SpockT m ()) ->
Env.Environment -> Env.Environment ->
ServeOptions impl -> ServeOptions impl ->
ServeCtx -> ServerCtx ->
-- and mutations
-- | start time -- | start time
UTCTime -> UTCTime ->
Maybe ES.SubscriptionPostPollHook ->
ServerMetrics ->
EKG.Store EKG.EmptyMetrics ->
-- | A hook which can be called to indicate when the server is started succesfully -- | A hook which can be called to indicate when the server is started succesfully
Maybe (IO ()) -> Maybe (IO ()) ->
PrometheusMetrics -> EKG.Store EKG.EmptyMetrics ->
Tracing.SamplingPolicy ->
ManagedT m () ManagedT m ()
runHGEServer setupHook env serveOptions serveCtx initTime postPollHook serverMetrics ekgStore startupStatusHook prometheusMetrics traceSamplingPolicy = do runHGEServer setupHook env serveOptions serverCtx@ServerCtx {..} initTime startupStatusHook ekgStore = do
waiApplication <- waiApplication <-
mkHGEServer setupHook env serveOptions serveCtx postPollHook serverMetrics ekgStore prometheusMetrics traceSamplingPolicy mkHGEServer setupHook env serveOptions serverCtx ekgStore
let logger = _lsLogger $ _scLoggers serveCtx let logger = _lsLogger $ scLoggers
-- `startupStatusHook`: add `Service started successfully` message to config_status -- `startupStatusHook`: add `Service started successfully` message to config_status
-- table when a tenant starts up in multitenant -- table when a tenant starts up in multitenant
let warpSettings :: Warp.Settings let warpSettings :: Warp.Settings
@ -595,12 +630,12 @@ runHGEServer setupHook env serveOptions serveCtx initTime postPollHook serverMet
( \unmask -> ( \unmask ->
bracket_ bracket_
( do ( do
EKG.Gauge.inc (smWarpThreads serverMetrics) EKG.Gauge.inc (smWarpThreads scServerMetrics)
incWarpThreads (pmConnections prometheusMetrics) incWarpThreads (pmConnections scPrometheusMetrics)
) )
( do ( do
EKG.Gauge.dec (smWarpThreads serverMetrics) EKG.Gauge.dec (smWarpThreads scServerMetrics)
decWarpThreads (pmConnections prometheusMetrics) decWarpThreads (pmConnections scPrometheusMetrics)
) )
(f unmask) (f unmask)
) )
@ -608,7 +643,7 @@ runHGEServer setupHook env serveOptions serveCtx initTime postPollHook serverMet
shutdownHandler :: IO () -> IO () shutdownHandler :: IO () -> IO ()
shutdownHandler closeSocket = shutdownHandler closeSocket =
LA.link =<< LA.async do LA.link =<< LA.async do
waitForShutdown $ _scShutdownLatch serveCtx waitForShutdown $ scShutdownLatch
unLogger logger $ mkGenericLog @Text LevelInfo "server" "gracefully shutting down server" unLogger logger $ mkGenericLog @Text LevelInfo "server" "gracefully shutting down server"
closeSocket closeSocket
@ -654,16 +689,10 @@ mkHGEServer ::
(ServerCtx -> Spock.SpockT m ()) -> (ServerCtx -> Spock.SpockT m ()) ->
Env.Environment -> Env.Environment ->
ServeOptions impl -> ServeOptions impl ->
ServeCtx -> ServerCtx ->
-- and mutations
Maybe ES.SubscriptionPostPollHook ->
ServerMetrics ->
EKG.Store EKG.EmptyMetrics -> EKG.Store EKG.EmptyMetrics ->
PrometheusMetrics ->
Tracing.SamplingPolicy ->
ManagedT m Application ManagedT m Application
mkHGEServer setupHook env ServeOptions {..} ServeCtx {..} postPollHook serverMetrics ekgStore prometheusMetrics traceSamplingPolicy = do mkHGEServer setupHook env ServeOptions {..} serverCtx@ServerCtx {..} ekgStore = do
-- Comment this to enable expensive assertions from "GHC.AssertNF". These -- Comment this to enable expensive assertions from "GHC.AssertNF". These
-- will log lines to STDOUT containing "not in normal form". In the future we -- will log lines to STDOUT containing "not in normal form". In the future we
-- could try to integrate this into our tests. For now this is a development -- could try to integrate this into our tests. For now this is a development
@ -681,25 +710,7 @@ mkHGEServer setupHook env ServeOptions {..} ServeCtx {..} postPollHook serverMet
| otherwise = Options.DisableBigQueryStringNumericInput | otherwise = Options.DisableBigQueryStringNumericInput
sqlGenCtx = SQLGenCtx soStringifyNum soDangerousBooleanCollapse optimizePermissionFilters bigqueryStringNumericInput sqlGenCtx = SQLGenCtx soStringifyNum soDangerousBooleanCollapse optimizePermissionFilters bigqueryStringNumericInput
Loggers loggerCtx logger _ = _scLoggers Loggers loggerCtx logger _ = scLoggers
authModeRes <-
lift $
runExceptT $
setupAuthMode
soAdminSecret
soAuthHook
soJwtSecret
soUnAuthRole
logger
_scHttpManager
authMode <- onLeft authModeRes (throwErrExit AuthConfigurationError . T.unpack)
-- forking a dedicated polling thread to dynamically get the latest JWK settings
-- set by the user and update the JWK accordingly. This will help in applying the
-- updates without restarting HGE.
_ <- C.forkManagedT "update JWK" logger $ updateJwkCtx authMode _scHttpManager logger
HasuraApp app cacheRef actionSubState stopWsServer <- HasuraApp app cacheRef actionSubState stopWsServer <-
lift $ lift $
@ -707,41 +718,20 @@ mkHGEServer setupHook env ServeOptions {..} ServeCtx {..} postPollHook serverMet
mkWaiApp mkWaiApp
setupHook setupHook
env env
logger
sqlGenCtx
soEnableAllowlist
_scHttpManager
authMode
soCorsConfig soCorsConfig
soEnableConsole soEnableConsole
soConsoleAssetsDir soConsoleAssetsDir
soConsoleSentryDsn soConsoleSentryDsn
soEnableTelemetry soEnableTelemetry
_scInstanceId scCacheRef
soEnabledAPIs
soLiveQueryOpts
soStreamingQueryOpts
soResponseInternalErrorsConfig
postPollHook
_scSchemaCacheRef
ekgStore
serverMetrics
prometheusMetrics
soEnableRemoteSchemaPermissions
soInferFunctionPermissions
soConnectionOptions soConnectionOptions
soWebSocketKeepAlive soWebSocketKeepAlive
soEnableMaintenanceMode scEnabledLogTypes
soEventingMode serverCtx
soReadOnlyMode
soExperimentalFeatures
_scEnabledLogTypes
soWebSocketConnectionInitTimeout soWebSocketConnectionInitTimeout
soEnableMetadataQueryLogging ekgStore
soDefaultNamingConvention
soMetadataDefaults
traceSamplingPolicy
-- Init ServerConfigCtx
let serverConfigCtx = let serverConfigCtx =
ServerConfigCtx ServerConfigCtx
soInferFunctionPermissions soInferFunctionPermissions
@ -770,10 +760,10 @@ mkHGEServer setupHook env ServeOptions {..} ServeCtx {..} postPollHook serverMet
_ <- _ <-
startSchemaSyncProcessorThread startSchemaSyncProcessorThread
logger logger
_scHttpManager scManager
_scMetaVersionRef scMetaVersionRef
cacheRef cacheRef
_scInstanceId scInstanceId
serverConfigCtx serverConfigCtx
newLogTVar newLogTVar
@ -803,7 +793,7 @@ mkHGEServer setupHook env ServeOptions {..} ServeCtx {..} postPollHook serverMet
_updateThread <- _updateThread <-
C.forkManagedT "checkForUpdates" logger $ C.forkManagedT "checkForUpdates" logger $
liftIO $ liftIO $
checkForUpdates loggerCtx _scHttpManager checkForUpdates loggerCtx scManager
-- Start a background thread for source pings -- Start a background thread for source pings
_sourcePingPoller <- _sourcePingPoller <-
@ -826,13 +816,13 @@ mkHGEServer setupHook env ServeOptions {..} ServeCtx {..} postPollHook serverMet
runMetadataStorageT getMetadataDbUid runMetadataStorageT getMetadataDbUid
>>= (`onLeft` throwErrJExit DatabaseMigrationError) >>= (`onLeft` throwErrJExit DatabaseMigrationError)
pgVersion <- pgVersion <-
liftIO (runExceptT $ PG.runTx _scMetadataDbPool (PG.ReadCommitted, Nothing) $ getPgVersion) liftIO (runExceptT $ PG.runTx scMetadataDbPool (PG.ReadCommitted, Nothing) $ getPgVersion)
>>= (`onLeft` throwErrJExit DatabaseMigrationError) >>= (`onLeft` throwErrJExit DatabaseMigrationError)
telemetryThread <- telemetryThread <-
C.forkManagedT "runTelemetry" logger $ C.forkManagedT "runTelemetry" logger $
liftIO $ liftIO $
runTelemetry logger _scHttpManager (getSchemaCache cacheRef) dbUid _scInstanceId pgVersion soExperimentalFeatures runTelemetry logger scManager (getSchemaCache cacheRef) dbUid scInstanceId pgVersion soExperimentalFeatures
return $ Just telemetryThread return $ Just telemetryThread
else return Nothing else return Nothing
@ -962,12 +952,12 @@ mkHGEServer setupHook env ServeOptions {..} ServeCtx {..} postPollHook serverMet
(C.ThreadShutdown (liftIO eventsGracefulShutdownAction)) (C.ThreadShutdown (liftIO eventsGracefulShutdownAction))
$ processEventQueue $ processEventQueue
logger logger
_scHttpManager scManager
(getSchemaCache cacheRef) (getSchemaCache cacheRef)
eventEngineCtx eventEngineCtx
lockedEventsCtx lockedEventsCtx
serverMetrics scServerMetrics
(pmEventTriggerMetrics prometheusMetrics) (pmEventTriggerMetrics scPrometheusMetrics)
soEnableMaintenanceMode soEnableMaintenanceMode
startAsyncActionsPollerThread logger lockedEventsCtx cacheRef actionSubState = do startAsyncActionsPollerThread logger lockedEventsCtx cacheRef actionSubState = do
@ -997,8 +987,8 @@ mkHGEServer setupHook env ServeOptions {..} ServeCtx {..} postPollHook serverMet
logger logger
(getSchemaCache cacheRef) (getSchemaCache cacheRef)
(leActionEvents lockedEventsCtx) (leActionEvents lockedEventsCtx)
_scHttpManager scManager
prometheusMetrics scPrometheusMetrics
sleepTime sleepTime
Nothing Nothing
@ -1032,8 +1022,8 @@ mkHGEServer setupHook env ServeOptions {..} ServeCtx {..} postPollHook serverMet
$ processScheduledTriggers $ processScheduledTriggers
env env
logger logger
_scHttpManager scManager
prometheusMetrics scPrometheusMetrics
(getSchemaCache cacheRef) (getSchemaCache cacheRef)
lockedEventsCtx lockedEventsCtx

View File

@ -7,9 +7,10 @@ module Hasura.Server.App
Handler, Handler,
HandlerCtx (hcReqHeaders, hcServerCtx, hcUser), HandlerCtx (hcReqHeaders, hcServerCtx, hcUser),
HasuraApp (HasuraApp), HasuraApp (HasuraApp),
Loggers (..),
MonadConfigApiHandler (..), MonadConfigApiHandler (..),
MonadMetadataApiAuthorization (..), MonadMetadataApiAuthorization (..),
ServerCtx (scManager, scLoggingSettings, scEnabledAPIs), ServerCtx (..),
boolToText, boolToText,
configApiGetHandler, configApiGetHandler,
isAdminSecretSet, isAdminSecretSet,
@ -22,6 +23,7 @@ module Hasura.Server.App
where where
import Control.Concurrent.Async.Lifted.Safe qualified as LA import Control.Concurrent.Async.Lifted.Safe qualified as LA
import Control.Concurrent.STM qualified as STM
import Control.Exception (IOException, try) import Control.Exception (IOException, try)
import Control.Monad.Stateless import Control.Monad.Stateless
import Control.Monad.Trans.Control (MonadBaseControl) import Control.Monad.Trans.Control (MonadBaseControl)
@ -43,6 +45,7 @@ import Data.Text.Conversions (convertText)
import Data.Text.Extended import Data.Text.Extended
import Data.Text.Lazy qualified as LT import Data.Text.Lazy qualified as LT
import Data.Text.Lazy.Encoding qualified as TL import Data.Text.Lazy.Encoding qualified as TL
import Database.PG.Query qualified as PG
import GHC.Stats.Extended qualified as RTS import GHC.Stats.Extended qualified as RTS
import Hasura.Backends.DataConnector.API (openApiSchema) import Hasura.Backends.DataConnector.API (openApiSchema)
import Hasura.Backends.Postgres.Execute.Types import Hasura.Backends.Postgres.Execute.Types
@ -50,8 +53,6 @@ import Hasura.Base.Error
import Hasura.EncJSON import Hasura.EncJSON
import Hasura.GraphQL.Execute qualified as E import Hasura.GraphQL.Execute qualified as E
import Hasura.GraphQL.Execute.Backend qualified as EB import Hasura.GraphQL.Execute.Backend qualified as EB
import Hasura.GraphQL.Execute.Subscription.Options qualified as ES
import Hasura.GraphQL.Execute.Subscription.Poll qualified as ES
import Hasura.GraphQL.Execute.Subscription.State qualified as ES import Hasura.GraphQL.Execute.Subscription.State qualified as ES
import Hasura.GraphQL.Explain qualified as GE import Hasura.GraphQL.Explain qualified as GE
import Hasura.GraphQL.Logging (MonadQueryLog) import Hasura.GraphQL.Logging (MonadQueryLog)
@ -99,6 +100,7 @@ import Hasura.Server.Types
import Hasura.Server.Utils import Hasura.Server.Utils
import Hasura.Server.Version import Hasura.Server.Version
import Hasura.Session import Hasura.Session
import Hasura.ShutdownLatch
import Hasura.Tracing qualified as Tracing import Hasura.Tracing qualified as Tracing
import Network.HTTP.Client qualified as HTTP import Network.HTTP.Client qualified as HTTP
import Network.HTTP.Types qualified as HTTP import Network.HTTP.Types qualified as HTTP
@ -115,7 +117,7 @@ import Web.Spock.Core ((<//>))
import Web.Spock.Core qualified as Spock import Web.Spock.Core qualified as Spock
data ServerCtx = ServerCtx data ServerCtx = ServerCtx
{ scLogger :: !(L.Logger L.Hasura), { scLoggers :: !Loggers,
scCacheRef :: !SchemaCacheRef, scCacheRef :: !SchemaCacheRef,
scAuthMode :: !AuthMode, scAuthMode :: !AuthMode,
scManager :: !HTTP.Manager, scManager :: !HTTP.Manager,
@ -124,7 +126,6 @@ data ServerCtx = ServerCtx
scInstanceId :: !InstanceId, scInstanceId :: !InstanceId,
scSubscriptionState :: !ES.SubscriptionsState, scSubscriptionState :: !ES.SubscriptionsState,
scEnableAllowlist :: !Bool, scEnableAllowlist :: !Bool,
scEkgStore :: !(EKG.Store EKG.EmptyMetrics),
scResponseInternalErrorsConfig :: !ResponseInternalErrorsConfig, scResponseInternalErrorsConfig :: !ResponseInternalErrorsConfig,
scEnvironment :: !Env.Environment, scEnvironment :: !Env.Environment,
scRemoteSchemaPermsCtx :: !Options.RemoteSchemaPermissions, scRemoteSchemaPermsCtx :: !Options.RemoteSchemaPermissions,
@ -135,9 +136,22 @@ data ServerCtx = ServerCtx
scEventingMode :: !EventingMode, scEventingMode :: !EventingMode,
scEnableReadOnlyMode :: !ReadOnlyMode, scEnableReadOnlyMode :: !ReadOnlyMode,
scDefaultNamingConvention :: !(Maybe NamingCase), scDefaultNamingConvention :: !(Maybe NamingCase),
scPrometheusMetrics :: !PrometheusMetrics, scServerMetrics :: !ServerMetrics,
scMetadataDefaults :: !MetadataDefaults, scMetadataDefaults :: !MetadataDefaults,
scTraceSamplingPolicy :: !Tracing.SamplingPolicy scEnabledLogTypes :: HashSet (L.EngineLogType L.Hasura),
scMetadataDbPool :: PG.PGPool,
scShutdownLatch :: ShutdownLatch,
scMetaVersionRef :: STM.TMVar MetadataResourceVersion,
scPrometheusMetrics :: PrometheusMetrics,
scTraceSamplingPolicy :: Tracing.SamplingPolicy
}
-- | Collection of the LoggerCtx, the regular Logger and the PGLogger
-- TODO (from master): better naming?
data Loggers = Loggers
{ _lsLoggerCtx :: !(L.LoggerCtx L.Hasura),
_lsLogger :: !(L.Logger L.Hasura),
_lsPgLogger :: !PG.PGLogger
} }
data HandlerCtx = HandlerCtx data HandlerCtx = HandlerCtx
@ -315,7 +329,7 @@ mkSpockAction serverCtx@ServerCtx {..} qErrEncoder qErrModifier apiHandler = do
runMetadataStorageT $ flip runReaderT handlerCtx $ runResourceLimits handlerLimit $ handler runMetadataStorageT $ flip runReaderT handlerCtx $ runResourceLimits handlerLimit $ handler
getInfo parsedRequest = do getInfo parsedRequest = do
authenticationResp <- lift (resolveUserInfo scLogger scManager headers scAuthMode parsedRequest) authenticationResp <- lift (resolveUserInfo (_lsLogger scLoggers) scManager headers scAuthMode parsedRequest)
authInfo <- onLeft authenticationResp (logErrorAndResp Nothing requestId req (reqBody, Nothing) False origHeaders (ExtraUserInfo Nothing) . qErrModifier) authInfo <- onLeft authenticationResp (logErrorAndResp Nothing requestId req (reqBody, Nothing) False origHeaders (ExtraUserInfo Nothing) . qErrModifier)
let (userInfo, _, authHeaders, extraUserInfo) = authInfo let (userInfo, _, authHeaders, extraUserInfo) = authInfo
pure pure
@ -382,7 +396,7 @@ mkSpockAction serverCtx@ServerCtx {..} qErrEncoder qErrModifier apiHandler = do
Spock.ActionCtxT ctx m3 a3 Spock.ActionCtxT ctx m3 a3
logErrorAndResp userInfo reqId waiReq req includeInternal headers extraUserInfo qErr = do logErrorAndResp userInfo reqId waiReq req includeInternal headers extraUserInfo qErr = do
let httpLogMetadata = buildHttpLogMetadata @m3 emptyHttpLogGraphQLInfo extraUserInfo let httpLogMetadata = buildHttpLogMetadata @m3 emptyHttpLogGraphQLInfo extraUserInfo
lift $ logHttpError scLogger scLoggingSettings userInfo reqId waiReq req qErr headers httpLogMetadata lift $ logHttpError (_lsLogger scLoggers) scLoggingSettings userInfo reqId waiReq req qErr headers httpLogMetadata
Spock.setStatus $ qeStatus qErr Spock.setStatus $ qeStatus qErr
Spock.json $ qErrEncoder includeInternal qErr Spock.json $ qErrEncoder includeInternal qErr
@ -394,7 +408,7 @@ mkSpockAction serverCtx@ServerCtx {..} qErrEncoder qErrModifier apiHandler = do
encodingHeader = maybeToList (contentEncodingHeader <$> encodingType) encodingHeader = maybeToList (contentEncodingHeader <$> encodingType)
reqIdHeader = (requestIdHeader, txtToBs $ unRequestId reqId) reqIdHeader = (requestIdHeader, txtToBs $ unRequestId reqId)
allRespHeaders = pure reqIdHeader <> encodingHeader <> respHeaders <> authHdrs allRespHeaders = pure reqIdHeader <> encodingHeader <> respHeaders <> authHdrs
lift $ logHttpSuccess scLogger scLoggingSettings userInfo reqId waiReq req respBytes compressedResp qTime encodingType reqHeaders httpLoggingMetadata lift $ logHttpSuccess (_lsLogger scLoggers) scLoggingSettings userInfo reqId waiReq req respBytes compressedResp qTime encodingType reqHeaders httpLoggingMetadata
mapM_ setHeader allRespHeaders mapM_ setHeader allRespHeaders
Spock.lazyBytes compressedResp Spock.lazyBytes compressedResp
@ -414,7 +428,7 @@ v1QueryHandler ::
v1QueryHandler query = do v1QueryHandler query = do
(liftEitherM . authorizeV1QueryApi query) =<< ask (liftEitherM . authorizeV1QueryApi query) =<< ask
scRef <- asks (scCacheRef . hcServerCtx) scRef <- asks (scCacheRef . hcServerCtx)
logger <- asks (scLogger . hcServerCtx) logger <- asks (_lsLogger . scLoggers . hcServerCtx)
res <- bool (fst <$> (action logger)) (withSchemaCacheUpdate scRef logger Nothing (action logger)) $ queryModifiesSchemaCache query res <- bool (fst <$> (action logger)) (withSchemaCacheUpdate scRef logger Nothing (action logger)) $ queryModifiesSchemaCache query
return $ HttpResponse res [] return $ HttpResponse res []
where where
@ -476,7 +490,7 @@ v1MetadataHandler query = Tracing.trace "Metadata" $ do
_sccSQLGenCtx <- asks (scSQLGenCtx . hcServerCtx) _sccSQLGenCtx <- asks (scSQLGenCtx . hcServerCtx)
env <- asks (scEnvironment . hcServerCtx) env <- asks (scEnvironment . hcServerCtx)
instanceId <- asks (scInstanceId . hcServerCtx) instanceId <- asks (scInstanceId . hcServerCtx)
logger <- asks (scLogger . hcServerCtx) logger <- asks (_lsLogger . scLoggers . hcServerCtx)
_sccRemoteSchemaPermsCtx <- asks (scRemoteSchemaPermsCtx . hcServerCtx) _sccRemoteSchemaPermsCtx <- asks (scRemoteSchemaPermsCtx . hcServerCtx)
_sccFunctionPermsCtx <- asks (scFunctionPermsCtx . hcServerCtx) _sccFunctionPermsCtx <- asks (scFunctionPermsCtx . hcServerCtx)
_sccExperimentalFeatures <- asks (scExperimentalFeatures . hcServerCtx) _sccExperimentalFeatures <- asks (scExperimentalFeatures . hcServerCtx)
@ -517,7 +531,7 @@ v2QueryHandler ::
v2QueryHandler query = Tracing.trace "v2 Query" $ do v2QueryHandler query = Tracing.trace "v2 Query" $ do
(liftEitherM . authorizeV2QueryApi query) =<< ask (liftEitherM . authorizeV2QueryApi query) =<< ask
scRef <- asks (scCacheRef . hcServerCtx) scRef <- asks (scCacheRef . hcServerCtx)
logger <- asks (scLogger . hcServerCtx) logger <- asks (_lsLogger . scLoggers . hcServerCtx)
res <- res <-
bool (fst <$> dbAction) (withSchemaCacheUpdate scRef logger Nothing dbAction) $ bool (fst <$> dbAction) (withSchemaCacheUpdate scRef logger Nothing dbAction) $
V2Q.queryModifiesSchema query V2Q.queryModifiesSchema query
@ -575,7 +589,7 @@ v1Alpha1GQHandler queryType query = do
reqHeaders <- asks hcReqHeaders reqHeaders <- asks hcReqHeaders
ipAddress <- asks hcSourceIpAddress ipAddress <- asks hcSourceIpAddress
requestId <- asks hcRequestId requestId <- asks hcRequestId
logger <- asks (scLogger . hcServerCtx) logger <- asks (_lsLogger . scLoggers . hcServerCtx)
responseErrorsConfig <- asks (scResponseInternalErrorsConfig . hcServerCtx) responseErrorsConfig <- asks (scResponseInternalErrorsConfig . hcServerCtx)
env <- asks (scEnvironment . hcServerCtx) env <- asks (scEnvironment . hcServerCtx)
@ -595,7 +609,7 @@ mkExecutionContext = do
(sc, scVer) <- liftIO $ readSchemaCacheRef scRef (sc, scVer) <- liftIO $ readSchemaCacheRef scRef
sqlGenCtx <- asks (scSQLGenCtx . hcServerCtx) sqlGenCtx <- asks (scSQLGenCtx . hcServerCtx)
enableAL <- asks (scEnableAllowlist . hcServerCtx) enableAL <- asks (scEnableAllowlist . hcServerCtx)
logger <- asks (scLogger . hcServerCtx) logger <- asks (_lsLogger . scLoggers . hcServerCtx)
readOnlyMode <- asks (scEnableReadOnlyMode . hcServerCtx) readOnlyMode <- asks (scEnableReadOnlyMode . hcServerCtx)
prometheusMetrics <- asks (scPrometheusMetrics . hcServerCtx) prometheusMetrics <- asks (scPrometheusMetrics . hcServerCtx)
pure $ E.ExecutionCtx logger sqlGenCtx (lastBuiltSchemaCache sc) scVer manager enableAL readOnlyMode prometheusMetrics pure $ E.ExecutionCtx logger sqlGenCtx (lastBuiltSchemaCache sc) scVer manager enableAL readOnlyMode prometheusMetrics
@ -776,15 +790,6 @@ mkWaiApp ::
(ServerCtx -> Spock.SpockT m ()) -> (ServerCtx -> Spock.SpockT m ()) ->
-- | Set of environment variables for reference in UIs -- | Set of environment variables for reference in UIs
Env.Environment -> Env.Environment ->
-- | a 'L.Hasura' specific logger
L.Logger L.Hasura ->
SQLGenCtx ->
-- | is AllowList enabled - TODO: change this boolean to sumtype
Bool ->
-- | HTTP manager so that we can re-use sessions
HTTP.Manager ->
-- | 'AuthMode' in which the application should operate in
AuthMode ->
CorsConfig -> CorsConfig ->
-- | is console enabled - TODO: better type -- | is console enabled - TODO: better type
Bool -> Bool ->
@ -794,136 +799,60 @@ mkWaiApp ::
Maybe Text -> Maybe Text ->
-- | is telemetry enabled -- | is telemetry enabled
Bool -> Bool ->
-- | each application, when run, gets an 'InstanceId'. this is used at various places including
-- schema syncing and telemetry
InstanceId ->
-- | set of the enabled 'API's
S.HashSet API ->
ES.LiveQueriesOptions ->
ES.StreamQueriesOptions ->
ResponseInternalErrorsConfig ->
Maybe ES.SubscriptionPostPollHook ->
SchemaCacheRef -> SchemaCacheRef ->
EKG.Store EKG.EmptyMetrics ->
ServerMetrics ->
PrometheusMetrics ->
Options.RemoteSchemaPermissions ->
Options.InferFunctionPermissions ->
WS.ConnectionOptions -> WS.ConnectionOptions ->
KeepAliveDelay -> KeepAliveDelay ->
MaintenanceMode () ->
EventingMode ->
ReadOnlyMode ->
-- | Set of the enabled experimental features
S.HashSet ExperimentalFeature ->
S.HashSet (L.EngineLogType L.Hasura) -> S.HashSet (L.EngineLogType L.Hasura) ->
ServerCtx ->
WSConnectionInitTimeout -> WSConnectionInitTimeout ->
-- | is metadata query logging in http-log enabled EKG.Store EKG.EmptyMetrics ->
MetadataQueryLoggingMode ->
-- | default naming convention
Maybe NamingCase ->
-- | default metadata entries
MetadataDefaults ->
Tracing.SamplingPolicy ->
m HasuraApp m HasuraApp
mkWaiApp mkWaiApp
setupHook setupHook
env env
logger
sqlGenCtx
enableAL
httpManager
mode
corsCfg corsCfg
enableConsole enableConsole
consoleAssetsDir consoleAssetsDir
consoleSentryDsn consoleSentryDsn
enableTelemetry enableTelemetry
instanceId
apis
lqOpts
streamQOpts
responseErrorsConfig
liveQueryHook
schemaCacheRef schemaCacheRef
ekgStore
serverMetrics
prometheusMetrics
enableRSPermsCtx
functionPermsCtx
connectionOptions connectionOptions
keepAliveDelay keepAliveDelay
maintenanceMode
eventingMode
readOnlyMode
experimentalFeatures
enabledLogTypes enabledLogTypes
serverCtx@ServerCtx {..}
wsConnInitTimeout wsConnInitTimeout
enableMetadataQueryLogging ekgStore = do
defaultNC
metadataDefaults
traceSamplingPolicy = do
let getSchemaCache' = first lastBuiltSchemaCache <$> readSchemaCacheRef schemaCacheRef let getSchemaCache' = first lastBuiltSchemaCache <$> readSchemaCacheRef schemaCacheRef
let corsPolicy = mkDefaultCorsPolicy corsCfg let corsPolicy = mkDefaultCorsPolicy corsCfg
postPollHook = fromMaybe (ES.defaultSubscriptionPostPollHook logger) liveQueryHook
subscriptionsState <- liftIO $ ES.initSubscriptionsState lqOpts streamQOpts postPollHook
wsServerEnv <- wsServerEnv <-
WS.createWSServerEnv WS.createWSServerEnv
logger (_lsLogger scLoggers)
subscriptionsState scSubscriptionState
getSchemaCache' getSchemaCache'
httpManager scManager
corsPolicy corsPolicy
sqlGenCtx scSQLGenCtx
readOnlyMode scEnableReadOnlyMode
enableAL scEnableAllowlist
keepAliveDelay keepAliveDelay
serverMetrics scServerMetrics
prometheusMetrics scPrometheusMetrics
traceSamplingPolicy scTraceSamplingPolicy
let serverCtx =
ServerCtx
{ scLogger = logger,
scCacheRef = schemaCacheRef,
scAuthMode = mode,
scManager = httpManager,
scSQLGenCtx = sqlGenCtx,
scEnabledAPIs = apis,
scInstanceId = instanceId,
scSubscriptionState = subscriptionsState,
scEnableAllowlist = enableAL,
scEkgStore = ekgStore,
scEnvironment = env,
scResponseInternalErrorsConfig = responseErrorsConfig,
scRemoteSchemaPermsCtx = enableRSPermsCtx,
scFunctionPermsCtx = functionPermsCtx,
scEnableMaintenanceMode = maintenanceMode,
scExperimentalFeatures = experimentalFeatures,
scLoggingSettings = LoggingSettings enabledLogTypes enableMetadataQueryLogging,
scEventingMode = eventingMode,
scEnableReadOnlyMode = readOnlyMode,
scDefaultNamingConvention = defaultNC,
scPrometheusMetrics = prometheusMetrics,
scMetadataDefaults = metadataDefaults,
scTraceSamplingPolicy = traceSamplingPolicy
}
spockApp <- liftWithStateless $ \lowerIO -> spockApp <- liftWithStateless $ \lowerIO ->
Spock.spockAsApp $ Spock.spockAsApp $
Spock.spockT lowerIO $ Spock.spockT lowerIO $
httpApp setupHook corsCfg serverCtx enableConsole consoleAssetsDir consoleSentryDsn enableTelemetry httpApp setupHook corsCfg serverCtx enableConsole consoleAssetsDir consoleSentryDsn enableTelemetry ekgStore
let wsServerApp = WS.createWSServerApp env enabledLogTypes mode wsServerEnv wsConnInitTimeout -- TODO: Lyndon: Can we pass environment through wsServerEnv? let wsServerApp = WS.createWSServerApp env enabledLogTypes scAuthMode wsServerEnv wsConnInitTimeout -- TODO: Lyndon: Can we pass environment through wsServerEnv?
stopWSServer = WS.stopWSServerApp wsServerEnv stopWSServer = WS.stopWSServerApp wsServerEnv
waiApp <- liftWithStateless $ \lowerIO -> waiApp <- liftWithStateless $ \lowerIO ->
pure $ WSC.websocketsOr connectionOptions (\ip conn -> lowerIO $ wsServerApp ip conn) spockApp pure $ WSC.websocketsOr connectionOptions (\ip conn -> lowerIO $ wsServerApp ip conn) spockApp
return $ HasuraApp waiApp schemaCacheRef (ES._ssAsyncActions subscriptionsState) stopWSServer return $ HasuraApp waiApp schemaCacheRef (ES._ssAsyncActions scSubscriptionState) stopWSServer
httpApp :: httpApp ::
forall m. forall m.
@ -953,8 +882,9 @@ httpApp ::
Maybe Text -> Maybe Text ->
Maybe Text -> Maybe Text ->
Bool -> Bool ->
EKG.Store EKG.EmptyMetrics ->
Spock.SpockT m () Spock.SpockT m ()
httpApp setupHook corsCfg serverCtx enableConsole consoleAssetsDir consoleSentryDsn enableTelemetry = do httpApp setupHook corsCfg serverCtx enableConsole consoleAssetsDir consoleSentryDsn enableTelemetry ekgStore = do
-- Additional spock action to run -- Additional spock action to run
setupHook serverCtx setupHook serverCtx
@ -1085,7 +1015,8 @@ httpApp setupHook corsCfg serverCtx enableConsole consoleAssetsDir consoleSentry
Spock.post "v1/graphql" $ Spock.post "v1/graphql" $
spockAction GH.encodeGQErr allMod200 $ spockAction GH.encodeGQErr allMod200 $
mkGQLRequestHandler $ mkGQLRequestHandler $
mkGQLAPIRespHandler v1GQHandler mkGQLAPIRespHandler $
v1GQHandler
Spock.post "v1beta1/relay" $ Spock.post "v1beta1/relay" $
spockAction GH.encodeGQErr allMod200 $ spockAction GH.encodeGQErr allMod200 $
@ -1110,7 +1041,7 @@ httpApp setupHook corsCfg serverCtx enableConsole consoleAssetsDir consoleSentry
spockAction encodeQErr id $ spockAction encodeQErr id $
mkGetHandler $ do mkGetHandler $ do
onlyAdmin onlyAdmin
respJ <- liftIO $ EKG.sampleAll $ scEkgStore serverCtx respJ <- liftIO $ EKG.sampleAll ekgStore
return (emptyHttpLogGraphQLInfo, JSONResp $ HttpResponse (encJFromJValue $ EKG.sampleToJson respJ) []) return (emptyHttpLogGraphQLInfo, JSONResp $ HttpResponse (encJFromJValue $ EKG.sampleToJson respJ) [])
-- This deprecated endpoint used to show the query plan cache pre-PDV. -- This deprecated endpoint used to show the query plan cache pre-PDV.
-- Eventually this endpoint can be removed. -- Eventually this endpoint can be removed.
@ -1150,7 +1081,7 @@ httpApp setupHook corsCfg serverCtx enableConsole consoleAssetsDir consoleSentry
qErr = err404 NotFound "resource does not exist" qErr = err404 NotFound "resource does not exist"
raiseGenericApiError logger (scLoggingSettings serverCtx) headers qErr raiseGenericApiError logger (scLoggingSettings serverCtx) headers qErr
where where
logger = scLogger serverCtx logger = (_lsLogger . scLoggers) serverCtx
logSuccess msg = do logSuccess msg = do
req <- Spock.request req <- Spock.request