2019-11-26 15:14:21 +03:00
|
|
|
{-# LANGUAGE RecordWildCards #-}
|
|
|
|
{-# LANGUAGE UndecidableInstances #-}
|
|
|
|
|
|
|
|
module Hasura.App where
|
|
|
|
|
|
|
|
import Control.Monad.Base
|
|
|
|
import Control.Monad.Stateless
|
2019-12-11 04:04:49 +03:00
|
|
|
import Control.Monad.STM (atomically)
|
|
|
|
import Control.Monad.Trans.Control (MonadBaseControl (..))
|
|
|
|
import Data.Aeson ((.=))
|
|
|
|
import Data.Time.Clock (UTCTime, getCurrentTime)
|
2019-11-26 15:14:21 +03:00
|
|
|
import Options.Applicative
|
2019-12-11 04:04:49 +03:00
|
|
|
import System.Environment (getEnvironment, lookupEnv)
|
|
|
|
import System.Exit (exitFailure)
|
|
|
|
|
|
|
|
import qualified Control.Concurrent as C
|
|
|
|
import qualified Control.Concurrent.Async.Lifted.Safe as LA
|
|
|
|
import qualified Data.Aeson as A
|
|
|
|
import qualified Data.ByteString.Char8 as BC
|
|
|
|
import qualified Data.ByteString.Lazy.Char8 as BLC
|
|
|
|
import qualified Data.Text as T
|
|
|
|
import qualified Data.Time.Clock as Clock
|
|
|
|
import qualified Data.Yaml as Y
|
|
|
|
import qualified Database.PG.Query as Q
|
|
|
|
import qualified Network.HTTP.Client as HTTP
|
|
|
|
import qualified Network.HTTP.Client.TLS as HTTP
|
|
|
|
import qualified Network.Wai.Handler.Warp as Warp
|
|
|
|
import qualified System.Posix.Signals as Signals
|
|
|
|
import qualified Text.Mustache.Compile as M
|
2019-11-26 15:14:21 +03:00
|
|
|
|
|
|
|
import Hasura.Db
|
|
|
|
import Hasura.EncJSON
|
|
|
|
import Hasura.Events.Lib
|
|
|
|
import Hasura.Logging
|
|
|
|
import Hasura.Prelude
|
2019-12-13 00:46:33 +03:00
|
|
|
import Hasura.RQL.Types (CacheRWM, Code (..), HasHttpManager,
|
|
|
|
HasSQLGenCtx, HasSystemDefined, QErr (..),
|
|
|
|
SQLGenCtx (..), SchemaCache (..), UserInfoM,
|
|
|
|
adminRole, adminUserInfo,
|
|
|
|
buildSchemaCacheStrict, decodeValue,
|
|
|
|
throw400, userRole, withPathK)
|
2019-12-09 01:17:39 +03:00
|
|
|
import Hasura.RQL.Types.Run
|
2019-11-26 15:14:21 +03:00
|
|
|
import Hasura.Server.App
|
|
|
|
import Hasura.Server.Auth
|
2019-12-11 04:04:49 +03:00
|
|
|
import Hasura.Server.CheckUpdates (checkForUpdates)
|
2019-11-26 15:14:21 +03:00
|
|
|
import Hasura.Server.Init
|
|
|
|
import Hasura.Server.Logging
|
2019-12-13 00:46:33 +03:00
|
|
|
import Hasura.Server.Migrate (migrateCatalog)
|
|
|
|
import Hasura.Server.Query (requiresAdmin, runQueryM)
|
2019-11-26 15:14:21 +03:00
|
|
|
import Hasura.Server.SchemaUpdate
|
|
|
|
import Hasura.Server.Telemetry
|
|
|
|
import Hasura.Server.Version
|
|
|
|
|
|
|
|
|
|
|
|
-- | Print a plain-text error message to stdout, then terminate the process
-- with a failing exit code. Never returns (hence the universally quantified
-- result type).
printErrExit :: (MonadIO m) => forall a . String -> m a
printErrExit message = liftIO $ do
  putStrLn message
  exitFailure
|
|
|
|
|
|
|
|
-- | Serialize the given value as JSON, print it to stdout, then terminate
-- the process with a failing exit code. Never returns.
printErrJExit :: (A.ToJSON a, MonadIO m) => forall b . a -> m b
printErrJExit errValue = do
  printJSON errValue
  liftIO exitFailure
|
|
|
|
|
|
|
|
-- | Command-line parser for the graphql-engine subcommands. Only the @serve@
-- subcommand takes further options (and gets its own @--help@ via 'helper');
-- the remaining subcommands are plain constructors.
parseHGECommand :: EnabledLogTypes impl => Parser (RawHGECommand impl)
parseHGECommand =
  subparser $ mconcat
    [ command "serve" $ info (helper <*> (HCServe <$> serveOptionsParser)) $
        progDesc "Start the GraphQL Engine Server"
        <> footerDoc (Just serveCmdFooter)
    , command "export" $ info (pure HCExport) $
        progDesc "Export graphql-engine's metadata to stdout"
    , command "clean" $ info (pure HCClean) $
        progDesc "Clean graphql-engine's metadata to start afresh"
    , command "execute" $ info (pure HCExecute) $
        progDesc "Execute a query"
    , command "downgrade" $ info (HCDowngrade <$> downgradeOptionsParser) $
        progDesc "Downgrade the GraphQL Engine schema to the specified version"
    , command "version" $ info (pure HCVersion) $
        progDesc "Prints the version of GraphQL Engine"
    ]
|
|
|
|
|
|
|
|
-- | Parse the command line, then resolve any options that may also be
-- supplied via environment variables ('runWithEnv'). On a resolution error
-- the process exits via 'printErrExit'.
parseArgs :: EnabledLogTypes impl => IO (HGEOptions impl)
parseArgs = do
  rawOptions  <- execParser parserInfo
  environment <- getEnvironment
  case runWithEnv environment (mkHGEOptions rawOptions) of
    Left err   -> printErrExit err
    Right opts -> return opts
  where
    parserInfo =
      info
        (helper <*> (HGEOptionsG <$> parseRawConnInfo <*> parseHGECommand))
        (fullDesc
          <> header "Hasura GraphQL Engine: Realtime GraphQL API over Postgres with access control"
          <> footerDoc (Just mainCmdFooter))
|
|
|
|
|
|
|
|
-- | Encode a value as JSON and write it to stdout with a trailing newline.
printJSON :: (A.ToJSON a, MonadIO m) => a -> m ()
printJSON jsonValue = liftIO $ BLC.putStrLn $ A.encode jsonValue
|
|
|
|
|
|
|
|
-- | Encode a value as YAML and write it to stdout with a trailing newline.
printYaml :: (A.ToJSON a, MonadIO m) => a -> m ()
printYaml yamlValue = liftIO $ BC.putStrLn $ Y.encode yamlValue
|
|
|
|
|
|
|
|
-- | Adapt the engine's structured logger into the pg-client logger callback,
-- logging retry messages at warning level.
mkPGLogger :: Logger Hasura -> Q.PGLogger
mkPGLogger (Logger logger) event =
  case event of
    Q.PLERetryMsg msg -> logger (PGLog LevelWarn msg)
|
|
|
|
|
|
|
|
|
|
|
|
-- | most of the required types for initializing graphql-engine
--
-- Built once by 'initialiseCtx' and threaded through the rest of startup
-- (see 'runHGEServer').
data InitCtx
  = InitCtx
  { _icHttpManager :: !HTTP.Manager
    -- ^ shared HTTP manager (created with TLS settings in 'initialiseCtx')
  , _icInstanceId  :: !InstanceId
    -- ^ identifier generated for this server instance
  , _icDbUid       :: !Text
    -- ^ unique id of the database, read via @getDbId@ during initialisation
  , _icLoggers     :: !Loggers
    -- ^ logger context, logger and pg logger bundle
  , _icConnInfo    :: !Q.ConnInfo
    -- ^ resolved postgres connection info
  , _icPgPool      :: !Q.PGPool
    -- ^ postgres connection pool (full-size for @serve@, minimal otherwise)
  }
|
|
|
|
|
|
|
|
-- | Collection of the LoggerCtx, the regular Logger and the PGLogger
-- TODO: better naming?
data Loggers
  = Loggers
  { _lsLoggerCtx :: !(LoggerCtx Hasura)
    -- ^ underlying logger context (needed by e.g. 'checkForUpdates')
  , _lsLogger    :: !(Logger Hasura)
    -- ^ structured logger built from the context
  , _lsPgLogger  :: !Q.PGLogger
    -- ^ pg-client logger derived via 'mkPGLogger'
  }
|
|
|
|
|
|
|
|
-- | Concrete application monad: a newtype over 'IO'. The 'HttpLog',
-- 'UserAuthentication', 'MetadataApiAuthorization' and 'ConsoleRenderer'
-- instances defined later in this module are given for this type.
newtype AppM a = AppM { unAppM :: IO a }
  deriving (Functor, Applicative, Monad, MonadIO, MonadBase IO, MonadBaseControl IO)
|
|
|
|
|
|
|
|
-- | this function initializes the catalog and returns an @InitCtx@, based on the command given
-- - for serve command it creates a proper PG connection pool
-- - for other commands, it creates a minimal pool
-- this exists as a separate function because the context (logger, http manager, pg pool) can be
-- used by other functions as well
--
-- Note: on any failure (bad connection info, catalog migration error, db-id
-- lookup error) this exits the process via 'printErrExit' / 'printErrJExit'
-- instead of returning.
initialiseCtx
  :: (HasVersion, MonadIO m)
  => HGECommand Hasura
  -> RawConnInfo
  -> m (InitCtx, UTCTime)
initialiseCtx hgeCmd rci = do
  -- capture start time first so callers can measure total startup latency
  initTime <- liftIO Clock.getCurrentTime
  -- global http manager
  httpManager <- liftIO $ HTTP.newManager HTTP.tlsManagerSettings
  instanceId <- liftIO generateInstanceId
  connInfo <- liftIO procConnInfo
  (loggers, pool) <- case hgeCmd of
    -- for server command generate a proper pool
    HCServe so@ServeOptions{..} -> do
      l@(Loggers _ logger pgLogger) <- mkLoggers soEnabledLogTypes soLogLevel
      -- log serve options
      unLogger logger $ serveOptsToLog so
      -- log postgres connection info
      unLogger logger $ connInfoToLog connInfo
      pool <- liftIO $ Q.initPGPool connInfo soConnParams pgLogger
      -- safe init catalog
      initialiseCatalog pool (SQLGenCtx soStringifyNum) httpManager logger
      return (l, pool)

    -- for other commands generate a minimal pool
    _ -> do
      l@(Loggers _ _ pgLogger) <- mkLoggers defaultEnabledLogTypes LevelInfo
      pool <- getMinimalPool pgLogger connInfo
      return (l, pool)

  -- get the unique db id
  eDbId <- liftIO $ runExceptT $ Q.runTx pool (Q.Serializable, Nothing) getDbId
  dbId <- either printErrJExit return eDbId

  return (InitCtx httpManager instanceId dbId loggers connInfo pool, initTime)
  where
    -- resolve raw connection info, exiting with a readable message on error
    procConnInfo =
      either (printErrExit . connInfoErrModifier) return $ mkConnInfo rci

    -- single-connection pool for non-serve commands
    getMinimalPool pgLogger ci = do
      let connParams = Q.defaultConnParams { Q.cpConns = 1 }
      liftIO $ Q.initPGPool ci connParams pgLogger

    -- build the logger context / logger / pg-logger bundle
    mkLoggers enabledLogs logLevel = do
      loggerCtx <- liftIO $ mkLoggerCtx (defaultLoggerSettings True logLevel) enabledLogs
      let logger = mkLogger loggerCtx
          pgLogger = mkPGLogger logger
      return $ Loggers loggerCtx logger pgLogger

    -- run catalog migrations as admin; log the result, exit on failure
    initialiseCatalog pool sqlGenCtx httpManager (Logger logger) = do
      currentTime <- liftIO getCurrentTime
      -- initialise the catalog
      initRes <- runAsAdmin pool sqlGenCtx httpManager $ migrateCatalog currentTime
      either printErrJExit (\(result, schemaCache) -> logger result $> schemaCache) initRes
|
2019-11-26 15:14:21 +03:00
|
|
|
|
|
|
|
-- | Run the HGE server: build the WAI application, start the background
-- threads (schema sync, event-trigger workers, update check, telemetry) and
-- finally block in Warp. Exits the process (via 'printErrExit' /
-- 'printErrJExit') if auth-mode resolution or event preparation fails.
runHGEServer
  :: ( HasVersion
     , MonadIO m
     , MonadStateless IO m
     , UserAuthentication m
     , MetadataApiAuthorization m
     , HttpLog m
     , ConsoleRenderer m
     , LA.Forall (LA.Pure m)
     )
  => ServeOptions impl
  -> InitCtx
  -> UTCTime
  -- ^ start time
  -> m ()
runHGEServer ServeOptions{..} InitCtx{..} initTime = do
  let sqlGenCtx = SQLGenCtx soStringifyNum
      Loggers loggerCtx logger _ = _icLoggers

  -- resolve the authentication mode from admin secret / webhook / JWT options
  authModeRes <- runExceptT $ mkAuthMode soAdminSecret soAuthHook soJwtSecret soUnAuthRole
                              _icHttpManager logger

  authMode <- either (printErrExit . T.unpack) return authModeRes

  -- build the WAI application together with the schema cache ref and the
  -- app-level shutdown action
  HasuraApp app cacheRef cacheInitTime shutdownApp <- mkWaiApp soTxIso
                                                      logger
                                                      sqlGenCtx
                                                      soEnableAllowlist
                                                      _icPgPool
                                                      _icConnInfo
                                                      _icHttpManager
                                                      authMode
                                                      soCorsConfig
                                                      soEnableConsole
                                                      soConsoleAssetsDir
                                                      soEnableTelemetry
                                                      _icInstanceId
                                                      soEnabledAPIs
                                                      soLiveQueryOpts
                                                      soPlanCacheOptions

  -- log inconsistent schema objects
  inconsObjs <- scInconsistentObjs <$> liftIO (getSCFromRef cacheRef)
  liftIO $ logInconsObjs logger inconsObjs

  -- start a background thread for schema sync
  startSchemaSync sqlGenCtx _icPgPool logger _icHttpManager
    cacheRef _icInstanceId cacheInitTime

  let warpSettings = Warp.setPort soPort
                     . Warp.setHost soHost
                     . Warp.setGracefulShutdownTimeout (Just 30) -- 30s graceful shutdown
                     . Warp.setInstallShutdownHandler (shutdownHandler logger shutdownApp)
                     $ Warp.defaultSettings

  -- event-trigger worker tuning, overridable via environment variables
  maxEvThrds <- liftIO $ getFromEnv defaultMaxEventThreads "HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE"
  fetchI <- fmap milliseconds $ liftIO $
    getFromEnv defaultFetchIntervalMilliSec "HASURA_GRAPHQL_EVENTS_FETCH_INTERVAL"
  logEnvHeaders <- liftIO $ getFromEnv False "LOG_HEADERS_FROM_ENV"

  -- prepare event triggers data
  prepareEvents _icPgPool logger
  eventEngineCtx <- liftIO $ atomically $ initEventEngineCtx maxEvThrds fetchI
  unLogger logger $ mkGenericStrLog LevelInfo "event_triggers" "starting workers"
  void $ liftIO $ C.forkIO $ processEventQueue logger logEnvHeaders
    _icHttpManager _icPgPool (getSCFromRef cacheRef) eventEngineCtx

  -- start a background thread to check for updates
  void $ liftIO $ C.forkIO $ checkForUpdates loggerCtx _icHttpManager

  -- TODO async/immortal:
  -- start a background thread for telemetry
  when soEnableTelemetry $ do
    unLogger logger $ mkGenericStrLog LevelInfo "telemetry" telemetryNotice
    void $ liftIO $ C.forkIO $ runTelemetry logger _icHttpManager (getSCFromRef cacheRef) _icDbUid _icInstanceId

  -- log how long initialisation took, then hand control to Warp (blocks)
  finishTime <- liftIO Clock.getCurrentTime
  let apiInitTime = realToFrac $ Clock.diffUTCTime finishTime initTime
  unLogger logger $
    mkGenericLog LevelInfo "server" $ StartupTimeInfo "starting API server" apiInitTime
  liftIO $ Warp.runSettings warpSettings app

  where
    -- unlock any events left locked by a previous run; exit on failure
    prepareEvents pool (Logger logger) = do
      liftIO $ logger $ mkGenericStrLog LevelInfo "event_triggers" "preparing data"
      res <- runTx pool unlockAllEvents
      either printErrJExit return res

    -- read a 'Read'-able value from an environment variable, falling back to
    -- the given default when unset; exits the process on a parse failure
    getFromEnv :: (Read a) => a -> String -> IO a
    getFromEnv defaults env = do
      mEnv <- lookupEnv env
      let mRes = case mEnv of
            Nothing  -> Just defaults
            Just val -> readMaybe val
          eRes = maybe (Left $ "Wrong expected type for environment variable: " <> env) Right mRes
      either printErrExit return eRes

    runTx pool tx =
      liftIO $ runExceptT $ Q.runTx pool (Q.Serializable, Nothing) tx

    -- | Catches the SIGTERM signal and initiates a graceful shutdown. Graceful shutdown for regular HTTP
    -- requests is already implemented in Warp, and is triggered by invoking the 'closeSocket' callback.
    -- We only catch the SIGTERM signal once, that is, if the user hits CTRL-C once again, we terminate
    -- the process immediately.
    shutdownHandler :: Logger Hasura -> IO () -> IO () -> IO ()
    shutdownHandler (Logger logger) shutdownApp closeSocket =
      void $ Signals.installHandler
        Signals.sigTERM
        (Signals.CatchOnce shutdownSequence)
        Nothing
      where
        shutdownSequence = do
          closeSocket
          shutdownApp
          logShutdown

        logShutdown = logger $ mkGenericStrLog LevelInfo "server" "gracefully shutting down server"
|
|
|
|
|
|
|
|
-- | Run a 'Run' action with admin privileges against the given pool, using a
-- serializable transaction context in read-write mode.
runAsAdmin
  :: (MonadIO m)
  => Q.PGPool
  -> SQLGenCtx
  -> HTTP.Manager
  -> Run a
  -> m (Either QErr a)
runAsAdmin pool sqlGenCtx httpManager action =
  runExceptT $
    peelRun
      (RunCtx adminUserInfo httpManager sqlGenCtx)
      (PGExecCtx pool Q.Serializable)
      Q.ReadWrite
      action
|
2019-11-26 15:14:21 +03:00
|
|
|
|
|
|
|
-- | Decode a raw JSON query, build the schema cache, run the query and
-- return its encoded result. Throws a 400 on malformed JSON input.
execQuery
  :: ( HasVersion
     , CacheRWM m
     , MonadTx m
     , MonadIO m
     , HasHttpManager m
     , HasSQLGenCtx m
     , UserInfoM m
     , HasSystemDefined m
     )
  => BLC.ByteString
  -> m BLC.ByteString
execQuery queryBs = do
  query <- maybe (throw400 InvalidJSON "invalid json") decodeValue (A.decode queryBs)
  buildSchemaCacheStrict
  encJToLBS <$> runQueryM query
|
|
|
|
|
|
|
|
|
|
|
|
-- Both methods delegate to the shared http-log context builders and emit
-- through the provided logger.
instance HttpLog AppM where
  -- NOTE: the context builder takes @qErr@ before @req@, the opposite of the
  -- order in which this method receives them — keep that in mind when editing.
  logHttpError logger userInfoM reqId httpReq req qErr headers =
    unLogger logger $ mkHttpLog $
      mkHttpErrorLogContext userInfoM reqId httpReq qErr req Nothing Nothing headers

  -- the two ignored arguments are not used when building the access-log context
  logHttpSuccess logger userInfoM reqId httpReq _ _ compressedResponse qTime cType headers =
    unLogger logger $ mkHttpLog $
      mkHttpAccessLogContext userInfoM reqId httpReq compressedResponse qTime cType headers
|
|
|
|
|
|
|
|
-- Authentication simply defers to 'getUserInfoWithExpTime', surfacing any
-- failure as a 'Left'.
instance UserAuthentication AppM where
  resolveUserInfo logger manager headers =
    runExceptT . getUserInfoWithExpTime logger manager headers
|
2019-11-26 15:14:21 +03:00
|
|
|
|
|
|
|
-- Admin-only queries are rejected for any non-admin role with a 400
-- access-denied error under the "args" path.
instance MetadataApiAuthorization AppM where
  authorizeMetadataApi query userInfo =
    when (requiresAdmin query && userRole userInfo /= adminRole) $
      withPathK "args" $ throw400 AccessDenied "restricted access : admin only"
|
|
|
|
|
|
|
|
-- Console rendering is pure; just lift the result of 'mkConsoleHTML'.
instance ConsoleRenderer AppM where
  renderConsole path authMode enableTelemetry =
    pure . mkConsoleHTML path authMode enableTelemetry
|
|
|
|
|
2020-01-23 00:55:55 +03:00
|
|
|
-- | Render the console HTML from the embedded mustache template
-- (@src-rsr/console.html@, embedded at compile time). Returns 'Left' with an
-- error message if template rendering fails.
mkConsoleHTML :: HasVersion => Text -> AuthMode -> Bool -> Maybe Text -> Either String Text
mkConsoleHTML path authMode enableTelemetry consoleAssetsDir =
  renderHtmlTemplate consoleTmplt $
    -- variables required to render the template
    A.object [ "isAdminSecretSet" .= isAdminSecretSet authMode
             , "consolePath" .= consolePath
             , "enableTelemetry" .= boolToText enableTelemetry
             -- serve assets from the CDN only when no local assets dir is set
             , "cdnAssets" .= boolToText (isNothing consoleAssetsDir)
             , "assetsVersion" .= consoleAssetsVersion
             , "serverVersion" .= currentVersion
             ]
  where
    -- normalise the request path under the /console prefix
    consolePath = case path of
      "" -> "/console"
      r -> "/console/" <> r

    -- template embedded at compile time via Template Haskell
    consoleTmplt = $(M.embedSingleTemplate "src-rsr/console.html")
|
|
|
|
|
|
|
|
|
|
|
|
-- | Notice logged at startup when telemetry is enabled, explaining what is
-- collected and how to opt out.
telemetryNotice :: String
telemetryNotice = concat
  [ "Help us improve Hasura! The graphql-engine server collects anonymized "
  , "usage stats which allows us to keep improving Hasura at warp speed. "
  , "To read more or opt-out, visit https://docs.hasura.io/1.0/graphql/manual/guides/telemetry.html"
  ]
|