mirror of https://github.com/hasura/graphql-engine.git, synced 2024-12-19 13:31:43 +03:00
d7309b811e
### Description

As part of another project (the continuation of #8421), I started a cleanup of `Hasura.App`, focusing on deleting old code and grouping together things that belong together. This quickly grew into a refactor of `GlobalCtx`, now renamed to `BasicConnectionInfo`. This small refactor adds comments and aims to make clear what those types and functions are for. It also changes how the default Postgres connection info is created, folding that step into the creation of the `BasicConnectionInfo` itself, to deduplicate similar code across different files. This is expected to be a no-op.

PR-URL: https://github.com/hasura/graphql-engine-mono/pull/8440
GitOrigin-RevId: 412c5b1905f629beb9c6cd262b9798cb31c93bdb
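For orientation, here is a minimal sketch of the shape this change introduces, as it is exercised by the test file below: `BasicConnectionInfo` pairs the metadata-database connection info with an optional connection for the default Postgres source, and `initMetadataConnectionInfo` resolves the former from the environment, the metadata DB URL, and the `--database-url` connection info. Only the names that actually appear in the test (`BasicConnectionInfo`, `initMetadataConnectionInfo`, `PostgresConnConfiguration`) are taken from the code; the field names and field types in the sketch are illustrative assumptions, not the actual definitions in `Hasura.App`.

```haskell
-- A minimal sketch, not the actual definition in Hasura.App.
import Database.PG.Query qualified as PG
import Hasura.Backends.Postgres.Connection.Settings

-- | Replaces the old 'GlobalCtx': connection info for the metadata database,
-- plus an optional connection for the default Postgres source.
data BasicConnectionInfo = BasicConnectionInfo
  { bciMetadataConnInfo :: PG.ConnInfo, -- assumed field name and type
    bciDefaultPostgres :: Maybe PostgresConnConfiguration -- assumed field name
  }

-- How the test below obtains one (verbatim from Main.hs):
--   metadataConnectionInfo <- initMetadataConnectionInfo envMap metadataDbUrl rci
--   let globalCtx = BasicConnectionInfo metadataConnectionInfo Nothing
```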
174 lines
6.9 KiB
Haskell
{-# LANGUAGE UndecidableInstances #-}

module Main (main) where

import Constants qualified
import Control.Concurrent.MVar
import Control.Monad.Trans.Managed (ManagedT (..))
import Control.Natural ((:~>) (..))
import Data.Aeson qualified as A
import Data.ByteString.Lazy.Char8 qualified as BL
import Data.ByteString.Lazy.UTF8 qualified as LBS
import Data.Environment qualified as Env
import Data.Text qualified as T
import Data.Time.Clock (getCurrentTime)
import Data.URL.Template
import Database.PG.Query qualified as PG
import Hasura.App
  ( BasicConnectionInfo (..),
    PGMetadataStorageAppT,
    initMetadataConnectionInfo,
    initialiseContext,
    mkMSSQLSourceResolver,
    mkPgSourceResolver,
    runPGMetadataStorageAppT,
  )
import Hasura.Backends.Postgres.Connection.Settings
import Hasura.Backends.Postgres.Execute.Types
import Hasura.Base.Error
import Hasura.GraphQL.Schema.Options qualified as Options
import Hasura.Logging
import Hasura.Prelude
import Hasura.RQL.DDL.Schema.Cache
import Hasura.RQL.DDL.Schema.Cache.Common
import Hasura.RQL.Types.Common
import Hasura.RQL.Types.Metadata (emptyMetadataDefaults)
import Hasura.RQL.Types.ResizePool
import Hasura.RQL.Types.SchemaCache.Build
import Hasura.Server.Init
import Hasura.Server.Init.FeatureFlag as FF
import Hasura.Server.Metrics (ServerMetricsSpec, createServerMetrics)
import Hasura.Server.Migrate
import Hasura.Server.Prometheus (makeDummyPrometheusMetrics)
import Hasura.Server.Types
import Hasura.Tracing (sampleAlways)
import Network.HTTP.Client qualified as HTTP
import Network.HTTP.Client.TLS qualified as HTTP
import System.Environment (getEnvironment)
import System.Exit (exitFailure)
import System.Metrics qualified as EKG
import Test.Hasura.EventTriggerCleanupSuite qualified as EventTriggerCleanupSuite
import Test.Hasura.Server.MigrateSuite qualified as MigrateSuite
import Test.Hasura.StreamingSubscriptionSuite qualified as StreamingSubscriptionSuite
import Test.Hspec

{-# ANN main ("HLINT: ignore Use env_from_function_argument" :: String) #-}
main :: IO ()
main = do
  env <- getEnvironment
  let envMap = Env.mkEnvironment env

  pgUrlText <- flip onLeft printErrExit $
    runWithEnv env $ do
      let envVar = _envVar databaseUrlOption
      maybeV <- considerEnv envVar
      onNothing maybeV $
        throwError $
          "Expected: " <> envVar

  let pgConnInfo = PG.ConnInfo 1 $ PG.CDDatabaseURI $ txtToBs pgUrlText
      urlConf = UrlValue $ InputWebhook $ mkPlainURLTemplate pgUrlText
      sourceConnInfo =
        PostgresSourceConnInfo urlConf (Just setPostgresPoolSettings) True PG.ReadCommitted Nothing
      sourceConfig = PostgresConnConfiguration sourceConnInfo Nothing defaultPostgresExtensionsSchema Nothing mempty
      rci =
        PostgresConnInfo
          { _pciDatabaseConn = Nothing,
            _pciRetries = Nothing
          }
      serveOptions = Constants.serveOptions
      metadataDbUrl = Just (T.unpack pgUrlText)

  pgPool <- PG.initPGPool pgConnInfo PG.defaultConnParams {PG.cpConns = 1} print
  let pgContext = mkPGExecCtx PG.Serializable pgPool NeverResizePool

      logger :: Logger Hasura = Logger $ \l -> do
        let (logLevel, logType :: EngineLogType Hasura, logDetail) = toEngineLog l
        t <- liftIO $ getFormattedTime Nothing
        liftIO $ putStrLn $ LBS.toString $ A.encode $ EngineLog t logLevel logType logDetail

      setupCacheRef = do
        httpManager <- HTTP.newManager HTTP.tlsManagerSettings
        metadataConnectionInfo <- initMetadataConnectionInfo envMap metadataDbUrl rci
        let globalCtx = BasicConnectionInfo metadataConnectionInfo Nothing
        (_, serverMetrics) <-
          liftIO $ do
            store <- EKG.newStore @TestMetricsSpec
            serverMetrics <-
              liftIO $ createServerMetrics $ EKG.subset ServerSubset store
            pure (EKG.subset EKG.emptyOf store, serverMetrics)
        prometheusMetrics <- makeDummyPrometheusMetrics
        let sqlGenCtx =
              SQLGenCtx
                Options.Don'tStringifyNumbers
                Options.Don'tDangerouslyCollapseBooleans
                Options.Don'tOptimizePermissionFilters
                Options.EnableBigQueryStringNumericInput
            maintenanceMode = MaintenanceModeDisabled
            readOnlyMode = ReadOnlyModeDisabled
            serverConfigCtx =
              ServerConfigCtx
                Options.InferFunctionPermissions
                Options.DisableRemoteSchemaPermissions
                sqlGenCtx
                maintenanceMode
                mempty
                EventingEnabled
                readOnlyMode
                (_default defaultNamingConventionOption)
                emptyMetadataDefaults
                (CheckFeatureFlag $ FF.checkFeatureFlag mempty)
                ApolloFederationDisabled
            cacheBuildParams = CacheBuildParams httpManager (mkPgSourceResolver print) mkMSSQLSourceResolver serverConfigCtx

        (_appStateRef, appEnv) <- runManagedT
          ( initialiseContext
              envMap
              globalCtx
              serveOptions
              Nothing
              serverMetrics
              prometheusMetrics
              sampleAlways
          )
          $ \(appStateRef, appEnv) -> return (appStateRef, appEnv)

        let run :: ExceptT QErr (PGMetadataStorageAppT IO) a -> IO a
            run =
              runExceptT
                >>> runPGMetadataStorageAppT appEnv
                >>> flip onLeftM printErrJExit

        -- why are we building the schema cache here? it's already built in initialiseContext
        (metadata, schemaCache) <- run do
          metadata <-
            snd
              <$> (liftEitherM . runExceptT . _pecRunTx pgContext (PGExecCtxInfo (Tx PG.ReadWrite Nothing) InternalRawQuery))
                (migrateCatalog (Just sourceConfig) defaultPostgresExtensionsSchema maintenanceMode =<< liftIO getCurrentTime)
          schemaCache <- runCacheBuild cacheBuildParams $ buildRebuildableSchemaCache logger envMap metadata
          pure (metadata, schemaCache)

        cacheRef <- newMVar schemaCache
        pure $ NT (run . flip MigrateSuite.runCacheRefT (serverConfigCtx, cacheRef) . fmap fst . runMetadataT metadata emptyMetadataDefaults)

  streamingSubscriptionSuite <- StreamingSubscriptionSuite.buildStreamingSubscriptionSuite
  eventTriggerLogCleanupSuite <- EventTriggerCleanupSuite.buildEventTriggerCleanupSuite

  hspec do
    describe "Migrate suite" $
      beforeAll setupCacheRef $
        describe "Hasura.Server.Migrate" $
          MigrateSuite.suite sourceConfig pgContext pgConnInfo
    describe "Streaming subscription suite" $ streamingSubscriptionSuite
    describe "Event trigger log cleanup suite" $ eventTriggerLogCleanupSuite

printErrExit :: String -> IO a
printErrExit = (*> exitFailure) . putStrLn

printErrJExit :: (A.ToJSON a) => a -> IO b
printErrJExit = (*> exitFailure) . BL.putStrLn . A.encode

-- | Used only for 'runApp' above.
data TestMetricsSpec name metricType tags
  = ServerSubset (ServerMetricsSpec name metricType tags)