Mirror of https://github.com/hasura/graphql-engine.git, synced 2024-12-15 09:22:43 +03:00
cf531b05cb
This PR is on top of #7789.

### Description

This PR entirely rewrites the API of the Tracing library, to make `interpTraceT` a thing of the past.

Before this change, we ran traces by sticking a `TraceT` on top of whatever we were doing. This had several major drawbacks:
- we were carrying a bunch of `TraceT` across the codebase, and the entire codebase had to know about it
- we needed to carry a second class constraint around (`HasReporterM`) to be able to run all of those traces
- we kept having to do stack rewriting with `interpTraceT`, which went from inconvenient to horrible
- we had to declare several behavioral instances on `TraceT m`

This PR rewrites all of `Tracing` using a more conventional model: there is ONE `TraceT` at the bottom of the stack, and there is an associated class constraint `MonadTrace`: any part of the code that satisfies `MonadTrace` is able to create new traces. We NEVER have to do stack rewriting, `interpTraceT` is gone, and `TraceT` and `Reporter` become implementation details that 99% of the code is blissfully unaware of: code that needs to do tracing only needs to declare that the monad in which it operates implements `MonadTrace`.

In doing so, this PR revealed **several bugs in the codebase**: places where we were expecting to trace something, but due to the default instance of `HasReporterM IO` we would actually not do anything. This PR also splits the code of `Tracing` into more byte-sized modules, with the goal of potentially moving to `server/lib` down the line.

### Remaining work

This PR is a draft; what's left to do is:
- [x] make Pro compile; I haven't updated `HasuraPro/Main` yet
- [x] document Tracing by writing a note that explains how to use the library, the meaning of "reporter", "trace", and "span", and the pitfalls
- [x] discuss some of the trade-offs in the implementation, which is why I'm opening this PR already despite it not fully building yet
- [x] it depends on #7789 being merged first

PR-URL: https://github.com/hasura/graphql-engine-mono/pull/7791
GitOrigin-RevId: cadd32d039134c93ddbf364599a2f4dd988adea8
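As a rough illustration of the model described above, here is a minimal, self-contained sketch in the spirit of the new design. It is not the actual `Hasura.Tracing` API: `Reporter`, `runTraceT`, `newSpan`, and `lookupUser` are illustrative names and signatures assumed for this example; only the shape (one concrete `TraceT` at the bottom of the stack, a `MonadTrace` constraint everywhere else) follows the description.

```haskell
-- Minimal sketch only; not the real Hasura.Tracing module. 'Reporter',
-- 'runTraceT' and 'newSpan' are illustrative names, not the actual exports.
{-# LANGUAGE GeneralizedNewtypeDeriving #-}

module TracingSketch (main) where

import Control.Monad.IO.Class (MonadIO, liftIO)
import Control.Monad.Reader (ReaderT (..), ask)

-- A reporter consumes finished span data; here it simply prints it.
newtype Reporter = Reporter {report :: String -> IO ()}

-- The single concrete carrier, sitting once at the bottom of the stack.
newtype TraceT m a = TraceT {unTraceT :: ReaderT Reporter m a}
  deriving (Functor, Applicative, Monad, MonadIO)

runTraceT :: Reporter -> TraceT m a -> m a
runTraceT reporter action = runReaderT (unTraceT action) reporter

-- The constraint the rest of the codebase asks for: no stack rewriting,
-- no 'interpTraceT', no knowledge of 'Reporter'.
class Monad m => MonadTrace m where
  newSpan :: String -> m a -> m a

instance MonadIO m => MonadTrace (TraceT m) where
  newSpan name action = TraceT $ do
    reporter <- ask
    liftIO $ report reporter ("begin " <> name)
    result <- unTraceT action
    liftIO $ report reporter ("end " <> name)
    pure result

-- Application code only declares 'MonadTrace'; it never sees 'TraceT'.
lookupUser :: MonadTrace m => Int -> m String
lookupUser userId = newSpan "lookupUser" (pure ("user-" <> show userId))

main :: IO ()
main = runTraceT (Reporter putStrLn) $ do
  user <- lookupUser 42
  liftIO (putStrLn user)
```

The point of this shape is that picking a reporter happens exactly once, at the single `runTraceT` call at the top of the stack, rather than through an ambient `HasReporterM` instance that can silently default to doing nothing.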
173 lines · 6.7 KiB · Haskell
{-# LANGUAGE UndecidableInstances #-}

module Main (main) where

import Constants qualified
import Control.Concurrent.MVar
import Control.Monad.Trans.Managed (ManagedT (..))
import Control.Natural ((:~>) (..))
import Data.Aeson qualified as A
import Data.ByteString.Lazy.Char8 qualified as BL
import Data.ByteString.Lazy.UTF8 qualified as LBS
import Data.Environment qualified as Env
import Data.Text qualified as T
import Data.Time.Clock (getCurrentTime)
import Data.URL.Template
import Database.PG.Query qualified as PG
import Hasura.App
  ( PGMetadataStorageAppT,
    initGlobalCtx,
    initialiseContext,
    mkMSSQLSourceResolver,
    mkPgSourceResolver,
    runPGMetadataStorageAppT,
  )
import Hasura.Backends.Postgres.Connection.Settings
import Hasura.Backends.Postgres.Execute.Types
import Hasura.Base.Error
import Hasura.GraphQL.Schema.Options qualified as Options
import Hasura.Logging
import Hasura.Prelude
import Hasura.RQL.DDL.Schema.Cache
import Hasura.RQL.DDL.Schema.Cache.Common
import Hasura.RQL.Types.Common
import Hasura.RQL.Types.Metadata (emptyMetadataDefaults)
import Hasura.RQL.Types.ResizePool
import Hasura.RQL.Types.SchemaCache.Build
import Hasura.Server.Init
import Hasura.Server.Init.FeatureFlag as FF
import Hasura.Server.Metrics (ServerMetricsSpec, createServerMetrics)
import Hasura.Server.Migrate
import Hasura.Server.Prometheus (makeDummyPrometheusMetrics)
import Hasura.Server.Types
import Hasura.Tracing (sampleAlways)
import Network.HTTP.Client qualified as HTTP
import Network.HTTP.Client.TLS qualified as HTTP
import System.Environment (getEnvironment)
import System.Exit (exitFailure)
import System.Metrics qualified as EKG
import Test.Hasura.EventTriggerCleanupSuite qualified as EventTriggerCleanupSuite
import Test.Hasura.Server.MigrateSuite qualified as MigrateSuite
import Test.Hasura.StreamingSubscriptionSuite qualified as StreamingSubscriptionSuite
import Test.Hspec

{-# ANN main ("HLINT: ignore Use env_from_function_argument" :: String) #-}
main :: IO ()
main = do
  env <- getEnvironment
  let envMap = Env.mkEnvironment env

  pgUrlText <- flip onLeft printErrExit $
    runWithEnv env $ do
      let envVar = _envVar databaseUrlOption
      maybeV <- considerEnv envVar
      onNothing maybeV $
        throwError $
          "Expected: " <> envVar

  let pgConnInfo = PG.ConnInfo 1 $ PG.CDDatabaseURI $ txtToBs pgUrlText
      urlConf = UrlValue $ InputWebhook $ mkPlainURLTemplate pgUrlText
      sourceConnInfo =
        PostgresSourceConnInfo urlConf (Just setPostgresPoolSettings) True PG.ReadCommitted Nothing
      sourceConfig = PostgresConnConfiguration sourceConnInfo Nothing defaultPostgresExtensionsSchema Nothing mempty
      rci =
        PostgresConnInfo
          { _pciDatabaseConn = Nothing,
            _pciRetries = Nothing
          }
      serveOptions = Constants.serveOptions
      metadataDbUrl = Just (T.unpack pgUrlText)

  pgPool <- PG.initPGPool pgConnInfo PG.defaultConnParams {PG.cpConns = 1} print
  let pgContext = mkPGExecCtx PG.Serializable pgPool NeverResizePool

      logger :: Logger Hasura = Logger $ \l -> do
        let (logLevel, logType :: EngineLogType Hasura, logDetail) = toEngineLog l
        t <- liftIO $ getFormattedTime Nothing
        liftIO $ putStrLn $ LBS.toString $ A.encode $ EngineLog t logLevel logType logDetail

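      -- Performs the one-time setup (HTTP manager, metrics store, app context,
      -- catalog migration, schema cache) and returns a natural transformation
      -- that runs migrate-suite actions against a shared schema cache ref.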
      setupCacheRef = do
        httpManager <- HTTP.newManager HTTP.tlsManagerSettings
        globalCtx <- initGlobalCtx envMap metadataDbUrl rci
        (_, serverMetrics) <-
          liftIO $ do
            store <- EKG.newStore @TestMetricsSpec
            serverMetrics <-
              liftIO $ createServerMetrics $ EKG.subset ServerSubset store
            pure (EKG.subset EKG.emptyOf store, serverMetrics)
        prometheusMetrics <- makeDummyPrometheusMetrics
        let sqlGenCtx =
              SQLGenCtx
                Options.Don'tStringifyNumbers
                Options.Don'tDangerouslyCollapseBooleans
                Options.Don'tOptimizePermissionFilters
                Options.EnableBigQueryStringNumericInput
            maintenanceMode = MaintenanceModeDisabled
            readOnlyMode = ReadOnlyModeDisabled
            serverConfigCtx =
              ServerConfigCtx
                Options.InferFunctionPermissions
                Options.DisableRemoteSchemaPermissions
                sqlGenCtx
                maintenanceMode
                mempty
                EventingEnabled
                readOnlyMode
                (_default defaultNamingConventionOption)
                emptyMetadataDefaults
                (FF.checkFeatureFlag mempty)
            cacheBuildParams = CacheBuildParams httpManager (mkPgSourceResolver print) mkMSSQLSourceResolver serverConfigCtx

        (appCtx, appEnv) <-
          runManagedT
            ( initialiseContext
                envMap
                globalCtx
                serveOptions
                Nothing
                serverMetrics
                prometheusMetrics
                sampleAlways
            )
            $ \(appCtx, appEnv) -> return (appCtx, appEnv)

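        -- Unwind the whole test stack down to IO, printing the error and
        -- exiting if either the app monad or the cache build fails.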
        let run :: ExceptT QErr (PGMetadataStorageAppT CacheBuild) a -> IO a
            run =
              runExceptT
                >>> runPGMetadataStorageAppT (appCtx, appEnv)
                >>> runCacheBuild cacheBuildParams
                >>> runExceptT
                >=> flip onLeft printErrJExit
                >=> flip onLeft printErrJExit

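        -- Run the catalog migrations and build the initial rebuildable schema
        -- cache that the migrate suite starts from.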
        (metadata, schemaCache) <- run do
          metadata <-
            snd
              <$> (liftEitherM . runExceptT . _pecRunTx pgContext (PGExecCtxInfo (Tx PG.ReadWrite Nothing) InternalRawQuery))
                (migrateCatalog (Just sourceConfig) defaultPostgresExtensionsSchema maintenanceMode =<< liftIO getCurrentTime)
          schemaCache <- lift $ lift $ buildRebuildableSchemaCache logger envMap metadata
          pure (metadata, schemaCache)

        cacheRef <- newMVar schemaCache
        pure $ NT (run . flip MigrateSuite.runCacheRefT cacheRef . fmap fst . runMetadataT metadata emptyMetadataDefaults)

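  -- The remaining suites are built up front in IO and run as plain Specs below.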
  streamingSubscriptionSuite <- StreamingSubscriptionSuite.buildStreamingSubscriptionSuite
  eventTriggerLogCleanupSuite <- EventTriggerCleanupSuite.buildEventTriggerCleanupSuite

  hspec do
    describe "Migrate suite" $
      beforeAll setupCacheRef $
        describe "Hasura.Server.Migrate" $
          MigrateSuite.suite sourceConfig pgContext pgConnInfo
    describe "Streaming subscription suite" $ streamingSubscriptionSuite
    describe "Event trigger log cleanup suite" $ eventTriggerLogCleanupSuite

printErrExit :: String -> IO a
printErrExit = (*> exitFailure) . putStrLn

printErrJExit :: (A.ToJSON a) => a -> IO b
printErrJExit = (*> exitFailure) . BL.putStrLn . A.encode

-- | Metrics specification for the test EKG store created in 'setupCacheRef'
-- above; 'ServerSubset' picks the server metrics out of it.
data TestMetricsSpec name metricType tags
  = ServerSubset (ServerMetricsSpec name metricType tags)