server: drop LazyTxT newtype

This is a follow-up to #1959.

Today, I spent a while in review figuring out that a harmless PR change didn't do anything,
because it merely moved from a `runLazy...` function to one without the `Lazy`. So let's remove
that source of confusion.

This should be a bit easier to review commit by commit, since some of the functions had
confusing names. (E.g. there was a misnamed `Migrate.Internal.runTx` before.)

The change should be a no-op.
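
For example (taken from one of the call sites in this diff), the change at each use site is a
pure rename:

    -- before
    runLazyTx (_pscExecCtx pgSource) Q.ReadWrite $ liftTx $ unlockEvents l
    -- after
    runTx (_pscExecCtx pgSource) Q.ReadWrite $ liftTx $ unlockEvents l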

https://github.com/hasura/graphql-engine-mono/pull/2335

GitOrigin-RevId: 0f284c4c0f814482d7827e7732a6d49e7735b302
Robert 2021-09-15 22:45:49 +02:00 committed by hasura-bot
parent 9b7234b861
commit fe035125f4
25 changed files with 103 additions and 154 deletions

View File

@ -745,7 +745,7 @@ runHGEServer setupHook env ServeOptions{..} ServeCtx{..} initTime postPollHook s
-- event triggers should be tied to the life cycle of a source
forM_ pgSources $ \pgSource -> do
logger $ mkGenericStrLog LevelInfo "event_triggers" "unlocking events that are locked by the HGE"
let unlockEvents' l = MetadataStorageT $ runLazyTx (_pscExecCtx pgSource) Q.ReadWrite $ liftTx $ unlockEvents l
let unlockEvents' l = MetadataStorageT $ runTx (_pscExecCtx pgSource) Q.ReadWrite $ liftTx $ unlockEvents l
unlockEventsForShutdown hasuraLogger "event_triggers" "" unlockEvents' leEvents
shutdownAsyncActions

View File

@ -5,11 +5,10 @@
module Hasura.Backends.Postgres.Connection
( MonadTx(..)
, LazyTxT(..)
, runLazyTx
, runTx
, runTxWithCtx
, runQueryTx
, runQueryTxWithCtx
, withUserInfo
, withTraceContext
, setHeadersTx
@ -98,56 +97,51 @@ instance (MonadTx m) => MonadTx (Tracing.TraceT m) where
instance (MonadIO m) => MonadTx (Q.TxET QErr m) where
liftTx = hoist liftIO
-- | This type *used to be* like 'Q.TxE', but deferred acquiring a Postgres
-- connection until the first execution of 'liftTx'. If no call to 'liftTx' is
-- ever reached (i.e. a successful result is returned or an error is raised
-- before ever executing a query), no connection was ever acquired.
--
-- This was useful for certain code paths that only conditionally need database
-- access. For example, although most queries will eventually hit Postgres,
-- introspection queries or queries that exclusively use remote schemas never
-- will; using 'LazyTxT e m' keeps those branches from unnecessarily allocating
-- a connection.
--
-- However introspection queries and remote queries now never end up producing a
-- 'LazyTxT' action, and hence the laziness adds no benefit, so that we could
-- simplify this type, its name being a mere reminder of a past design of
-- graphql-engine.
--
-- It may be worthwhile in the future to simply replace this type with Q.TxET
-- entirely.
newtype LazyTxT e m a = LazyTxT {unLazyTxT :: Q.TxET e m a}
deriving (Functor, Applicative, Monad, MonadError e, MonadIO, MonadTrans)
runLazyTx
-- | Executes the given query in a transaction of the specified
-- mode, within the provided PGExecCtx.
runTx
:: ( MonadIO m
, MonadBaseControl IO m
)
=> PGExecCtx
-> Q.TxAccess
-> LazyTxT QErr m a -> ExceptT QErr m a
runLazyTx pgExecCtx = \case
Q.ReadOnly -> _pecRunReadOnly pgExecCtx . unLazyTxT
Q.ReadWrite -> _pecRunReadWrite pgExecCtx . unLazyTxT
-> Q.TxET QErr m a
-> ExceptT QErr m a
runTx pgExecCtx = \case
Q.ReadOnly -> _pecRunReadOnly pgExecCtx
Q.ReadWrite -> _pecRunReadWrite pgExecCtx
runTxWithCtx
:: ( MonadIO m
, MonadBaseControl IO m
, MonadError QErr m
, Tracing.MonadTrace m
, UserInfoM m
)
=> PGExecCtx
-> Q.TxAccess
-> Q.TxET QErr m a
-> m a
runTxWithCtx pgExecCtx txAccess tx = do
traceCtx <- Tracing.currentContext
userInfo <- askUserInfo
liftEitherM
$ runExceptT
$ runTx pgExecCtx txAccess
$ withTraceContext traceCtx
$ withUserInfo userInfo tx
-- | This runs the given set of statements (Tx) without wrapping them in BEGIN
-- and COMMIT. This should only be used for running a single statement query!
runQueryTx
:: (MonadIO m, MonadError QErr m) => PGExecCtx -> LazyTxT QErr IO a -> m a
runQueryTx pgExecCtx ltx =
liftEither =<< liftIO (runExceptT $ _pecRunReadNoTx pgExecCtx (unLazyTxT ltx))
-- NOTE: Same warning as 'runQueryTx' applies here.
-- This variant of 'runQueryTx' allows passing the `userInfo` context and `tracecontext`.
runQueryTxWithCtx
:: (MonadIO m, MonadError QErr m)
=> UserInfo
-> Tracing.TraceContext
-> PGExecCtx
-> LazyTxT QErr IO a
:: ( MonadIO m
, MonadError QErr m
)
=> PGExecCtx
-> Q.TxET QErr IO a
-> m a
runQueryTxWithCtx userInfo traceCtx pgExecCtx =
runQueryTx pgExecCtx . withUserInfo userInfo . withTraceContext traceCtx
runQueryTx pgExecCtx tx =
liftEither =<< liftIO (runExceptT $ _pecRunReadNoTx pgExecCtx tx)
setHeadersTx :: (MonadIO m) => SessionVariables -> Q.TxET QErr m ()
setHeadersTx session = do
@ -159,8 +153,8 @@ setHeadersTx session = do
sessionInfoJsonExp :: SessionVariables -> S.SQLExp
sessionInfoJsonExp = S.SELit . encodeToStrictText
withUserInfo :: (MonadIO m) => UserInfo -> LazyTxT QErr m a -> LazyTxT QErr m a
withUserInfo uInfo ltx = LazyTxT (setHeadersTx $ _uiSession uInfo) >> ltx
withUserInfo :: (MonadIO m) => UserInfo -> Q.TxET QErr m a -> Q.TxET QErr m a
withUserInfo uInfo tx = setHeadersTx (_uiSession uInfo) >> tx
setTraceContextInTx :: (MonadIO m) => Tracing.TraceContext -> Q.TxET QErr m ()
setTraceContextInTx traceCtx = Q.unitQE defaultTxErrorHandler sql () False
@ -173,20 +167,13 @@ setTraceContextInTx traceCtx = Q.unitQE defaultTxErrorHandler sql () False
withTraceContext
:: (MonadIO m)
=> Tracing.TraceContext
-> LazyTxT QErr m a
-> LazyTxT QErr m a
withTraceContext ctx ltx = LazyTxT (setTraceContextInTx ctx) >> ltx
-> Q.TxET QErr m a
-> Q.TxET QErr m a
withTraceContext ctx tx = setTraceContextInTx ctx >> tx
deriving instance Tracing.MonadTrace m => Tracing.MonadTrace (Q.TxET e m)
deriving instance Tracing.MonadTrace m => Tracing.MonadTrace (LazyTxT e m)
deriving instance (MonadIO m) => MonadTx (LazyTxT QErr m)
deriving instance (MonadBase IO m) => MonadBase IO (LazyTxT e m)
deriving instance (MonadBaseControl IO m) => MonadBaseControl IO (LazyTxT e m)
instance (MonadIO m) => MonadUnique (LazyTxT e m) where
instance (MonadIO m) => MonadUnique (Q.TxET e m) where
newUnique = liftIO newUnique
doesSchemaExist :: MonadTx m => SchemaName -> m Bool
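
For context, here is a minimal usage sketch of the simplified API, mirroring the call sites
changed elsewhere in this PR; it is not part of the diff, and `pgExecCtx` and `someTx` are
placeholder names:

    -- Run a transaction in the requested access mode; the PGExecCtx supplies the
    -- connection, just as the former runLazyTx did.
    -- Placeholders: pgExecCtx :: PGExecCtx, someTx :: Q.TxET QErr m a
    result <- liftEitherM $ runExceptT $ runTx pgExecCtx Q.ReadOnly someTx

    -- When user info and trace context are available in the ambient monad,
    -- runTxWithCtx sets them in the transaction before running it.
    result' <- runTxWithCtx pgExecCtx Q.ReadWrite someTx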

View File

@ -31,7 +31,6 @@ import Hasura.RQL.DDL.Schema.Common
import Hasura.RQL.DDL.Schema.Diff
import Hasura.RQL.Types hiding (ConstraintName, fmFunction,
tmComputedFields, tmTable)
import Hasura.RQL.Types.Run
import Hasura.Server.Utils (quoteRegex)
import Hasura.Session
@ -161,7 +160,7 @@ runRunSQL q@RunSQL{..} = do
$ withUserInfo userInfo
$ execRawSQL rSql
else do
runQueryLazyTx pgExecCtx rTxAccessMode $ execRawSQL rSql
runTxWithCtx pgExecCtx rTxAccessMode $ execRawSQL rSql
where
execRawSQL :: (MonadTx n) => Text -> n EncJSON
execRawSQL =
@ -185,12 +184,12 @@ withMetadataCheck
, MonadError QErr m
, MonadIO m
)
=> SourceName -> Bool -> Q.TxAccess -> LazyTxT QErr m a -> m a
=> SourceName -> Bool -> Q.TxAccess -> Q.TxET QErr m a -> m a
withMetadataCheck source cascade txAccess action = do
SourceInfo _ preActionTables preActionFunctions sourceConfig <- askSourceInfo @('Postgres pgKind) source
(actionResult, metadataUpdater) <-
liftEitherM $ runExceptT $ runLazyTx (_pscExecCtx sourceConfig) txAccess $ do
liftEitherM $ runExceptT $ runTx (_pscExecCtx sourceConfig) txAccess $ do
-- Drop event triggers so no interference is caused to the sql query
forM_ (M.elems preActionTables) $ \tableInfo -> do
let eventTriggers = _tiEventTriggerInfoMap tableInfo

View File

@ -64,7 +64,7 @@ resolveDatabaseMetadata
. (Backend ('Postgres pgKind), ToMetadataFetchQuery pgKind, MonadIO m, MonadBaseControl IO m)
=> SourceConfig ('Postgres pgKind) -> m (Either QErr (ResolvedSource ('Postgres pgKind)))
resolveDatabaseMetadata sourceConfig = runExceptT do
(tablesMeta, functionsMeta, pgScalars) <- runLazyTx (_pscExecCtx sourceConfig) Q.ReadOnly $ do
(tablesMeta, functionsMeta, pgScalars) <- runTx (_pscExecCtx sourceConfig) Q.ReadOnly $ do
tablesMeta <- fetchTableMetadata
functionsMeta <- fetchFunctionMetadata
pgScalars <- fetchPgScalars
@ -126,7 +126,7 @@ initCatalogForSource maintenanceMode migrationTime = do
case NE.nonEmpty neededMigrations of
Just nonEmptyNeededMigrations -> do
-- Migrations aren't empty. We need to update the catalog version after migrations
traverse_ snd nonEmptyNeededMigrations
liftTx $ traverse_ snd nonEmptyNeededMigrations
setCatalogVersion "43" migrationTime
Nothing ->
-- No migrations exists, implies the database is migrated to latest metadata catalog version
@ -161,18 +161,18 @@ migrateSourceCatalogFrom prevVersion
<> latestSourceCatalogVersionText
<> ", but the current version is " <> prevVersion
| otherwise = do
traverse_ snd neededMigrations
liftTx $ traverse_ snd neededMigrations
setSourceCatalogVersion
where
neededMigrations =
dropWhile ((/= prevVersion) . fst) sourceMigrations
sourceMigrations :: (MonadTx m) => [(Text, m ())]
sourceMigrations :: [(Text, Q.TxE QErr ())]
sourceMigrations =
$(let migrationFromFile from =
let to = from + 1
path = "src-rsr/pg_source_migrations/" <> show from <> "_to_" <> show to <> ".sql"
in [| runTx $(makeRelativeToProject path >>= Q.sqlFromFile) |]
in [| Q.multiQE defaultTxErrorHandler $(makeRelativeToProject path >>= Q.sqlFromFile) |]
migrationsFromFile = map $ \(from :: Integer) ->
[| ($(TH.lift $ tshow from), $(migrationFromFile from)) |]
@ -181,11 +181,11 @@ sourceMigrations =
)
-- Upgrade the hdb_catalog schema to v43 (Metadata catalog)
upMigrationsUntil43 :: MonadTx m => [(Text, m ())]
upMigrationsUntil43 :: [(Text, Q.TxE QErr ())]
upMigrationsUntil43 =
$(let migrationFromFile from to =
let path = "src-rsr/migrations/" <> from <> "_to_" <> to <> ".sql"
in [| runTx $(makeRelativeToProject path >>= Q.sqlFromFile) |]
in [| Q.multiQE defaultTxErrorHandler $(makeRelativeToProject path >>= Q.sqlFromFile) |]
migrationsFromFile = map $ \(to :: Integer) ->
let from = to - 1

View File

@ -55,7 +55,7 @@ instance
type PreparedQuery ('Postgres pgKind) = PreparedSql
type MultiplexedQuery ('Postgres pgKind) = PGL.MultiplexedQuery
type ExecutionMonad ('Postgres pgKind) = Tracing.TraceT (LazyTxT QErr IO)
type ExecutionMonad ('Postgres pgKind) = Tracing.TraceT (Q.TxET QErr IO)
mkDBQueryPlan = pgDBQueryPlan
mkDBMutationPlan = pgDBMutationPlan
@ -125,7 +125,7 @@ pgDBLiveQueryExplain plan = do
-- query, maybe resulting in privilege escalation:
explainQuery = Q.fromText $ "EXPLAIN (FORMAT TEXT) " <> queryText
cohortId <- newCohortId
explanationLines <- liftEitherM $ runExceptT $ runLazyTx pgExecCtx Q.ReadOnly $
explanationLines <- liftEitherM $ runExceptT $ runTx pgExecCtx Q.ReadOnly $
map runIdentity <$> PGL.executeQuery explainQuery [(cohortId, _lqpVariables plan)]
pure $ LiveQueryPlanExplanation queryText explanationLines $ _lqpVariables plan
@ -142,7 +142,7 @@ convertDelete
-> IR.AnnDelG ('Postgres pgKind) (Const Void) (UnpreparedValue ('Postgres pgKind))
-> Bool
-> QueryTagsComment
-> m (Tracing.TraceT (LazyTxT QErr IO) EncJSON)
-> m (Tracing.TraceT (Q.TxET QErr IO) EncJSON)
convertDelete userInfo deleteOperation stringifyNum queryTags = do
preparedDelete <- traverse (prepareWithoutPlan userInfo) deleteOperation
pure $ flip runReaderT queryTags $ PGE.execDeleteQuery stringifyNum userInfo (preparedDelete, Seq.empty)
@ -157,7 +157,7 @@ convertUpdate
-> IR.AnnUpdG ('Postgres pgKind) (Const Void) (UnpreparedValue ('Postgres pgKind))
-> Bool
-> QueryTagsComment
-> m (Tracing.TraceT (LazyTxT QErr IO) EncJSON)
-> m (Tracing.TraceT (Q.TxET QErr IO) EncJSON)
convertUpdate userInfo updateOperation stringifyNum queryTags = do
preparedUpdate <- traverse (prepareWithoutPlan userInfo) updateOperation
if null $ IR.uqp1OpExps updateOperation
@ -178,7 +178,7 @@ convertInsert
-> IR.AnnInsert ('Postgres pgKind) (Const Void) (UnpreparedValue ('Postgres pgKind))
-> Bool
-> QueryTagsComment
-> m (Tracing.TraceT (LazyTxT QErr IO) EncJSON)
-> m (Tracing.TraceT (Q.TxET QErr IO) EncJSON)
convertInsert userInfo insertOperation stringifyNum queryTags = do
preparedInsert <- traverse (prepareWithoutPlan userInfo) insertOperation
pure $ flip runReaderT queryTags $ convertToSQLTransaction preparedInsert userInfo Seq.empty stringifyNum
@ -197,7 +197,7 @@ convertFunction
-- ^ VOLATILE function as 'SelectExp'
-> QueryTagsComment
-- ^ Query Tags
-> m (Tracing.TraceT (LazyTxT QErr IO) EncJSON)
-> m (Tracing.TraceT (Q.TxET QErr IO) EncJSON)
convertFunction userInfo jsonAggSelect unpreparedQuery queryTags = do
-- Transform the RQL AST into a prepared SQL query
(preparedQuery, PlanningSt _ _ planVals )
@ -277,7 +277,7 @@ pgDBSubscriptionPlan userInfo _sourceName sourceConfig unpreparedAST queryTags =
mkCurPlanTx
:: UserInfo
-> PreparedSql
-> (Tracing.TraceT (LazyTxT QErr IO) EncJSON, Maybe PreparedSql)
-> (Tracing.TraceT (Q.TxET QErr IO) EncJSON, Maybe PreparedSql)
mkCurPlanTx userInfo ps@(PreparedSql q prepMap) =
-- generate the SQL and prepared vars or the bytestring
let args = withUserVars (_uiSession userInfo) prepMap

View File

@ -58,7 +58,7 @@ runPGQuery
-> UserInfo
-> L.Logger L.Hasura
-> SourceConfig ('Postgres pgKind)
-> Tracing.TraceT (LazyTxT QErr IO) EncJSON
-> Tracing.TraceT (Q.TxET QErr IO) EncJSON
-> Maybe EQ.PreparedSql
-> m (DiffTime, EncJSON)
-- ^ Also return the time spent in the PG query; for telemetry.
@ -80,7 +80,7 @@ runPGMutation
-> UserInfo
-> L.Logger L.Hasura
-> SourceConfig ('Postgres pgKind)
-> Tracing.TraceT (LazyTxT QErr IO) EncJSON
-> Tracing.TraceT (Q.TxET QErr IO) EncJSON
-> Maybe EQ.PreparedSql
-> m (DiffTime, EncJSON)
-- ^ Also return 'Mutation' when the operation was a mutation, and the time
@ -92,7 +92,7 @@ runPGMutation reqId query fieldName userInfo logger sourceConfig tx _genSql = d
withElapsedTime $ trace ("Postgres Mutation for root field " <>> fieldName) $
Tracing.interpTraceT (
liftEitherM . liftIO . runExceptT
. runLazyTx (_pscExecCtx sourceConfig) Q.ReadWrite
. runTx (_pscExecCtx sourceConfig) Q.ReadWrite
. withTraceContext ctx
. withUserInfo userInfo
) tx
@ -159,7 +159,7 @@ runPGMutationTransaction reqId query userInfo logger sourceConfig mutations = do
withElapsedTime $ do
Tracing.interpTraceT (
liftEitherM . liftIO . runExceptT
. runLazyTx (_pscExecCtx sourceConfig) Q.ReadWrite
. runTx (_pscExecCtx sourceConfig) Q.ReadWrite
. withTraceContext ctx
. withUserInfo userInfo
) $ flip OMap.traverseWithKey mutations \fieldName dbsi ->

View File

@ -102,7 +102,7 @@ runActionExecution userInfo aep =
let selectAST = f actionLogResponse
selectResolved <- traverse (prepareWithoutPlan userInfo) selectAST
let querySQL = Q.fromBuilder $ toSQL $ RS.mkSQLSelect jsonAggSelect selectResolved
liftEitherM $ runExceptT $ runLazyTx (_pscExecCtx srcConfig) Q.ReadOnly $ liftTx $ asSingleRowJsonResp querySQL []
liftEitherM $ runExceptT $ runTx (_pscExecCtx srcConfig) Q.ReadOnly $ liftTx $ asSingleRowJsonResp querySQL []
AEPAsyncMutation actionId -> pure $ (,Nothing) $ encJFromJValue $ actionIdToText actionId
-- | Synchronously execute webhook handler and resolve response to action "output"
@ -140,7 +140,7 @@ resolveActionExecution env logger userInfo annAction execContext =
=> SourceConfig ('Postgres 'Vanilla) -> RS.AnnSimpleSelect ('Postgres 'Vanilla) -> [Q.PrepArg] -> m EncJSON
executeActionInDb sourceConfig astResolved prepArgs = do
let jsonAggType = mkJsonAggSelect outputType
liftEitherM $ runExceptT $ runLazyTx (_pscExecCtx sourceConfig) Q.ReadOnly $
liftEitherM $ runExceptT $ runTx (_pscExecCtx sourceConfig) Q.ReadOnly $
liftTx $ asSingleRowJsonResp (Q.fromBuilder $ toSQL $ RS.mkSQLSelect jsonAggType astResolved) prepArgs
runWebhook :: (HasVersion, MonadIO m, MonadError QErr m, Tracing.MonadTrace m)

View File

@ -37,7 +37,7 @@ import Hasura.Session
import Hasura.Metadata.Class
import Hasura.Tracing (TraceT)
import Hasura.Backends.Postgres.Connection (LazyTxT)
import qualified Database.PG.Query as Q
import Hasura.RQL.DDL.Schema.Cache (CacheRWT)
import Hasura.RQL.Types.Run (RunT (..))
import Hasura.RQL.Types.SchemaCache.Build (MetadataT (..))
@ -181,8 +181,8 @@ instance (MonadQueryTags m) => MonadQueryTags (TraceT m) where
instance (MonadQueryTags m) => MonadQueryTags (MetadataStorageT m) where
createQueryTags qtSourceConfig attr = retag (createQueryTags @m qtSourceConfig attr) :: Tagged (MetadataStorageT m) Text
instance (MonadQueryTags m) => MonadQueryTags (LazyTxT QErr m) where
createQueryTags qtSourceConfig attr = retag (createQueryTags @m qtSourceConfig attr) :: Tagged (LazyTxT QErr m) Text
instance (MonadQueryTags m) => MonadQueryTags (Q.TxET QErr m) where
createQueryTags qtSourceConfig attr = retag (createQueryTags @m qtSourceConfig attr) :: Tagged (Q.TxET QErr m) Text
instance (MonadQueryTags m) => MonadQueryTags (MetadataT m) where
createQueryTags qtSourceConfig attr = retag (createQueryTags @m qtSourceConfig attr) :: Tagged (MetadataT m) Text

View File

@ -20,6 +20,7 @@ import Network.HTTP.Client.Extended
import qualified Hasura.Tracing as Tracing
import qualified Database.PG.Query as Q
import Hasura.Base.Error
import Hasura.Eventing.HTTP
import Hasura.Eventing.ScheduledTrigger.Types
@ -301,7 +302,7 @@ instance (MonadMetadataStorage m) => MonadMetadataStorage (MetadataT m) where
clearActionData = lift . clearActionData
setProcessingActionLogsToPending = lift . setProcessingActionLogsToPending
instance (MonadMetadataStorage m) => MonadMetadataStorage (LazyTxT QErr m) where
instance (MonadMetadataStorage m) => MonadMetadataStorage (Q.TxET QErr m) where
fetchMetadataResourceVersion = lift fetchMetadataResourceVersion
fetchMetadata = lift fetchMetadata
fetchMetadataNotifications a b = lift $ fetchMetadataNotifications a b
@ -491,4 +492,4 @@ instance (MonadMetadataStorageQueryAPI m) => MonadMetadataStorageQueryAPI (Reade
instance (MonadMetadataStorageQueryAPI m) => MonadMetadataStorageQueryAPI (StateT s m)
instance (MonadMetadataStorageQueryAPI m) => MonadMetadataStorageQueryAPI (Tracing.TraceT m)
instance (MonadMetadataStorageQueryAPI m) => MonadMetadataStorageQueryAPI (MetadataT m)
instance (MonadMetadataStorageQueryAPI m) => MonadMetadataStorageQueryAPI (LazyTxT QErr m)
instance (MonadMetadataStorageQueryAPI m) => MonadMetadataStorageQueryAPI (Q.TxET QErr m)

View File

@ -368,7 +368,7 @@ buildSchemaCacheRule env = proc (metadata, invalidationKeys) -> do
maintenanceMode <- _sccMaintenanceMode <$> askServerConfigCtx
let
initCatalogAction =
runExceptT $ runLazyTx (_pscExecCtx sc) Q.ReadWrite (initCatalogForSource maintenanceMode migrationTime)
runExceptT $ runTx (_pscExecCtx sc) Q.ReadWrite (initCatalogForSource maintenanceMode migrationTime)
-- The `initCatalogForSource` action is retried here because
-- in cloud there will be multiple workers (graphql-engine instances)
-- trying to migrate the source catalog, when needed. This introduces

View File

@ -26,7 +26,6 @@ import Hasura.RQL.DML.Internal
import Hasura.RQL.DML.Types
import Hasura.RQL.IR.BoolExp
import Hasura.RQL.Types
import Hasura.RQL.Types.Run
import Hasura.SQL.Types
import Hasura.Session
@ -137,4 +136,4 @@ runCount
=> CountQuery -> m EncJSON
runCount q = do
sourceConfig <- askSourceConfig @('Postgres 'Vanilla) (cqSource q)
validateCountQ q >>= runQueryLazyTx (_pscExecCtx sourceConfig) Q.ReadOnly . countQToTx
validateCountQ q >>= runTxWithCtx (_pscExecCtx sourceConfig) Q.ReadOnly . countQToTx

View File

@ -33,7 +33,6 @@ import Hasura.RQL.DML.Internal
import Hasura.RQL.DML.Types
import Hasura.RQL.IR.Delete
import Hasura.RQL.Types
import Hasura.RQL.Types.Run
import Hasura.Session
import Hasura.GraphQL.Execute.Backend
@ -111,5 +110,5 @@ runDelete q = do
userInfo <- askUserInfo
let queryTags = QueryTagsComment $ Tagged.untag $ createQueryTags @m Nothing (encodeOptionalQueryTags Nothing)
validateDeleteQ q
>>= runQueryLazyTx (_pscExecCtx sourceConfig) Q.ReadWrite
>>= runTxWithCtx (_pscExecCtx sourceConfig) Q.ReadWrite
. flip runReaderT queryTags . execDeleteQuery strfyNum userInfo

View File

@ -29,7 +29,6 @@ import Hasura.RQL.DML.Internal
import Hasura.RQL.DML.Types
import Hasura.RQL.IR.Insert
import Hasura.RQL.Types
import Hasura.RQL.Types.Run
import Hasura.Session
import Hasura.GraphQL.Execute.Backend
@ -222,7 +221,7 @@ runInsert q = do
res <- convInsQ q
strfyNum <- stringifyNum . _sccSQLGenCtx <$> askServerConfigCtx
let queryTags = QueryTagsComment $ Tagged.untag $ createQueryTags @m Nothing (encodeOptionalQueryTags Nothing)
runQueryLazyTx (_pscExecCtx sourceConfig) Q.ReadWrite $
runTxWithCtx (_pscExecCtx sourceConfig) Q.ReadWrite $
runReaderT (execInsertQuery strfyNum userInfo res) queryTags
decodeInsObjs :: (UserInfoM m, QErrM m) => Value -> m [InsObj ('Postgres 'Vanilla)]

View File

@ -25,7 +25,6 @@ import Hasura.RQL.DML.Types
import Hasura.RQL.IR.OrderBy
import Hasura.RQL.IR.Select
import Hasura.RQL.Types
import Hasura.RQL.Types.Run
import Hasura.SQL.Types
import Hasura.Session
@ -328,4 +327,4 @@ runSelect
=> SelectQuery -> m EncJSON
runSelect q = do
sourceConfig <- askSourceConfig @('Postgres 'Vanilla) (getSourceDMLQuery q)
phaseOne q >>= runQueryLazyTx (_pscExecCtx sourceConfig) Q.ReadOnly . phaseTwo
phaseOne q >>= runTxWithCtx (_pscExecCtx sourceConfig) Q.ReadOnly . phaseTwo

View File

@ -32,7 +32,6 @@ import Hasura.RQL.DML.Types
import Hasura.RQL.IR.BoolExp
import Hasura.RQL.IR.Update
import Hasura.RQL.Types
import Hasura.RQL.Types.Run
import Hasura.SQL.Types
import Hasura.Session
@ -206,5 +205,5 @@ runUpdate q = do
strfyNum <- stringifyNum . _sccSQLGenCtx <$> askServerConfigCtx
let queryTags = QueryTagsComment $ Tagged.untag $ createQueryTags @m Nothing (encodeOptionalQueryTags Nothing)
validateUpdateQuery q
>>= runQueryLazyTx (_pscExecCtx sourceConfig) Q.ReadWrite
>>= runTxWithCtx (_pscExecCtx sourceConfig) Q.ReadWrite
. flip runReaderT queryTags . execUpdateQuery strfyNum userInfo

View File

@ -180,9 +180,6 @@ instance (HasServerConfigCtx m)
instance (HasServerConfigCtx m)
=> HasServerConfigCtx (MetadataT m) where
askServerConfigCtx = lift askServerConfigCtx
instance (HasServerConfigCtx m)
=> HasServerConfigCtx (LazyTxT QErr m) where
askServerConfigCtx = lift askServerConfigCtx
instance (HasServerConfigCtx m) => HasServerConfigCtx (Q.TxET QErr m) where
askServerConfigCtx = lift askServerConfigCtx
instance (HasServerConfigCtx m) => HasServerConfigCtx (TableCacheRT b m) where

View File

@ -3,13 +3,11 @@
module Hasura.RQL.Types.Run
( RunT(..)
, RunCtx(..)
, runQueryLazyTx
, peelRun
) where
import Hasura.Prelude
import qualified Database.PG.Query as Q
import qualified Network.HTTP.Client.Extended as HTTP
import Control.Monad.Trans.Control (MonadBaseControl)
@ -60,26 +58,6 @@ instance (Monad m) => HasServerConfigCtx (RunT m) where
instance (MonadResolveSource m) => MonadResolveSource (RunT m) where
getSourceResolver = RunT . lift . lift $ getSourceResolver
runQueryLazyTx
:: ( MonadIO m
, MonadBaseControl IO m
, MonadError QErr m
, Tracing.MonadTrace m
, UserInfoM m
)
=> PGExecCtx
-> Q.TxAccess
-> LazyTxT QErr m a
-> m a
runQueryLazyTx pgExecCtx txAccess tx = do
traceCtx <- Tracing.currentContext
userInfo <- askUserInfo
liftEitherM
$ runExceptT
$ runLazyTx pgExecCtx txAccess
$ withTraceContext traceCtx
$ withUserInfo userInfo tx
peelRun
:: RunCtx
-> RunT m a

View File

@ -139,6 +139,7 @@ import Data.Int (Int64)
import Data.Text.Extended
import System.Cron.Types
import qualified Database.PG.Query as Q
import qualified Hasura.Backends.Postgres.Connection as PG
import qualified Hasura.GraphQL.Parser as P
import qualified Hasura.SQL.AnyBackend as AB
@ -464,7 +465,7 @@ instance (Monoid w, CacheRM m) => CacheRM (WriterT w m) where
askSchemaCache = lift askSchemaCache
instance (CacheRM m) => CacheRM (TraceT m) where
askSchemaCache = lift askSchemaCache
instance (CacheRM m) => CacheRM (PG.LazyTxT QErr m) where
instance (CacheRM m) => CacheRM (Q.TxET QErr m) where
askSchemaCache = lift askSchemaCache
getDependentObjs :: SchemaCache -> SchemaObjId -> [SchemaObjId]

View File

@ -43,6 +43,7 @@ import Network.HTTP.Client.Extended
import qualified Hasura.Tracing as Tracing
import qualified Database.PG.Query as Q
import Hasura.Backends.Postgres.Connection
import Hasura.Base.Error
import Hasura.RQL.Types.Common
@ -161,7 +162,7 @@ instance (CacheRWM m) => CacheRWM (StateT s m) where
instance (CacheRWM m) => CacheRWM (TraceT m) where
buildSchemaCacheWithOptions a b c = lift $ buildSchemaCacheWithOptions a b c
setMetadataResourceVersionInSchemaCache = lift . setMetadataResourceVersionInSchemaCache
instance (CacheRWM m) => CacheRWM (LazyTxT QErr m) where
instance (CacheRWM m) => CacheRWM (Q.TxET QErr m) where
buildSchemaCacheWithOptions a b c = lift $ buildSchemaCacheWithOptions a b c
setMetadataResourceVersionInSchemaCache = lift . setMetadataResourceVersionInSchemaCache

View File

@ -12,6 +12,7 @@ import Data.Aeson.Extended
import qualified Hasura.SQL.AnyBackend as AB
import qualified Hasura.Tracing as Tracing
import qualified Database.PG.Query as Q
import Hasura.Backends.Postgres.Connection
import Hasura.Base.Error
import Hasura.RQL.IR.BoolExp
@ -97,5 +98,5 @@ instance (MonadResolveSource m) => MonadResolveSource (ReaderT r m) where
instance (MonadResolveSource m) => MonadResolveSource (Tracing.TraceT m) where
getSourceResolver = lift getSourceResolver
instance (MonadResolveSource m) => MonadResolveSource (LazyTxT QErr m) where
instance (MonadResolveSource m) => MonadResolveSource (Q.TxET QErr m) where
getSourceResolver = lift getSourceResolver

View File

@ -632,18 +632,8 @@ gqlExplainHandler
-> m (HttpResponse EncJSON)
gqlExplainHandler query = do
onlyAdmin
scRef <- asks (scCacheRef . hcServerCtx)
sc <- getSCFromRef scRef
-- sqlGenCtx <- asks (scSQLGenCtx . hcServerCtx)
-- env <- asks (scEnvironment . hcServerCtx)
-- logger <- asks (scLogger . hcServerCtx)
-- let runTx :: ReaderT HandlerCtx (Tracing.TraceT (Tracing.NoReporter (LazyTx QErr))) a
-- -> ExceptT QErr (ReaderT HandlerCtx (Tracing.TraceT m)) a
-- let runTx rttx = ExceptT . ReaderT $ \ctx -> do
-- runExceptT (Tracing.interpTraceT (runLazyTx pgExecCtx Q.ReadOnly) (runReaderT rttx ctx))
scRef <- asks (scCacheRef . hcServerCtx)
sc <- getSCFromRef scRef
res <- GE.explainGQLQuery sc query
return $ HttpResponse res []

View File

@ -132,7 +132,7 @@ migrateCatalog maybeDefaultSourceConfig maintenanceMode migrationTime = do
liftTx $ Q.catchE defaultTxErrorHandler $
when createSchema $ Q.unitQ "CREATE SCHEMA hdb_catalog" () False
enablePgcryptoExtension
runTx $(makeRelativeToProject "src-rsr/initialise.sql" >>= Q.sqlFromFile)
multiQ $(makeRelativeToProject "src-rsr/initialise.sql" >>= Q.sqlFromFile)
updateCatalogVersion
let emptyMetadata' = case maybeDefaultSourceConfig of
@ -263,7 +263,7 @@ migrations maybeDefaultSourceConfig dryRun maintenanceMode =
runTxOrPrint
| dryRun =
liftIO . TIO.putStrLn . Q.getQueryText
| otherwise = runTx
| otherwise = multiQ
from42To43 = do
when (maintenanceMode == MaintenanceModeEnabled) $
@ -272,7 +272,7 @@ migrations maybeDefaultSourceConfig dryRun maintenanceMode =
if dryRun then (liftIO . TIO.putStrLn . Q.getQueryText) query
else do
metadataV2 <- fetchMetadataFromHdbTables
runTx query
multiQ query
defaultSourceConfig <- onNothing maybeDefaultSourceConfig $ throw400 NotSupported $
"cannot migrate to catalog version 43 without --database-url or env var " <> tshow (fst databaseUrlEnv)
let metadataV3 =
@ -289,7 +289,7 @@ migrations maybeDefaultSourceConfig dryRun maintenanceMode =
if dryRun then (liftIO . TIO.putStrLn . Q.getQueryText) query
else do
Metadata{..} <- liftTx fetchMetadataFromCatalog
runTx query
multiQ query
let emptyMetadataNoSources =
MetadataNoSources mempty mempty mempty mempty mempty emptyCustomTypes mempty mempty
metadataV2 <- case OMap.toList _metaSources of
@ -310,3 +310,6 @@ migrations maybeDefaultSourceConfig dryRun maintenanceMode =
-- cron triggers are added in the `hdb_catalog.hdb_cron_triggers`
addCronTriggerForeignKeyConstraint
recreateSystemMetadata
multiQ :: (MonadTx m) => Q.Query -> m ()
multiQ = liftTx . Q.multiQE defaultTxErrorHandler

View File

@ -1,6 +1,5 @@
module Hasura.Server.Migrate.Internal
( runTx
, getCatalogVersion
( getCatalogVersion
, from3To4
, setCatalogVersion
) where
@ -20,9 +19,6 @@ import Hasura.RQL.Types.EventTrigger
import Hasura.SQL.Backend
runTx :: (MonadTx m) => Q.Query -> m ()
runTx = liftTx . Q.multiQE defaultTxErrorHandler
-- | The old 0.8 catalog version is non-integral, so we store it in the database as a
-- string.
getCatalogVersion :: Q.TxE QErr Text

View File

@ -90,12 +90,12 @@ spec
=> PostgresConnConfiguration -> PGExecCtx -> Q.ConnInfo -> SpecWithCache m
spec srcConfig pgExecCtx pgConnInfo = do
let migrateCatalogAndBuildCache env time = do
(migrationResult, metadata) <- runTx pgExecCtx $ migrateCatalog (Just srcConfig) MaintenanceModeDisabled time
(migrationResult, metadata) <- runTx' pgExecCtx $ migrateCatalog (Just srcConfig) MaintenanceModeDisabled time
(,migrationResult) <$> runCacheBuildM (buildRebuildableSchemaCache env metadata)
dropAndInit env time = lift $ CacheRefT $ flip modifyMVar \_ ->
(runTx pgExecCtx dropHdbCatalogSchema) *> (migrateCatalogAndBuildCache env time)
downgradeTo v = runTx pgExecCtx . downgradeCatalog (Just srcConfig) DowngradeOptions{ dgoDryRun = False, dgoTargetVersion = v }
(runTx' pgExecCtx dropHdbCatalogSchema) *> (migrateCatalogAndBuildCache env time)
downgradeTo v = runTx' pgExecCtx . downgradeCatalog (Just srcConfig) DowngradeOptions{ dgoDryRun = False, dgoTargetVersion = v }
describe "migrateCatalog" $ do
it "initializes the catalog" $ singleTransaction do
@ -155,7 +155,7 @@ spec srcConfig pgExecCtx pgConnInfo = do
MRMigrated{} -> True
_ -> False
firstDump <- transact dumpMetadata
transact (runTx pgExecCtx recreateSystemMetadata)
transact (runTx' pgExecCtx recreateSystemMetadata)
secondDump <- transact dumpMetadata
secondDump `shouldBe` firstDump
@ -168,7 +168,7 @@ spec srcConfig pgExecCtx pgConnInfo = do
secondDump <- transact dumpMetadata
secondDump `shouldBe` firstDump
runTx
runTx'
:: (MonadError QErr m, MonadIO m, MonadBaseControl IO m)
=> PGExecCtx -> LazyTxT QErr m a -> m a
runTx pgExecCtx = liftEitherM . runExceptT . runLazyTx pgExecCtx Q.ReadWrite
=> PGExecCtx -> Q.TxET QErr m a -> m a
runTx' pgExecCtx = liftEitherM . runExceptT . runTx pgExecCtx Q.ReadWrite

View File

@ -156,7 +156,7 @@ buildPostgresSpecs maybeUrlTemplate = do
>=> flip onLeft printErrJExit
(metadata, schemaCache) <- run do
metadata <- snd <$> (liftEitherM . runExceptT . runLazyTx pgContext Q.ReadWrite)
metadata <- snd <$> (liftEitherM . runExceptT . runTx pgContext Q.ReadWrite)
(migrateCatalog (Just sourceConfig) maintenanceMode =<< liftIO getCurrentTime)
schemaCache <- lift $ lift $ buildRebuildableSchemaCache envMap metadata
pure (metadata, schemaCache)