Mirror of https://github.com/hasura/graphql-engine.git (synced 2024-12-14 17:02:49 +03:00)
server: generalize event triggers - incremental PR 2

https://github.com/hasura/graphql-engine-mono/pull/2270
GitOrigin-RevId: d7644b25d3ee57ffa630de15ae692c1bfa03b4f6
This commit is contained in:
parent 9ebc07ef4f
commit 3247c8bd71
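This PR continues abstracting event triggers over backends: Postgres-only helpers move behind the backend-agnostic BackendEventTrigger class (see Hasura.RQL.Types.Eventing.Backend below), and the metadata API constructors become AnyBackend-wrapped. As a hedged sketch of what the class asks of a backend — 'SomeNewBackend is a made-up tag; the real instances appear later in this diff — a backend that cannot support event triggers simply rejects each method:

  -- Sketch only; 'SomeNewBackend is hypothetical and not part of this diff.
  instance BackendEventTrigger 'SomeNewBackend where
    insertManualEvent _ _ _ _ _ _   = throw400 NotSupported "Event triggers are not supported for SomeNewBackend sources"
    redeliverEvent _ _              = throw400 NotSupported "Event triggers are not supported for SomeNewBackend sources"
    dropTriggerAndArchiveEvents _ _ = throw400 NotSupported "Event triggers are not supported for SomeNewBackend sources"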
@@ -702,6 +702,7 @@ test-suite graphql-engine-tests
      , time
      , transformers-base
      , unordered-containers
      , utf8-string
      , vector
      -- mssql support
      , odbc
@@ -2,7 +2,6 @@ module Hasura.Backends.BigQuery.DDL
  ( buildComputedFieldInfo
  , fetchAndValidateEnumValues
  , createTableEventTrigger
  , buildEventTriggerInfo
  , buildFunctionInfo
  , updateColumnInEventTrigger
  , parseBoolExpOperations

@@ -13,8 +12,6 @@ where

import Hasura.Prelude

import qualified Data.Environment as Env

import Data.Aeson

import qualified Hasura.Backends.BigQuery.Types as BigQuery

@@ -72,16 +69,6 @@ createTableEventTrigger
createTableEventTrigger _ _ _ _ _ _ = runExceptT $
  throw400 NotSupported "Cannot create table event triggers in BigQuery sources"

buildEventTriggerInfo
  :: MonadError QErr m
  => Env.Environment
  -> SourceName
  -> TableName 'BigQuery
  -> EventTriggerConf 'BigQuery
  -> m (EventTriggerInfo 'BigQuery, [SchemaDependency])
buildEventTriggerInfo _ _ _ _ =
  throw400 NotSupported "Table event triggers are not supported for BigQuery sources"

buildFunctionInfo
  :: (MonadError QErr m)
  => SourceName
@@ -16,7 +16,6 @@ instance BackendMetadata 'BigQuery where
  resolveSourceConfig = BigQuery.resolveSourceConfig
  resolveDatabaseMetadata = BigQuery.resolveSource
  createTableEventTrigger = BigQuery.createTableEventTrigger
  buildEventTriggerInfo = BigQuery.buildEventTriggerInfo
  parseBoolExpOperations = BigQuery.parseBoolExpOperations
  buildFunctionInfo = BigQuery.buildFunctionInfo
  updateColumnInEventTrigger = BigQuery.updateColumnInEventTrigger
@@ -2,7 +2,6 @@ module Hasura.Backends.MSSQL.DDL
  ( buildComputedFieldInfo
  , fetchAndValidateEnumValues
  , createTableEventTrigger
  , buildEventTriggerInfo
  , buildFunctionInfo
  , updateColumnInEventTrigger
  , parseCollectableType

@@ -14,8 +13,6 @@ import Hasura.Prelude

import Data.Aeson

import qualified Data.Environment as Env

import Hasura.Base.Error
import Hasura.RQL.IR.BoolExp
import Hasura.RQL.Types.Backend

@@ -72,16 +69,6 @@ createTableEventTrigger
createTableEventTrigger _ _ _ _ _ _ = runExceptT $
  throw400 NotSupported "Cannot create table event triggers in MSSQL sources"

buildEventTriggerInfo
  :: MonadError QErr m
  => Env.Environment
  -> SourceName
  -> TableName 'MSSQL
  -> EventTriggerConf 'MSSQL
  -> m (EventTriggerInfo 'MSSQL, [SchemaDependency])
buildEventTriggerInfo _ _ _ _ =
  throw400 NotSupported "Table event triggers are not supported for MSSQL sources"

buildFunctionInfo
  :: (MonadError QErr m)
  => SourceName
@@ -15,7 +15,6 @@ instance BackendMetadata 'MSSQL where
  resolveSourceConfig = MSSQL.resolveSourceConfig
  resolveDatabaseMetadata = MSSQL.resolveDatabaseMetadata
  createTableEventTrigger = MSSQL.createTableEventTrigger
  buildEventTriggerInfo = MSSQL.buildEventTriggerInfo
  parseBoolExpOperations = MSSQL.parseBoolExpOperations
  buildFunctionInfo = MSSQL.buildFunctionInfo
  updateColumnInEventTrigger = MSSQL.updateColumnInEventTrigger
@@ -13,7 +13,6 @@ instance BackendMetadata 'MySQL where
  resolveSourceConfig = MySQL.resolveSourceConfig
  resolveDatabaseMetadata = MySQL.resolveDatabaseMetadata
  createTableEventTrigger = error "createTableEventTrigger: MySQL backend does not support this operation yet."
  buildEventTriggerInfo = error "buildEventTriggerInfo: MySQL backend does not support this operation yet."
  parseBoolExpOperations = error "parseBoolExpOperations: MySQL backend does not support this operation yet."
  buildFunctionInfo = error "buildFunctionInfo: MySQL backend does not support this operation yet."
  updateColumnInEventTrigger = error "updateColumnInEventTrigger: MySQL backend does not support this operation yet."
@@ -7,6 +7,7 @@ where
import Data.Aeson

import Hasura.Backends.Postgres.DDL.BoolExp as M
import Hasura.Backends.Postgres.DDL.EventTrigger as M
import Hasura.Backends.Postgres.DDL.Field as M
import Hasura.Backends.Postgres.DDL.Function as M
import Hasura.Backends.Postgres.DDL.Source as M
@@ -1,25 +1,43 @@
module Hasura.Backends.Postgres.DDL.EventTrigger
  ( insertManualEvent
  , redeliverEvent
  , dropTriggerAndArchiveEvents
  , createTableEventTrigger
  , dropTriggerQ
  , mkAllTriggersQ
  ) where

import Hasura.Prelude

import qualified Database.PG.Query as Q
import qualified Data.Text.Lazy as TL
import qualified Database.PG.Query as Q
import qualified Text.Shakespeare.Text as ST

import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson
import Data.FileEmbed (makeRelativeToProject)

import qualified Hasura.Tracing as Tracing
import qualified Hasura.Tracing as Tracing

import Hasura.Backends.Postgres.Connection
import Hasura.Backends.Postgres.SQL.Types hiding (TableName)
import Hasura.Backends.Postgres.SQL.DML
import Hasura.Backends.Postgres.SQL.Types hiding (TableName)
import Hasura.Backends.Postgres.Translate.Column
import Hasura.Base.Error
import Hasura.RQL.Types.Backend (SourceConfig, TableName)
import Hasura.RQL.Types.Backend (Backend, SourceConfig, TableName)
import Hasura.RQL.Types.Column
import Hasura.RQL.Types.Common
import Hasura.RQL.Types.EventTrigger
import Hasura.RQL.Types.Table ()
import Hasura.RQL.Types.Table ()
import Hasura.SQL.Backend
import Hasura.SQL.Types
import Hasura.Server.Types
import Hasura.Session

-- Corresponds to the 'OLD' and 'NEW' Postgres records; see
-- https://www.postgresql.org/docs/current/plpgsql-trigger.html
data OpVar = OLD | NEW deriving (Show)

insertManualEvent
  :: (MonadIO m, MonadError QErr m)
  => SourceConfig ('Postgres pgKind)
@@ -50,6 +68,34 @@ redeliverEvent
redeliverEvent sourceConfig eventId =
  liftEitherM $ liftIO $ runPgSourceWriteTx sourceConfig (redeliverEventTx eventId)

dropTriggerAndArchiveEvents
  :: ( MonadIO m
     , MonadError QErr m
     )
  => SourceConfig ('Postgres pgKind)
  -> TriggerName
  -> m ()
dropTriggerAndArchiveEvents sourceConfig triggerName =
  liftEitherM $ liftIO $ runPgSourceWriteTx sourceConfig $ do
    dropTriggerQ triggerName
    archiveEvents triggerName

createTableEventTrigger
  :: (Backend ('Postgres pgKind), MonadIO m, MonadBaseControl IO m)
  => ServerConfigCtx
  -> PGSourceConfig
  -> QualifiedTable
  -> [ColumnInfo ('Postgres pgKind)]
  -> TriggerName
  -> TriggerOpsDef ('Postgres pgKind)
  -> m (Either QErr ())
createTableEventTrigger serverConfigCtx sourceConfig table columns triggerName opsDefinition = runPgSourceWriteTx sourceConfig $ do
  -- Clean all existing triggers
  liftTx $ dropTriggerQ triggerName -- executes DROP IF EXISTS.. sql
  -- Create the given triggers
  flip runReaderT serverConfigCtx $
    mkAllTriggersQ triggerName table columns opsDefinition

---- DATABASE QUERIES ---------------------
--
-- The API for our in-database work queue:

@@ -65,7 +111,6 @@ insertPGManualEvent (QualifiedObject schemaName tableName) triggerName rowData =
    SELECT hdb_catalog.insert_event_log($1, $2, $3, $4, $5)
  |] (schemaName, tableName, triggerName, (tshow MANUAL), Q.AltJ rowData) False

checkEvent :: EventId -> Q.TxE QErr ()
checkEvent eid = do
  events <- Q.listQE defaultTxErrorHandler
@@ -98,3 +143,118 @@ redeliverEventTx :: EventId -> Q.TxE QErr ()
redeliverEventTx eventId = do
  checkEvent eventId
  markForDelivery eventId

dropTriggerQ :: TriggerName -> Q.TxE QErr ()
dropTriggerQ trn =
  mapM_ (\op -> Q.unitQE
                defaultTxErrorHandler
                (Q.fromText $ getDropFuncSql op) () False) [INSERT, UPDATE, DELETE]
  where
    getDropFuncSql :: Ops -> Text
    getDropFuncSql op =
      "DROP FUNCTION IF EXISTS"
      <> " hdb_catalog." <> pgIdenTrigger op trn <> "()"
      <> " CASCADE"

archiveEvents :: TriggerName -> Q.TxE QErr ()
archiveEvents trn =
  Q.unitQE defaultTxErrorHandler [Q.sql|
    UPDATE hdb_catalog.event_log
    SET archived = 't'
    WHERE trigger_name = $1
  |] (Identity trn) False

---- Postgres event trigger utility functions ---------------------

-- | pgIdenTrigger is a method used to construct the name of the pg function
-- used for event triggers which are present in the hdb_catalog schema.
pgIdenTrigger:: Ops -> TriggerName -> Text
pgIdenTrigger op trn = pgFmtIdentifier . qualifyTriggerName op $ triggerNameToTxt trn
  where
    qualifyTriggerName op' trn' = "notify_hasura_" <> trn' <> "_" <> tshow op'
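A hedged illustration of the names this produces (the trigger name is invented, and this assumes pgFmtIdentifier double-quotes its argument):

  -- For a trigger named "users_sync":
  --   qualifyTriggerName INSERT "users_sync"  ~>  notify_hasura_users_sync_INSERT
  --   pgIdenTrigger INSERT trn                ~>  "notify_hasura_users_sync_INSERT"
  -- so getDropFuncSql above would render roughly:
  --   DROP FUNCTION IF EXISTS hdb_catalog."notify_hasura_users_sync_INSERT"() CASCADE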

-- | Define the pgSQL trigger functions on database events.
mkTriggerQ
  :: forall pgKind m
  . (Backend ('Postgres pgKind), MonadTx m, MonadReader ServerConfigCtx m)
  => TriggerName
  -> QualifiedTable
  -> [ColumnInfo ('Postgres pgKind)]
  -> Ops
  -> SubscribeOpSpec ('Postgres pgKind)
  -> m ()
mkTriggerQ trn qt@(QualifiedObject schema table) allCols op (SubscribeOpSpec listenColumns deliveryColumns') = do
  strfyNum <- stringifyNum . _sccSQLGenCtx <$> ask
  liftTx $ Q.multiQE defaultTxErrorHandler $ Q.fromText . TL.toStrict $
    let
      -- If there are no specific delivery columns selected by user then all the columns will be delivered
      -- in payload hence 'SubCStar'.
      deliveryColumns = fromMaybe SubCStar deliveryColumns'
      getApplicableColumns = \case
        SubCStar -> allCols
        SubCArray cols -> getColInfos cols allCols

      -- Columns that should be present in the payload. By default, all columns are present.
      applicableDeliveryCols = getApplicableColumns deliveryColumns
      getRowExpression opVar = applyRowToJson' $ mkRowExpression opVar strfyNum applicableDeliveryCols

      -- Columns that user subscribed to listen for changes. By default, we listen on all columns.
      applicableListenCols = getApplicableColumns listenColumns
      renderRow opVar = applyRow $ mkRowExpression opVar strfyNum applicableListenCols

      oldDataExp = case op of
        INSERT -> SENull
        UPDATE -> getRowExpression OLD
        DELETE -> getRowExpression OLD
        MANUAL -> SENull
      newDataExp = case op of
        INSERT -> getRowExpression NEW
        UPDATE -> getRowExpression NEW
        DELETE -> SENull
        MANUAL -> SENull

      name = triggerNameToTxt trn
      qualifiedTriggerName = pgIdenTrigger op trn
      qualifiedTable = toSQLTxt qt
      schemaName = pgFmtLit $ getSchemaTxt schema
      tableName = pgFmtLit $ getTableTxt table

      operation = tshow op
      oldRow = toSQLTxt $ renderRow OLD
      newRow = toSQLTxt $ renderRow NEW
      oldPayloadExpression = toSQLTxt oldDataExp
      newPayloadExpression = toSQLTxt newDataExp

    in $(makeRelativeToProject "src-rsr/trigger.sql.shakespeare" >>= ST.stextFile )
  where
    applyRowToJson' e = SEFnApp "row_to_json" [e] Nothing
    applyRow e = SEFnApp "row" [e] Nothing
    opToQual = QualVar . tshow

    mkRowExpression opVar strfyNum columns
      = mkRowExp $ map (\col -> toExtractor (mkQId opVar strfyNum col) col) columns

    mkQId opVar strfyNum colInfo = toJSONableExp strfyNum (pgiType colInfo) False $
      SEQIdentifier $ QIdentifier (opToQual opVar) $ toIdentifier $ pgiColumn colInfo

    -- Generate the SQL expression
    toExtractor sqlExp column
      -- If the column type is either 'Geography' or 'Geometry', then after applying the 'ST_AsGeoJSON' function
      -- to the column, alias the value of the expression with the column name else it uses `st_asgeojson` as
      -- the column name.
      | isScalarColumnWhere isGeoType (pgiType column) = Extractor sqlExp (Just $ getAlias column)
      | otherwise = Extractor sqlExp Nothing
    getAlias col = toAlias $ Identifier $ getPGColTxt (pgiColumn col)

mkAllTriggersQ
  :: forall pgKind m
  . (Backend ('Postgres pgKind), MonadTx m, MonadReader ServerConfigCtx m)
  => TriggerName
  -> QualifiedTable
  -> [ColumnInfo ('Postgres pgKind)]
  -> TriggerOpsDef ('Postgres pgKind)
  -> m ()
mkAllTriggersQ trn qt allCols fullspec = do
  onJust (tdInsert fullspec) (mkTriggerQ trn qt allCols INSERT)
  onJust (tdUpdate fullspec) (mkTriggerQ trn qt allCols UPDATE)
  onJust (tdDelete fullspec) (mkTriggerQ trn qt allCols DELETE)
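A hedged example of how the listen/delivery split in mkTriggerQ plays out (the column name is invented):

  -- Given a spec like
  --   SubscribeOpSpec (SubCArray ["email"]) Nothing
  -- the generated trigger listens for changes on "email" only
  -- (applicableListenCols), while the payload columns fall back to
  -- SubCStar via fromMaybe, so applicableDeliveryCols = allCols and
  -- the webhook payload carries the whole row.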

@@ -6,32 +6,33 @@ module Hasura.Backends.Postgres.DDL.RunSQL

import Hasura.Prelude

import qualified Data.HashMap.Strict as M
import qualified Data.HashSet as HS
import qualified Database.PG.Query as Q
import qualified Text.Regex.TDFA as TDFA
import qualified Data.HashMap.Strict as M
import qualified Data.HashSet as HS
import qualified Database.PG.Query as Q
import qualified Text.Regex.TDFA as TDFA

import Control.Monad.Trans.Control (MonadBaseControl)
import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson
import Data.Text.Extended

import qualified Hasura.SQL.AnyBackend as AB
import qualified Hasura.Tracing as Tracing
import qualified Hasura.SQL.AnyBackend as AB
import qualified Hasura.Tracing as Tracing

import Hasura.Backends.Postgres.DDL.Source (ToMetadataFetchQuery, fetchFunctionMetadata,
                                            fetchTableMetadata)
import Hasura.Backends.Postgres.DDL.Table
import Hasura.Backends.Postgres.DDL.EventTrigger
import Hasura.Backends.Postgres.DDL.Source (ToMetadataFetchQuery,
                                            fetchFunctionMetadata,
                                            fetchTableMetadata)
import Hasura.Backends.Postgres.SQL.Types
import Hasura.Base.Error
import Hasura.EncJSON
import Hasura.RQL.DDL.Deps (reportDepsExt)
import Hasura.RQL.DDL.Deps (reportDepsExt)
import Hasura.RQL.DDL.Schema
import Hasura.RQL.DDL.Schema.Common
import Hasura.RQL.DDL.Schema.Diff
import Hasura.RQL.Types hiding (ConstraintName, fmFunction,
                               tmComputedFields, tmTable)
import Hasura.RQL.Types hiding (ConstraintName, fmFunction,
                               tmComputedFields, tmTable)
import Hasura.RQL.Types.Run
import Hasura.Server.Utils (quoteRegex)
import Hasura.Server.Utils (quoteRegex)
import Hasura.Session

data RunSQL

@@ -193,7 +194,7 @@ withMetadataCheck source cascade txAccess action = do
  -- Drop event triggers so no interference is caused to the sql query
  forM_ (M.elems preActionTables) $ \tableInfo -> do
    let eventTriggers = _tiEventTriggerInfoMap tableInfo
    forM_ (M.keys eventTriggers) (liftTx . delTriggerQ)
    forM_ (M.keys eventTriggers) (liftTx . dropTriggerQ)

  -- Get the metadata before the sql query, everything, need to filter this
  (preActionTableMeta, preActionFunctionMeta) <- fetchMeta preActionTables preActionFunctions
@@ -1,279 +1,35 @@
module Hasura.Backends.Postgres.DDL.Table
  ( createTableEventTrigger
  , buildEventTriggerInfo
  , updateColumnInEventTrigger
  ( updateColumnInEventTrigger
  , fetchAndValidateEnumValues
  , delTriggerQ
  , mkAllTriggersQ
  , getHeaderInfosFromConf
  )
where

import Hasura.Prelude

import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as Map
import qualified Data.List.NonEmpty as NE
import qualified Data.Sequence as Seq
import qualified Data.Sequence.NonEmpty as NESeq
import qualified Data.Text as T
import qualified Data.Text.Lazy as TL
import qualified Database.PG.Query as Q
import qualified Language.GraphQL.Draft.Syntax as G
import qualified Text.Shakespeare.Text as ST

import Control.Monad.Trans.Control (MonadBaseControl)
import Control.Monad.Validate
import Data.FileEmbed (makeRelativeToProject)
import Data.List (delete)
import Data.Text.Extended

import qualified Hasura.SQL.AnyBackend as AB

import Hasura.Backends.Postgres.Connection
import Hasura.Backends.Postgres.SQL.DML
import Hasura.Backends.Postgres.SQL.Types
import Hasura.Base.Error
import Hasura.RQL.DDL.Headers
import Hasura.RQL.Types.Backend
import Hasura.RQL.Types.Column
import Hasura.RQL.Types.Common
import Hasura.RQL.Types.EventTrigger
import Hasura.RQL.Types.SchemaCache
import Hasura.RQL.Types.SchemaCacheTypes
import Hasura.RQL.Types.Table
import Hasura.SQL.Backend
import Hasura.SQL.Types
import Hasura.Server.Types
import Hasura.Server.Utils


-- | Create the table event trigger in the database in a @'/v1/query' API
-- transaction as soon as after @'runCreateEventTriggerQuery' is called and
-- in building schema cache.
createTableEventTrigger
  :: (Backend ('Postgres pgKind), MonadIO m, MonadBaseControl IO m)
  => ServerConfigCtx
  -> PGSourceConfig
  -> QualifiedTable
  -> [ColumnInfo ('Postgres pgKind)]
  -> TriggerName
  -> TriggerOpsDef ('Postgres pgKind)
  -> m (Either QErr ())
createTableEventTrigger serverConfigCtx sourceConfig table columns triggerName opsDefinition = runPgSourceWriteTx sourceConfig $ do
  -- Clean all existing triggers
  liftTx $ delTriggerQ triggerName -- executes DROP IF EXISTS.. sql
  -- Create the given triggers
  flip runReaderT serverConfigCtx $
    mkAllTriggersQ triggerName table columns opsDefinition

delTriggerQ :: TriggerName -> Q.TxE QErr ()
delTriggerQ trn =
  mapM_ (\op -> Q.unitQE
                defaultTxErrorHandler
                (Q.fromText $ getDropFuncSql op) () False) [INSERT, UPDATE, DELETE]
  where
    getDropFuncSql :: Ops -> T.Text
    getDropFuncSql op =
      "DROP FUNCTION IF EXISTS"
      <> " hdb_catalog." <> pgIdenTrigger op trn <> "()"
      <> " CASCADE"

-- pgIdenTrigger is a method used to construct the name of the pg function
-- used for event triggers which are present in the hdb_catalog schema.
pgIdenTrigger:: Ops -> TriggerName -> Text
pgIdenTrigger op trn = pgFmtIdentifier . qualifyTriggerName op $ triggerNameToTxt trn
  where
    qualifyTriggerName op' trn' = "notify_hasura_" <> trn' <> "_" <> tshow op'

mkAllTriggersQ
  :: forall pgKind m
  . (Backend ('Postgres pgKind), MonadTx m, MonadReader ServerConfigCtx m)
  => TriggerName
  -> QualifiedTable
  -> [ColumnInfo ('Postgres pgKind)]
  -> TriggerOpsDef ('Postgres pgKind)
  -> m ()
mkAllTriggersQ trn qt allCols fullspec = do
  onJust (tdInsert fullspec) (mkTriggerQ trn qt allCols INSERT)
  onJust (tdUpdate fullspec) (mkTriggerQ trn qt allCols UPDATE)
  onJust (tdDelete fullspec) (mkTriggerQ trn qt allCols DELETE)

data OpVar = OLD | NEW deriving (Show)

-- | Formats each columns to appropriate SQL expression
toJSONableExp :: Bool -> ColumnType ('Postgres pgKind) -> Bool -> SQLExp -> SQLExp
toJSONableExp strfyNum colTy asText expn
  -- If its a numeric column greater than a 32-bit integer, we have to stringify it as JSON spec doesn't support >32-bit integers
  | asText || (isScalarColumnWhere isBigNum colTy && strfyNum) =
      expn `SETyAnn` textTypeAnn
  -- If the column is either a `Geometry` or `Geography` then apply the `ST_AsGeoJSON` function to convert it into GeoJSON format
  | isScalarColumnWhere isGeoType colTy =
      SEFnApp "ST_AsGeoJSON"
        [ expn
        , SEUnsafe "15" -- max decimal digits
        , SEUnsafe "4" -- to print out crs
        ] Nothing `SETyAnn` jsonTypeAnn
  | otherwise = expn
-- | Define the pgSQL trigger functions on database events.
mkTriggerQ
  :: forall pgKind m
  . (Backend ('Postgres pgKind), MonadTx m, MonadReader ServerConfigCtx m)
  => TriggerName
  -> QualifiedTable
  -> [ColumnInfo ('Postgres pgKind)]
  -> Ops
  -> SubscribeOpSpec ('Postgres pgKind)
  -> m ()
mkTriggerQ trn qt@(QualifiedObject schema table) allCols op (SubscribeOpSpec listenColumns deliveryColumns') = do
  strfyNum <- stringifyNum . _sccSQLGenCtx <$> ask
  liftTx $ Q.multiQE defaultTxErrorHandler $ Q.fromText . TL.toStrict $
    let
      -- If there are no specific delivery columns selected by user then all the columns will be delivered
      -- in payload hence 'SubCStar'.
      deliveryColumns = fromMaybe SubCStar deliveryColumns'
      getApplicableColumns = \case
        SubCStar -> allCols
        SubCArray cols -> getColInfos cols allCols

      -- Columns that should be present in the payload. By default, all columns are present.
      applicableDeliveryCols = getApplicableColumns deliveryColumns
      getRowExpression opVar = applyRowToJson' $ mkRowExpression opVar strfyNum applicableDeliveryCols

      -- Columns that user subscribed to listen for changes. By default, we listen on all columns.
      applicableListenCols = getApplicableColumns listenColumns
      renderRow opVar = applyRow $ mkRowExpression opVar strfyNum applicableListenCols

      oldDataExp = case op of
        INSERT -> SENull
        UPDATE -> getRowExpression OLD
        DELETE -> getRowExpression OLD
        MANUAL -> SENull
      newDataExp = case op of
        INSERT -> getRowExpression NEW
        UPDATE -> getRowExpression NEW
        DELETE -> SENull
        MANUAL -> SENull

      name = triggerNameToTxt trn
      qualifiedTriggerName = pgIdenTrigger op trn
      qualifiedTable = toSQLTxt qt
      schemaName = pgFmtLit $ getSchemaTxt schema
      tableName = pgFmtLit $ getTableTxt table

      operation = tshow op
      oldRow = toSQLTxt $ renderRow OLD
      newRow = toSQLTxt $ renderRow NEW
      oldPayloadExpression = toSQLTxt oldDataExp
      newPayloadExpression = toSQLTxt newDataExp

    in $(makeRelativeToProject "src-rsr/trigger.sql.shakespeare" >>= ST.stextFile )
  where
    applyRowToJson' e = SEFnApp "row_to_json" [e] Nothing
    applyRow e = SEFnApp "row" [e] Nothing
    opToQual = QualVar . tshow

    mkRowExpression opVar strfyNum columns
      = mkRowExp $ map (\col -> toExtractor (mkQId opVar strfyNum col) col) columns

    mkQId opVar strfyNum colInfo = toJSONableExp strfyNum (pgiType colInfo) False $
      SEQIdentifier $ QIdentifier (opToQual opVar) $ toIdentifier $ pgiColumn colInfo

    -- Generate the SQL expression
    toExtractor sqlExp column
      -- If the column type is either 'Geography' or 'Geometry', then after applying the 'ST_AsGeoJSON' function
      -- to the column, alias the value of the expression with the column name else it uses `st_asgeojson` as
      -- the column name.
      | isScalarColumnWhere isGeoType (pgiType column) = Extractor sqlExp (Just $ getAlias column)
      | otherwise = Extractor sqlExp Nothing
    getAlias col = toAlias $ Identifier $ getPGColTxt (pgiColumn col)
buildEventTriggerInfo
  :: forall (pgKind :: PostgresKind) m
  . (Backend ('Postgres pgKind), QErrM m)
  => Env.Environment
  -> SourceName
  -> QualifiedTable
  -> EventTriggerConf ('Postgres pgKind)
  -> m (EventTriggerInfo ('Postgres pgKind), [SchemaDependency])
buildEventTriggerInfo env source qt (EventTriggerConf name def webhook webhookFromEnv rconf mheaders) = do
  webhookConf <- case (webhook, webhookFromEnv) of
    (Just w, Nothing)    -> return $ WCValue w
    (Nothing, Just wEnv) -> return $ WCEnv wEnv
    _                    -> throw500 "expected webhook or webhook_from_env"
  let headerConfs = fromMaybe [] mheaders
  webhookInfo <- getWebhookInfoFromConf env webhookConf
  headerInfos <- getHeaderInfosFromConf env headerConfs
  let eTrigInfo = EventTriggerInfo name def rconf webhookInfo headerInfos
      tabDep = SchemaDependency
                 (SOSourceObj source
                   $ AB.mkAnyBackend
                   $ SOITable @('Postgres pgKind) qt)
                 DRParent
  pure (eTrigInfo, tabDep:getTrigDefDeps @pgKind source qt def)

getTrigDefDeps
  :: forall (pgKind :: PostgresKind)
  . (Backend ('Postgres pgKind))
  => SourceName
  -> QualifiedTable
  -> TriggerOpsDef ('Postgres pgKind)
  -> [SchemaDependency]
getTrigDefDeps source qt (TriggerOpsDef mIns mUpd mDel _) =
  mconcat $ catMaybes [ subsOpSpecDeps <$> mIns
                      , subsOpSpecDeps <$> mUpd
                      , subsOpSpecDeps <$> mDel
                      ]
  where
    subsOpSpecDeps :: SubscribeOpSpec ('Postgres pgKind) -> [SchemaDependency]
    subsOpSpecDeps os =
      let cols = getColsFromSub $ sosColumns os
          colDeps = flip map cols $ \col ->
            SchemaDependency
              (SOSourceObj source
                $ AB.mkAnyBackend
                $ SOITableObj @('Postgres pgKind) qt (TOCol @('Postgres pgKind) col))
              DRColumn
          payload = maybe [] getColsFromSub (sosPayload os)
          payloadDeps = flip map payload $ \col ->
            SchemaDependency
              (SOSourceObj source
                $ AB.mkAnyBackend
                $ SOITableObj qt (TOCol @('Postgres pgKind) col))
              DRPayload
      in colDeps <> payloadDeps
    getColsFromSub sc = case sc of
      SubCStar         -> []
      SubCArray pgcols -> pgcols

getHeaderInfosFromConf
  :: QErrM m
  => Env.Environment
  -> [HeaderConf]
  -> m [EventHeaderInfo]
getHeaderInfosFromConf env = mapM getHeader
  where
    getHeader :: QErrM m => HeaderConf -> m EventHeaderInfo
    getHeader hconf = case hconf of
      (HeaderConf _ (HVValue val)) -> return $ EventHeaderInfo hconf val
      (HeaderConf _ (HVEnv val))   -> do
        envVal <- getEnv env val
        return $ EventHeaderInfo hconf envVal

getWebhookInfoFromConf
  :: QErrM m
  => Env.Environment
  -> WebhookConf
  -> m WebhookConfInfo
getWebhookInfoFromConf env wc = case wc of
  WCValue w -> do
    resolvedWebhook <- resolveWebhook env w
    return $ WebhookConfInfo wc $ unResolvedWebhook resolvedWebhook
  WCEnv we -> do
    envVal <- getEnv env we
    return $ WebhookConfInfo wc envVal

updateColumnInEventTrigger
  :: QualifiedTable
  -> PGCol
@@ -24,9 +24,6 @@ instance BackendAPI ('Postgres 'Vanilla) where

      , commandParser "add_computed_field" RMAddComputedField
      , commandParser "drop_computed_field" RMDropComputedField

      , commandParser "create_event_trigger" RMPgCreateEventTrigger
      , commandParser "delete_event_trigger" RMPgDeleteEventTrigger
      ]
    ]
@@ -118,7 +118,6 @@ instance
  resolveSourceConfig = PG.resolveSourceConfig
  resolveDatabaseMetadata = PG.resolveDatabaseMetadata
  createTableEventTrigger = PG.createTableEventTrigger
  buildEventTriggerInfo = PG.buildEventTriggerInfo @pgKind
  parseBoolExpOperations = PG.parseBoolExpOperations
  buildFunctionInfo = PG.buildFunctionInfo
  updateColumnInEventTrigger = PG.updateColumnInEventTrigger
@@ -1,10 +1,12 @@
module Hasura.Backends.Postgres.Translate.Column
  ( toTxtValue
  , toJSONableExp
  ) where

import Hasura.Prelude

import Hasura.Backends.Postgres.SQL.DML
import Hasura.Backends.Postgres.SQL.Types
import Hasura.Backends.Postgres.SQL.Value
import Hasura.Backends.Postgres.Types.Column
import Hasura.RQL.Types.Column
@@ -16,3 +18,19 @@ toTxtValue ColumnValue{..} =
  withTyAnn ty . withConstructorFn ty $ txtEncoder cvValue
  where
    ty = unsafePGColumnToBackend cvType

-- | Formats each column to an appropriate SQL expression
toJSONableExp :: Bool -> ColumnType ('Postgres pgKind) -> Bool -> SQLExp -> SQLExp
toJSONableExp stringifyNum colType asText expression
  -- If it's a numeric column greater than a 32-bit integer, we have to stringify it as the JSON spec doesn't support >32-bit integers
  | asText || (isScalarColumnWhere isBigNum colType && stringifyNum) =
      expression `SETyAnn` textTypeAnn
  -- If the column is either a `Geometry` or `Geography` then apply the `ST_AsGeoJSON` function to convert it into GeoJSON format
  | isScalarColumnWhere isGeoType colType =
      SEFnApp "ST_AsGeoJSON"
        [ expression
        , SEUnsafe "15" -- max decimal digits
        , SEUnsafe "4" -- to print out crs
        ] Nothing
      `SETyAnn` jsonTypeAnn
  | otherwise = expression
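Sketched in SQL terms, and assuming SETyAnn renders as a ::cast, the three guards above behave roughly like this (column names invented):

  -- bigint "id" with stringifyNum or asText:  "id"    =>  "id"::text
  -- geometry/geography "loc":                 "loc"   =>  ST_AsGeoJSON("loc", 15, 4)::json
  -- any other column "name":                  "name"  =>  "name"  (unchanged)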

@@ -25,6 +25,7 @@ import qualified Hasura.Backends.Postgres.SQL.DML as S
import Hasura.Backends.Postgres.SQL.Rewrite
import Hasura.Backends.Postgres.SQL.Types
import Hasura.Backends.Postgres.Translate.BoolExp
import Hasura.Backends.Postgres.Translate.Column (toJSONableExp)
import Hasura.Backends.Postgres.Translate.Types
import Hasura.Base.Error
import Hasura.EncJSON
@@ -135,13 +135,13 @@ import qualified Hasura.Backends.Postgres.SQL.DML as S
import qualified Hasura.Logging as L
import qualified Hasura.Tracing as Tracing

import Hasura.Backends.Postgres.DDL.Table (getHeaderInfosFromConf)
import Hasura.Backends.Postgres.SQL.Types
import Hasura.Base.Error
import Hasura.Eventing.Common
import Hasura.Eventing.HTTP
import Hasura.Eventing.ScheduledTrigger.Types
import Hasura.Metadata.Class
import Hasura.RQL.DDL.EventTrigger (getHeaderInfosFromConf)
import Hasura.RQL.DDL.Headers
import Hasura.RQL.Types
import Hasura.SQL.Types
@@ -25,6 +25,7 @@ module Hasura.Logging
  , defaultEnabledEngineLogTypes
  , isEngineLogTypeEnabled
  , readLogTypes
  , getFormattedTime
  ) where

import Hasura.Prelude
@@ -9,28 +9,30 @@ module Hasura.RQL.DDL.EventTrigger
  , InvokeEventTriggerQuery
  , runInvokeEventTrigger
  -- TODO(from master): review
  , archiveEvents
  , getHeaderInfosFromConf
  , getWebhookInfoFromConf
  , buildEventTriggerInfo
  ) where

import Hasura.Prelude

import qualified Data.ByteString.Lazy as LBS
import qualified Data.HashMap.Strict as HM
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.Text as T
import qualified Database.PG.Query as Q
import qualified Text.Regex.TDFA as TDFA
import qualified Data.ByteString.Lazy as LBS
import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as HM
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.Text as T
import qualified Text.Regex.TDFA as TDFA

import Control.Lens ((.~))
import Control.Lens ((.~))
import Data.Aeson
import Data.Text.Extended

import qualified Hasura.Backends.Postgres.DDL.Table as PG
import qualified Hasura.SQL.AnyBackend as AB
import qualified Hasura.Tracing as Tracing
import qualified Hasura.SQL.AnyBackend as AB
import qualified Hasura.Tracing as Tracing

import Hasura.Base.Error
import Hasura.EncJSON
import Hasura.RQL.DDL.Headers
import Hasura.RQL.Types
import Hasura.RQL.Types.Eventing.Backend
import Hasura.Session
@@ -130,15 +132,6 @@ instance Backend b => FromJSON (InvokeEventTriggerQuery b) where
    <*> o .:? "source" .!= defaultSource
    <*> o .: "payload"


archiveEvents :: TriggerName -> Q.TxE QErr ()
archiveEvents trn =
  Q.unitQE defaultTxErrorHandler [Q.sql|
    UPDATE hdb_catalog.event_log
    SET archived = 't'
    WHERE trigger_name = $1
  |] (Identity trn) False

resolveEventTriggerQuery
  :: forall b m
  . (Backend b, UserInfoM m, QErrM m, CacheRM m)
@@ -159,14 +152,14 @@ resolveEventTriggerQuery (CreateEventTriggerQuery source name qt insert update d
  return (ti, replace, EventTriggerConf name (TriggerOpsDef insert update delete enableManual) webhook webhookFromEnv rconf mheaders)
  where
    assertCols ti opSpec = onJust opSpec \sos -> case sosColumns sos of
      SubCStar         -> return ()
      SubCArray pgcols -> forM_ pgcols (assertPGCol (_tciFieldInfoMap ti) "")
      SubCStar          -> return ()
      SubCArray columns -> forM_ columns (assertColumnExists @b (_tciFieldInfoMap ti) "")

createEventTriggerQueryMetadata
  :: forall pgKind m
  . (BackendMetadata ('Postgres pgKind), QErrM m, UserInfoM m, CacheRWM m, MetadataM m)
  => CreateEventTriggerQuery ('Postgres pgKind)
  -> m (TableCoreInfo ('Postgres pgKind), EventTriggerConf ('Postgres pgKind))
  :: forall b m
  . (BackendMetadata b, QErrM m, UserInfoM m, CacheRWM m, MetadataM m)
  => CreateEventTriggerQuery b
  -> m (TableCoreInfo b, EventTriggerConf b)
createEventTriggerQueryMetadata q = do
  (tableCoreInfo, replace, triggerConf) <- resolveEventTriggerQuery q
  let table = _cetqTable q
@@ -175,34 +168,33 @@ createEventTriggerQueryMetadata q = do
      metadataObj =
        MOSourceObjId source
          $ AB.mkAnyBackend
          $ SMOTableObj @('Postgres pgKind) table
          $ SMOTableObj @b table
          $ MTOTrigger triggerName
  buildSchemaCacheFor metadataObj
    $ MetadataModifier
    $ tableMetadataSetter @('Postgres pgKind) source table.tmEventTriggers %~
    $ tableMetadataSetter @b source table.tmEventTriggers %~
      if replace then ix triggerName .~ triggerConf
      else OMap.insert triggerName triggerConf
  pure (tableCoreInfo, triggerConf)

runCreateEventTriggerQuery
  :: forall pgKind m
  . (BackendMetadata ('Postgres pgKind), QErrM m, UserInfoM m, CacheRWM m, MetadataM m)
  => CreateEventTriggerQuery ('Postgres pgKind)
  :: forall b m
  . (BackendMetadata b, QErrM m, UserInfoM m, CacheRWM m, MetadataM m)
  => CreateEventTriggerQuery b
  -> m EncJSON
runCreateEventTriggerQuery q = do
  void $ createEventTriggerQueryMetadata @pgKind q
  void $ createEventTriggerQueryMetadata @b q
  pure successMsg

runDeleteEventTriggerQuery
  :: forall pgKind m
  . (BackendMetadata ('Postgres pgKind), MonadError QErr m, CacheRWM m, MonadIO m, MetadataM m)
  => DeleteEventTriggerQuery ('Postgres pgKind)
  :: forall b m
  . (BackendEventTrigger b, BackendMetadata b, MonadError QErr m, CacheRWM m, MonadIO m, MetadataM m)
  => DeleteEventTriggerQuery b
  -> m EncJSON
runDeleteEventTriggerQuery (DeleteEventTriggerQuery source name) = do
  -- liftTx $ delEventTriggerFromCatalog name
  sourceInfo <- askSourceInfo source
  let maybeTable = HM.lookup name $ HM.unions $
        flip map (HM.toList $ _siTables @('Postgres pgKind) sourceInfo) $ \(table, tableInfo) ->
        flip map (HM.toList $ _siTables @b sourceInfo) $ \(table, tableInfo) ->
          HM.map (const table) $ _tiEventTriggerInfoMap tableInfo
  table <- onNothing maybeTable $ throw400 NotExists $
           "event trigger with name " <> name <<> " does not exist"
@@ -210,11 +202,10 @@ runDeleteEventTriggerQuery (DeleteEventTriggerQuery source name) = do
  withNewInconsistentObjsCheck
    $ buildSchemaCache
    $ MetadataModifier
    $ tableMetadataSetter @('Postgres pgKind) source table %~ dropEventTriggerInMetadata name
    $ tableMetadataSetter @b source table %~ dropEventTriggerInMetadata name

  dropTriggerAndArchiveEvents @b (_siConfiguration sourceInfo) name

  liftEitherM $ liftIO $ runPgSourceWriteTx (_siConfiguration sourceInfo) $ do
    PG.delTriggerQ name
    archiveEvents name
  pure successMsg

dropEventTriggerInMetadata :: TriggerName -> TableMetadata b -> TableMetadata b
@@ -287,3 +278,84 @@ askEventTriggerInfo sourceName triggerName = do
-- 63 - (notify_hasura_) - (_INSERT | _UPDATE | _DELETE)
maxTriggerNameLength :: Int
maxTriggerNameLength = 42
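The 42 follows from Postgres' 63-character identifier limit and the fixed affixes used by pgIdenTrigger:

  -- length "notify_hasura_"                          == 14
  -- length "_INSERT" = length "_UPDATE"
  --                  = length "_DELETE"              ==  7
  -- 63 - 14 - 7                                      == 42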

getHeaderInfosFromConf
  :: QErrM m
  => Env.Environment
  -> [HeaderConf]
  -> m [EventHeaderInfo]
getHeaderInfosFromConf env = mapM getHeader
  where
    getHeader :: QErrM m => HeaderConf -> m EventHeaderInfo
    getHeader hconf = case hconf of
      (HeaderConf _ (HVValue val)) -> return $ EventHeaderInfo hconf val
      (HeaderConf _ (HVEnv val))   -> do
        envVal <- getEnv env val
        return $ EventHeaderInfo hconf envVal

getWebhookInfoFromConf
  :: QErrM m
  => Env.Environment
  -> WebhookConf
  -> m WebhookConfInfo
getWebhookInfoFromConf env wc = case wc of
  WCValue w -> do
    resolvedWebhook <- resolveWebhook env w
    return $ WebhookConfInfo wc $ unResolvedWebhook resolvedWebhook
  WCEnv we -> do
    envVal <- getEnv env we
    return $ WebhookConfInfo wc envVal
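A hedged end-to-end example (header name and environment variable invented):

  -- The metadata fragment
  --   {"name": "X-Api-Key", "value_from_env": "API_KEY"}
  -- parses to
  --   HeaderConf "X-Api-Key" (HVEnv "API_KEY")
  -- and getHeaderInfosFromConf resolves it by reading API_KEY from the
  -- given Environment, failing with a QErr when the variable is unset.
  -- getWebhookInfoFromConf treats a WCEnv webhook the same way.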

buildEventTriggerInfo
  :: forall b m
  . (Backend b, QErrM m)
  => Env.Environment
  -> SourceName
  -> TableName b
  -> EventTriggerConf b
  -> m (EventTriggerInfo b, [SchemaDependency])
buildEventTriggerInfo env source tableName (EventTriggerConf name def webhook webhookFromEnv rconf mheaders) = do
  webhookConf <- case (webhook, webhookFromEnv) of
    (Just w, Nothing)    -> return $ WCValue w
    (Nothing, Just wEnv) -> return $ WCEnv wEnv
    _                    -> throw500 "expected webhook or webhook_from_env"
  let headerConfs = fromMaybe [] mheaders
  webhookInfo <- getWebhookInfoFromConf env webhookConf
  headerInfos <- getHeaderInfosFromConf env headerConfs
  let eTrigInfo = EventTriggerInfo name def rconf webhookInfo headerInfos
      tabDep = SchemaDependency
                 (SOSourceObj source
                   $ AB.mkAnyBackend
                   $ SOITable @b tableName)
                 DRParent
  pure (eTrigInfo, tabDep:getTrigDefDeps @b source tableName def)

getTrigDefDeps
  :: forall b
  . Backend b
  => SourceName
  -> TableName b
  -> TriggerOpsDef b
  -> [SchemaDependency]
getTrigDefDeps source tableName (TriggerOpsDef mIns mUpd mDel _) =
  mconcat $ catMaybes [ subsOpSpecDeps <$> mIns
                      , subsOpSpecDeps <$> mUpd
                      , subsOpSpecDeps <$> mDel
                      ]
  where
    subsOpSpecDeps :: SubscribeOpSpec b -> [SchemaDependency]
    subsOpSpecDeps os =
      let cols = getColsFromSub $ sosColumns os
          mkColDependency dependencyReason col =
            SchemaDependency
              (SOSourceObj source
                $ AB.mkAnyBackend
                $ SOITableObj @b tableName (TOCol @b col))
              dependencyReason
          colDeps = map (mkColDependency DRColumn) cols
          payload = maybe [] getColsFromSub (sosPayload os)
          payloadDeps = map (mkColDependency DRPayload) payload
      in colDeps <> payloadDeps
    getColsFromSub sc = case sc of
      SubCStar       -> []
      SubCArray cols -> cols
@@ -24,14 +24,16 @@ import qualified Data.HashMap.Strict as Map
import qualified Data.HashMap.Strict.InsOrd.Extended as OMap
import qualified Data.HashSet as HS
import qualified Data.List as L
import qualified Data.TByteString as TBS

import Control.Lens ((.~), (^?))
import Data.Aeson
import Data.Has (Has, getter)
import Data.Text.Extended ((<<>))

import qualified Hasura.Logging as HL
import qualified Hasura.SQL.AnyBackend as AB

import Hasura.Backends.Postgres.DDL.Table (delTriggerQ)
import Hasura.Metadata.Class
import Hasura.RQL.DDL.Action
import Hasura.RQL.DDL.ComputedField

@@ -51,6 +53,7 @@ import Hasura.Base.Error
import Hasura.EncJSON
import Hasura.RQL.DDL.Metadata.Types
import Hasura.RQL.Types
import Hasura.RQL.Types.Eventing.Backend (BackendEventTrigger (..))
import Hasura.Server.Types (ExperimentalFeature (..))
@@ -60,6 +63,8 @@ runClearMetadata
     , MetadataM m
     , HasServerConfigCtx m
     , MonadMetadataStorageQueryAPI m
     , MonadReader r m
     , Has (HL.Logger HL.Hasura) r
     )
  => ClearMetadata -> m EncJSON
runClearMetadata _ = do

@@ -82,7 +87,7 @@ runClearMetadata _ = do
        & metaSources %~ OMap.insert defaultSource emptyDefaultSource
  runReplaceMetadataV1 $ RMWithSources emptyMetadata'

{- Note [Clear postgres schema for dropped triggers]
{- Note [Cleanup for dropped triggers]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There was an issue (https://github.com/hasura/graphql-engine/issues/5461)
fixed (via https://github.com/hasura/graphql-engine/pull/6137) related to

@@ -90,7 +95,7 @@ event triggers while replacing metadata in the catalog prior to metadata
separation. The metadata separation solves the issue naturally, since the
'hdb_catalog.event_triggers' table is no more in use and new/updated event
triggers are processed in building schema cache. But we need to drop the
pg trigger and archive events for dropped event triggers. This is handled
database trigger and archive events for dropped event triggers. This is handled
explicitly in @'runReplaceMetadata' function.
-}
@@ -102,6 +107,8 @@ runReplaceMetadata
     , MonadIO m
     , MonadMetadataStorageQueryAPI m
     , HasServerConfigCtx m
     , MonadReader r m
     , Has (HL.Logger HL.Hasura) r
     )
  => ReplaceMetadata -> m EncJSON
runReplaceMetadata = \case

@@ -115,22 +122,27 @@ runReplaceMetadataV1
     , MonadIO m
     , MonadMetadataStorageQueryAPI m
     , HasServerConfigCtx m
     , MonadReader r m
     , Has (HL.Logger HL.Hasura) r
     )
  => ReplaceMetadataV1 -> m EncJSON
runReplaceMetadataV1 =
  (successMsg <$) . runReplaceMetadataV2 . ReplaceMetadataV2 NoAllowInconsistentMetadata

runReplaceMetadataV2
  :: forall m
  :: forall m r
  . ( QErrM m
    , CacheRWM m
    , MetadataM m
    , MonadIO m
    , HasServerConfigCtx m
    , MonadMetadataStorageQueryAPI m
    , MonadReader r m
    , Has (HL.Logger HL.Hasura) r
    )
  => ReplaceMetadataV2 -> m EncJSON
runReplaceMetadataV2 ReplaceMetadataV2{..} = do
  logger :: (HL.Logger HL.Hasura) <- asks getter
  -- we drop all the future cron trigger events before inserting the new metadata
  -- and re-populating future cron events below
  experimentalFeatures <- _sccExperimentalFeatures <$> askServerConfigCtx

@@ -180,14 +192,11 @@ runReplaceMetadataV2 ReplaceMetadataV2{..} = do
  for_ cronTriggersToBeAdded $ \CronTriggerMetadata {..} ->
    populateInitialCronTriggerEvents ctSchedule ctName

  -- See Note [Clear postgres schema for dropped triggers]
  dropPostgresTriggers (getOnlyPGSources oldMetadata) (getOnlyPGSources metadata)
  -- See Note [Cleanup for dropped triggers]
  dropSourceSQLTriggers logger (_metaSources oldMetadata) (_metaSources metadata)

  encJFromJValue . formatInconsistentObjs . scInconsistentObjs <$> askSchemaCache
  where
    getOnlyPGSources :: Metadata -> InsOrdHashMap SourceName (SourceMetadata ('Postgres 'Vanilla))
    getOnlyPGSources = OMap.mapMaybe AB.unpackAnyBackend . _metaSources

{- Note [Cron triggers behaviour with replace metadata]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -243,33 +252,50 @@ runReplaceMetadataV2 ReplaceMetadataV2{..} = do
        pure $ allNewCronTriggers <> oldCronTriggersNotIncludedInMetadata
      pure $ (cronTriggers, cronTriggersToBeAdded)


    dropPostgresTriggers
      :: InsOrdHashMap SourceName (SourceMetadata ('Postgres 'Vanilla)) -- ^ old pg sources
      -> InsOrdHashMap SourceName (SourceMetadata ('Postgres 'Vanilla)) -- ^ new pg sources
    dropSourceSQLTriggers
      :: HL.Logger HL.Hasura
      -> InsOrdHashMap SourceName BackendSourceMetadata -- ^ old sources
      -> InsOrdHashMap SourceName BackendSourceMetadata -- ^ new sources
      -> m ()
    dropPostgresTriggers oldSources newSources =
      for_ (OMap.toList newSources) $ \(source, newSourceCache) ->
        onJust (OMap.lookup source oldSources) $ \oldSourceCache -> do
          let oldTriggersMap = getPGTriggersMap oldSourceCache
              newTriggersMap = getPGTriggersMap newSourceCache
              droppedTriggers = OMap.keys $ oldTriggersMap `OMap.difference` newTriggersMap
              catcher e@QErr{ qeCode }
                | qeCode == Unexpected = pure () -- NOTE: This information should be returned by the inconsistent_metadata response, so doesn't need additional logging.
                | otherwise = throwError e -- rethrow other errors
    dropSourceSQLTriggers (HL.Logger logger) oldSources newSources = do
      -- NOTE: the current implementation of this function has an edge case.
      -- The edge case is that when a `SourceA` which contained some event triggers
      -- is modified to point to a new database, this function will try to drop the
      -- SQL triggers of the dropped event triggers on the new database which doesn't exist.
      -- In the current implementation, this doesn't throw an error because the trigger is dropped
      -- using `DROP IF EXISTS..` meaning this silently fails without throwing an error.
      for_ (OMap.toList newSources) $ \(source, newBackendSourceMetadata) -> do
        onJust (OMap.lookup source oldSources) $ \oldBackendSourceMetadata ->
          compose source newBackendSourceMetadata oldBackendSourceMetadata \(newSourceMetadata :: SourceMetadata b) -> do
            dispatch oldBackendSourceMetadata \oldSourceMetadata -> do
              let oldTriggersMap = getTriggersMap oldSourceMetadata
                  newTriggersMap = getTriggersMap newSourceMetadata
                  droppedTriggers = OMap.keys $ oldTriggersMap `OMap.difference` newTriggersMap
                  catcher e@QErr{ qeCode }
                    | qeCode == Unexpected = pure () -- NOTE: This information should be returned by the inconsistent_metadata response, so doesn't need additional logging.
                    | otherwise = throwError e -- rethrow other errors

              -- This will swallow Unexpected exceptions for sources if allow_inconsistent_metadata is enabled
              -- This should be ok since if the sources are already missing from the cache then they should
              -- not need to be removed.
              --
              -- TODO: Determine if any errors should be thrown from askSourceConfig at all if the errors are just being discarded
              return $
                flip catchError catcher do
                  sourceConfig <- askSourceConfig @b source
                  for_ droppedTriggers $ dropTriggerAndArchiveEvents @b sourceConfig

          -- This will swallow Unexpected exceptions for sources if allow_inconsistent_metadata is enabled
          -- This should be ok since if the sources are already missing from the cache then they should
          -- not need to be removed.
          --
          -- TODO: Determine if any errors should be thrown from askSourceConfig at all if the errors are just being discarded
          flip catchError catcher do
            sourceConfig <- askSourceConfig @('Postgres 'Vanilla) source
            for_ droppedTriggers $
              \name ->
                liftIO $ runPgSourceWriteTx sourceConfig $ delTriggerQ name >> archiveEvents name
      where
        getPGTriggersMap = OMap.unions . map _tmEventTriggers . OMap.elems . _smTables
        getTriggersMap = OMap.unions . map _tmEventTriggers . OMap.elems . _smTables

        dispatch = AB.dispatchAnyBackend @BackendEventTrigger

        compose
          :: SourceName
          -> AB.AnyBackend i
          -> AB.AnyBackend i
          -> (forall b. BackendEventTrigger b => i b -> i b -> m ()) -> m ()
        compose sourceName x y f = AB.composeAnyBackend @BackendEventTrigger f x y (logger $ HL.UnstructuredLog HL.LevelInfo $ TBS.fromText $ "Event trigger clean up couldn't be done on the source " <> sourceName <<> " because it has changed its type")
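A sketch of the composeAnyBackend semantics assumed here (source name invented):

  -- compose "db1" newMeta oldMeta f
  --   * both sides wrap the same backend type  => f newMeta oldMeta runs
  --     and the dropped triggers are cleaned up;
  --   * the backends differ (e.g. a source re-added as MSSQL) => the
  --     fallback runs instead, i.e. the "changed its type" log above,
  --     and no cleanup is attempted.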

    processExperimentalFeatures :: HasServerConfigCtx m => Metadata -> m Metadata
    processExperimentalFeatures metadata = do

@@ -10,21 +10,21 @@ module Hasura.RQL.DDL.ScheduledTrigger
  , populateInitialCronTriggerEvents
  ) where

import System.Cron.Types (CronSchedule)
import System.Cron.Types (CronSchedule)

import Hasura.Prelude

import qualified Data.Aeson as J
import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as Map
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.Time.Clock as C
import qualified Data.Aeson as J
import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as Map
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.Time.Clock as C

import Hasura.Backends.Postgres.DDL.Table (getHeaderInfosFromConf)
import Hasura.Base.Error
import Hasura.EncJSON
import Hasura.Eventing.ScheduledTrigger
import Hasura.Metadata.Class
import Hasura.RQL.DDL.EventTrigger (getHeaderInfosFromConf)
import Hasura.RQL.Types

populateInitialCronTriggerEvents
@@ -58,6 +58,7 @@ import Hasura.GraphQL.Schema (buildGQLContext)
import Hasura.Metadata.Class
import Hasura.RQL.DDL.Action
import Hasura.RQL.DDL.CustomTypes
import Hasura.RQL.DDL.EventTrigger (buildEventTriggerInfo)
import Hasura.RQL.DDL.InheritedRoles (resolveInheritedRole)
import Hasura.RQL.DDL.RemoteRelationship (PartiallyResolvedSource (..))
import Hasura.RQL.DDL.RemoteSchema
@@ -86,7 +86,7 @@ validateCountQWith sessVarBldr prepValBldr (CountQuery qt _ mDistCols mWhere) =

  forM_ mDistCols $ \distCols -> do
    let distColAsrns = [ checkSelOnCol selPerm
                       , assertPGCol colInfoMap relInDistColsErr]
                       , assertColumnExists colInfoMap relInDistColsErr]
    withPathK "distinct" $ verifyAsrns distColAsrns distCols

  -- convert the where clause
@@ -350,19 +350,6 @@ dmlTxErrorHandler = mkTxErrorHandler $ \case
      , PGInvalidColumnReference ]
    _ -> False

toJSONableExp :: Bool -> ColumnType ('Postgres pgKind) -> Bool -> S.SQLExp -> S.SQLExp
toJSONableExp strfyNum colTy asText expn
  | asText || (isScalarColumnWhere isBigNum colTy && strfyNum) =
      expn `S.SETyAnn` S.textTypeAnn
  | isScalarColumnWhere isGeoType colTy =
      S.SEFnApp "ST_AsGeoJSON"
        [ expn
        , S.SEUnsafe "15" -- max decimal digits
        , S.SEUnsafe "4" -- to print out crs
        ] Nothing
      `S.SETyAnn` S.jsonTypeAnn
  | otherwise = expn

-- validate headers
validateHeaders :: (UserInfoM m, QErrM m) => HashSet Text -> m ()
validateHeaders depHeaders = do
@@ -23,7 +23,7 @@ module Hasura.RQL.Types
  , askTableCoreInfoSource
  , askFieldInfoMap
  , askFieldInfoMapSource
  , assertPGCol
  , assertColumnExists
  , askRelType
  , askComputedFieldInfo
  , askRemoteRel
@@ -288,14 +288,15 @@ askComputedFieldInfo fields computedField = do
    , computedField <<> " is a " <> fieldType <> "; "
    ]

assertPGCol :: (MonadError QErr m, Backend backend)
            => FieldInfoMap (FieldInfo backend)
            -> Text
            -> Column backend
            -> m ()
assertPGCol m msg c = do
  _ <- askColInfo m c msg
  return ()
assertColumnExists
  :: forall backend m
  . (MonadError QErr m, Backend backend)
  => FieldInfoMap (FieldInfo backend)
  -> Text
  -> Column backend
  -> m ()
assertColumnExists m msg c = do
  void $ askColInfo m c msg

askRelType :: (MonadError QErr m)
           => FieldInfoMap (FieldInfo backend)
@@ -40,22 +40,38 @@ class Backend b => BackendEventTrigger (b :: BackendType) where
    -> EventId
    -> m ()

  -- | @dropTriggerAndArchiveEvents@ drops the database trigger and
  -- marks all the events related to the event trigger as archived.
  -- See Note [Cleanup for dropped triggers]
  dropTriggerAndArchiveEvents
    :: ( MonadIO m
       , MonadError QErr m
       )
    => SourceConfig b
    -> TriggerName
    -> m ()

instance BackendEventTrigger ('Postgres 'Vanilla) where
  insertManualEvent = PG.insertManualEvent
  redeliverEvent = PG.redeliverEvent
  dropTriggerAndArchiveEvents = PG.dropTriggerAndArchiveEvents

instance BackendEventTrigger ('Postgres 'Citus) where
  insertManualEvent _ _ _ _ _ _ = throw400 NotSupported $ "Event triggers are not supported for Citus sources"
  redeliverEvent _ _ = throw400 NotSupported $ "Event triggers are not supported for Citus sources"
  dropTriggerAndArchiveEvents _ _ = throw400 NotSupported $ "Event triggers are not supported for Citus sources"

instance BackendEventTrigger 'MSSQL where
  insertManualEvent _ _ _ _ _ _ = throw400 NotSupported "Event triggers are not supported for MS-SQL sources"
  redeliverEvent _ _ = throw400 NotSupported "Event triggers are not supported for MS-SQL sources"
  dropTriggerAndArchiveEvents _ _ = throw400 NotSupported $ "Event triggers are not supported for MS-SQL sources"

instance BackendEventTrigger 'BigQuery where
  insertManualEvent _ _ _ _ _ _ = throw400 NotSupported "Event triggers are not supported for BigQuery sources"
  redeliverEvent _ _ = throw400 NotSupported "Event triggers are not supported for BigQuery sources"
  dropTriggerAndArchiveEvents _ _ = throw400 NotSupported $ "Event triggers are not supported for BigQuery sources"

instance BackendEventTrigger 'MySQL where
  insertManualEvent _ _ _ _ _ _ = throw400 NotSupported "Event triggers are not supported for MySQL sources"
  redeliverEvent _ _ = throw400 NotSupported "Event triggers are not supported for MySQL sources"
  dropTriggerAndArchiveEvents _ _ = throw400 NotSupported $ "Event triggers are not supported for MySQL sources"
@@ -71,14 +71,6 @@ class ( Backend b
    -> TriggerOpsDef b
    -> m (Either QErr ())

  buildEventTriggerInfo
    :: MonadError QErr m
    => Env.Environment
    -> SourceName
    -> TableName b
    -> EventTriggerConf b
    -> m (EventTriggerInfo b, [SchemaDependency])

  parseBoolExpOperations
    :: (MonadError QErr m, TableCoreInfoRM b m)
    => ValueParser b m v
@@ -10,6 +10,7 @@ module Hasura.SQL.AnyBackend
  , dispatchAnyBackend
  , dispatchAnyBackend'
  , dispatchAnyBackendArrow
  , dispatchAnyBackendWithTwoConstraints
  , unpackAnyBackend
  , composeAnyBackend
  , runBackend
@ -272,6 +273,19 @@ dispatchAnyBackend
|
||||
-> r
|
||||
dispatchAnyBackend e f = $(mkDispatch 'f 'e)
|
||||
|
||||
dispatchAnyBackendWithTwoConstraints
|
||||
:: forall
|
||||
(c1 :: BackendType -> Constraint)
|
||||
(c2 :: BackendType -> Constraint)
|
||||
(i :: BackendType -> Type)
|
||||
(r :: Type)
|
||||
. AllBackendsSatisfy c1
|
||||
=> AllBackendsSatisfy c2
|
||||
=> AnyBackend i
|
||||
-> (forall (b :: BackendType). c1 b => c2 b => i b -> r)
|
||||
-> r
|
||||
dispatchAnyBackendWithTwoConstraints e f = $(mkDispatch 'f 'e)
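
For intuition, the Template Haskell splice generated by mkDispatch amounts to
an exhaustive case over the backend tags, bringing both constraint
dictionaries into scope in each branch. The expansion below is an
illustrative sketch only; the constructor names are assumptions about the
generated shape of AnyBackend, not code from this commit:

-- Roughly what $(mkDispatch 'f 'e) expands to (illustrative):
-- dispatchAnyBackendWithTwoConstraints e f = case e of
--   PostgresVanillaValue x -> f x
--   PostgresCitusValue x   -> f x
--   MSSQLValue x           -> f x
--   BigQueryValue x        -> f x
--   MySQLValue x           -> f x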

-- | Unlike 'dispatchAnyBackend', the expected constraint has a different kind.
-- Use for classes like 'Show', 'ToJSON', etc.
dispatchAnyBackend'

@ -92,5 +92,7 @@ remoteRelationshipCommands =
  ]
eventTriggerCommands =
  [ commandParser "invoke_event_trigger" $ RMInvokeEventTrigger . mkAnyBackend @b
  , commandParser "create_event_trigger" $ RMCreateEventTrigger . mkAnyBackend @b
  , commandParser "delete_event_trigger" $ RMDeleteEventTrigger . mkAnyBackend @b
  , commandParser "redeliver_event" $ RMRedeliverEvent . mkAnyBackend @b
  ]
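
In effect, once these parsers run for a backend b, a backend-prefixed command
resolves to the corresponding AnyBackend-wrapped constructor, ready for
dispatch on the handler side. A small sketch (assuming b ~ 'Postgres 'Vanilla;
the helper name is hypothetical):

-- Hypothetical: what the "create_event_trigger" command yields for a
-- Postgres source once parsed.
wrapCreate :: CreateEventTriggerQuery ('Postgres 'Vanilla) -> RQLMetadataV1
wrapCreate = RMCreateEventTrigger . mkAnyBackend @('Postgres 'Vanilla)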

@ -105,11 +105,9 @@ data RQLMetadataV1
  | RMAddComputedField !(AddComputedField ('Postgres 'Vanilla))
  | RMDropComputedField !(DropComputedField ('Postgres 'Vanilla))

  -- Tables event triggers (PG-specific)
  | RMPgCreateEventTrigger !(CreateEventTriggerQuery ('Postgres 'Vanilla))
  | RMPgDeleteEventTrigger !(DeleteEventTriggerQuery ('Postgres 'Vanilla))

  -- Event Trigger APIs
  -- Tables event triggers
  | RMCreateEventTrigger !(AnyBackend CreateEventTriggerQuery)
  | RMDeleteEventTrigger !(AnyBackend DeleteEventTriggerQuery)
  | RMRedeliverEvent !(AnyBackend RedeliverEventQuery)
  | RMInvokeEventTrigger !(AnyBackend InvokeEventTriggerQuery)

@ -466,8 +464,8 @@ runMetadataQueryV1M env currentResourceVersion = \case
  RMAddComputedField q -> runAddComputedField q
  RMDropComputedField q -> runDropComputedField q

  RMPgCreateEventTrigger q -> runCreateEventTriggerQuery q
  RMPgDeleteEventTrigger q -> runDeleteEventTriggerQuery q
  RMCreateEventTrigger q -> dispatchMetadata runCreateEventTriggerQuery q
  RMDeleteEventTrigger q -> dispatchMetadataAndEventTrigger runDeleteEventTriggerQuery q
  RMRedeliverEvent q -> dispatchEventTrigger runRedeliverEvent q
  RMInvokeEventTrigger q -> dispatchEventTrigger runInvokeEventTrigger q

@ -541,7 +539,11 @@ runMetadataQueryV1M env currentResourceVersion = \case
    dispatchEventTrigger :: (forall b. BackendEventTrigger b => i b -> a) -> AnyBackend i -> a
    dispatchEventTrigger f x = dispatchAnyBackend @BackendEventTrigger x f

    dispatchMetadataAndEventTrigger
      :: (forall b. (BackendMetadata b, BackendEventTrigger b) => i b -> a)
      -> AnyBackend i
      -> a
    dispatchMetadataAndEventTrigger f x = dispatchAnyBackendWithTwoConstraints @BackendMetadata @BackendEventTrigger x f

runMetadataQueryV2M
  :: ( MonadIO m
@ -549,6 +551,8 @@ runMetadataQueryV2M
     , MetadataM m
     , MonadMetadataStorageQueryAPI m
     , HasServerConfigCtx m
     , MonadReader r m
     , Has (L.Logger L.Hasura) r
     )
  => MetadataResourceVersion
  -> RQLMetadataV2

@ -64,10 +64,10 @@ data RQLMetadataV1
  | RMAddComputedField !(AddComputedField ('Postgres 'Vanilla))
  | RMDropComputedField !(DropComputedField ('Postgres 'Vanilla))

  -- Tables event triggers (PG-specific)
  | RMPgCreateEventTrigger !(CreateEventTriggerQuery ('Postgres 'Vanilla))
  | RMPgDeleteEventTrigger !(DeleteEventTriggerQuery ('Postgres 'Vanilla))
  | RMRedeliverEvent !(AnyBackend RedeliverEventQuery)
  -- Tables event triggers
  | RMCreateEventTrigger !(AnyBackend CreateEventTriggerQuery)
  | RMDeleteEventTrigger !(AnyBackend DeleteEventTriggerQuery)
  | RMRedeliverEvent !(AnyBackend RedeliverEventQuery)
  | RMInvokeEventTrigger !(AnyBackend InvokeEventTriggerQuery)

  -- Remote schemas

@ -13,8 +13,10 @@ import Control.Monad.Unique
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
import Data.Has (Has)
import Network.HTTP.Client.Extended

import qualified Hasura.Logging as L
import qualified Hasura.Tracing as Tracing

import Hasura.Backends.Postgres.DDL.RunSQL
@ -182,12 +184,13 @@ runQuery
     , MonadQueryTags m
     )
  => Env.Environment
  -> L.Logger L.Hasura
  -> InstanceId
  -> UserInfo -> RebuildableSchemaCache -> HTTP.Manager
  -> ServerConfigCtx -> RQLQuery -> m (EncJSON, RebuildableSchemaCache)
runQuery env instanceId userInfo sc hMgr serverConfigCtx query = do
runQuery env logger instanceId userInfo sc hMgr serverConfigCtx query = do
  (metadata, currentResourceVersion) <- fetchMetadata
  result <- runQueryM env query & Tracing.interpTraceT \x -> do
  result <- runReaderT (runQueryM env query) logger & Tracing.interpTraceT \x -> do
    (((js, tracemeta), meta), rsc, ci) <-
      x & runMetadataT metadata
        & runCacheRWT sc
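
The runReaderT wrapper is what discharges the new MonadReader r m and
Has (L.Logger L.Hasura) r constraints on runQueryM: Data.Has provides an
instance Has a a, so the logger alone can serve as the reader environment.
A minimal sketch of how downstream code retrieves it (hypothetical helper,
using getter from Data.Has):

-- Any computation running under these constraints can fetch the logger:
askLogger :: (MonadReader r m, Has (L.Logger L.Hasura) r) => m (L.Logger L.Hasura)
askLogger = asks getter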

@ -355,6 +358,8 @@ runQueryM
     , MetadataM m
     , MonadMetadataStorageQueryAPI m
     , MonadQueryTags m
     , MonadReader r m
     , Has (L.Logger L.Hasura) r
     )
  => Env.Environment
  -> RQLQuery

@ -443,10 +443,10 @@ v1QueryHandler query = do
  (liftEitherM . authorizeV1QueryApi query) =<< ask
  scRef <- asks (scCacheRef . hcServerCtx)
  logger <- asks (scLogger . hcServerCtx)
  res <- bool (fst <$> action) (withSCUpdate scRef logger Nothing action) $ queryModifiesSchemaCache query
  res <- bool (fst <$> (action logger)) (withSCUpdate scRef logger Nothing (action logger)) $ queryModifiesSchemaCache query
  return $ HttpResponse res []
  where
    action = do
    action logger = do
      userInfo <- asks hcUser
      scRef <- asks (scCacheRef . hcServerCtx)
      schemaCache <- fmap fst $ liftIO $ readIORef $ _scrCache scRef
@ -459,7 +459,7 @@ v1QueryHandler query = do
      maintenanceMode <- asks (scEnableMaintenanceMode . hcServerCtx)
      experimentalFeatures <- asks (scExperimentalFeatures . hcServerCtx)
      let serverConfigCtx = ServerConfigCtx functionPermsCtx remoteSchemaPermsCtx sqlGenCtx maintenanceMode experimentalFeatures
      runQuery env instanceId userInfo schemaCache httpMgr
      runQuery env logger instanceId userInfo schemaCache httpMgr
        serverConfigCtx query

v1MetadataHandler

@ -4,6 +4,7 @@ module Hasura.Server.MigrateSpec (CacheRefT(..), spec) where

import Hasura.Prelude

import qualified Data.ByteString.Lazy.UTF8 as LBS
import qualified Data.Environment as Env
import qualified Database.PG.Query as Q
import qualified Network.HTTP.Client.Extended as HTTP
@ -13,12 +14,14 @@ import Control.Monad.Morph
import Control.Monad.Trans.Control (MonadBaseControl)
import Control.Monad.Unique
import Control.Natural ((:~>) (..))
import Data.Aeson (encode)
import Data.Time.Clock (getCurrentTime)
import Test.Hspec.Core.Spec
import Test.Hspec.Expectations.Lifted

import Hasura.Backends.Postgres.Connection
import Hasura.Base.Error
import Hasura.Logging
import Hasura.Metadata.Class
import Hasura.RQL.DDL.Metadata (ClearMetadata (..), runClearMetadata)
import Hasura.RQL.DDL.Schema
@ -137,6 +140,10 @@ spec srcConfig pgExecCtx pgConnInfo = do

  describe "recreateSystemMetadata" $ do
    let dumpMetadata = execPGDump (PGDumpReqBody defaultSource ["--schema=hdb_catalog"] False) pgConnInfo
        logger :: Logger Hasura = Logger $ \l -> do
          let (logLevel, logType :: EngineLogType Hasura, logDetail) = toEngineLog l
          t <- liftIO $ getFormattedTime Nothing
          liftIO $ putStrLn $ LBS.toString $ encode $ EngineLog t logLevel logType logDetail

    it "is idempotent" \(NT transact) -> do
      env <- Env.getEnvironment
@ -157,7 +164,7 @@ spec srcConfig pgExecCtx pgConnInfo = do
      time <- getCurrentTime
      transact (dropAndInit env time) `shouldReturn` MRInitialized
      firstDump <- transact dumpMetadata
      transact (runClearMetadata ClearMetadata) `shouldReturn` successMsg
      transact (flip runReaderT logger $ runClearMetadata ClearMetadata) `shouldReturn` successMsg
      secondDump <- transact dumpMetadata
      secondDump `shouldBe` firstDump