Remove explicit case on the backend tag in Cache

### Description

As part of the cache building process, we create / update / migrate the catalog that each DB uses as a place to store event trigger information. The function that decides how this should be done was doing an explicit `case ... of` on the backend tag, instead of delegating to one of the backend classes. The downsides of this are that:
- it adds a "friction point" where the backend matters in the core of the engine, which is otherwise written to be almost entirely backend-agnostic
- it creates imports from deep in the engine to the `Backends`, which we try to restrict to a very small set of clearly identified files (the `Instances` files)
- it is currently implemented using a "catch all" default case, which might not always be correct for new backends

This PR makes the catalog updating process a part of `BackendMetadata`, and cleans the corresponding schema cache code.

PR-URL: https://github.com/hasura/graphql-engine-mono/pull/4457
GitOrigin-RevId: 592f0eaa97a7c38f4e6d4400e1d2353aab12c97e
This commit is contained in:
Antoine Leblanc 2022-05-05 14:43:50 +01:00 committed by hasura-bot
parent 97ac9cbcdc
commit 04d8f068b6
10 changed files with 45 additions and 45 deletions

View File

@ -3,10 +3,13 @@
module Hasura.Backends.BigQuery.Instances.Metadata () where
import Hasura.Backends.BigQuery.DDL qualified as BigQuery
import Hasura.Prelude
import Hasura.RQL.Types.EventTrigger (RecreateEventTriggers (RETDoNothing))
import Hasura.RQL.Types.Metadata.Backend
import Hasura.SQL.Backend
instance BackendMetadata 'BigQuery where
prepareCatalog = const $ pure RETDoNothing
buildComputedFieldInfo = BigQuery.buildComputedFieldInfo
fetchAndValidateEnumValues = BigQuery.fetchAndValidateEnumValues
resolveSourceConfig = BigQuery.resolveSourceConfig

View File

@ -22,6 +22,7 @@ import Hasura.Prelude
import Hasura.RQL.IR.BoolExp (OpExpG (..), PartialSQLExp (..))
import Hasura.RQL.Types.Column qualified as RQL.T.C
import Hasura.RQL.Types.Common (OID (..), SourceName)
import Hasura.RQL.Types.EventTrigger (RecreateEventTriggers (RETDoNothing))
import Hasura.RQL.Types.Metadata (SourceMetadata (..))
import Hasura.RQL.Types.Metadata.Backend (BackendMetadata (..))
import Hasura.RQL.Types.Source (ResolvedSource (..))
@ -38,6 +39,7 @@ import Servant.Client (AsClientT)
import Witch qualified
instance BackendMetadata 'DataConnector where
prepareCatalog = const $ pure RETDoNothing
resolveSourceConfig = resolveSourceConfig'
resolveDatabaseMetadata = resolveDatabaseMetadata'
parseBoolExpOperations = parseBoolExpOperations'

View File

@ -13,7 +13,7 @@ module Hasura.Backends.MSSQL.DDL.Source
( resolveSourceConfig,
resolveDatabaseMetadata,
postDropSourceHook,
initCatalogForSource,
prepareCatalog,
)
where
@ -107,12 +107,14 @@ doesTableExist tableName = do
qualifiedTable = qualifyTableName tableName
-- | Initialise catalog tables for a source, including those required by the event delivery subsystem.
initCatalogForSource :: MonadMSSQLTx m => m RecreateEventTriggers
initCatalogForSource = do
prepareCatalog ::
(MonadIO m, MonadBaseControl IO m) =>
MSSQLSourceConfig ->
ExceptT QErr m RecreateEventTriggers
prepareCatalog sourceConfig = mssqlRunReadWrite (_mscExecCtx sourceConfig) do
hdbCatalogExist <- doesSchemaExist "hdb_catalog"
eventLogTableExist <- doesTableExist $ TableName "event_log" "hdb_catalog"
sourceVersionTableExist <- doesTableExist $ TableName "hdb_source_catalog_version" "hdb_catalog"
if
-- Fresh database
| not hdbCatalogExist -> liftMSSQLTx do

View File

@ -11,6 +11,7 @@ import Hasura.RQL.Types.Metadata.Backend
import Hasura.SQL.Backend
instance BackendMetadata 'MSSQL where
prepareCatalog = MSSQL.prepareCatalog
buildComputedFieldInfo = MSSQL.buildComputedFieldInfo
fetchAndValidateEnumValues = MSSQL.fetchAndValidateEnumValues
resolveSourceConfig = MSSQL.resolveSourceConfig

View File

@ -4,10 +4,12 @@ module Hasura.Backends.MySQL.Instances.Metadata () where
import Hasura.Backends.MySQL.Connection qualified as MySQL
import Hasura.Prelude
import Hasura.RQL.Types.EventTrigger (RecreateEventTriggers (RETDoNothing))
import Hasura.RQL.Types.Metadata.Backend
import Hasura.SQL.Backend
instance BackendMetadata 'MySQL where
prepareCatalog = const $ pure RETDoNothing
buildComputedFieldInfo = error "buildComputedFieldInfo: MySQL backend does not support this operation yet."
fetchAndValidateEnumValues = error "fetchAndValidateEnumValues: MySQL backend does not support this operation yet."
resolveSourceConfig = MySQL.resolveSourceConfig

View File

@ -15,7 +15,7 @@ module Hasura.Backends.Postgres.DDL.Source
( ToMetadataFetchQuery,
fetchTableMetadata,
fetchFunctionMetadata,
initCatalogForSource,
prepareCatalog,
postDropSourceHook,
resolveDatabaseMetadata,
resolveSourceConfig,
@ -33,7 +33,7 @@ import Data.HashMap.Strict qualified as Map
import Data.HashMap.Strict.InsOrd qualified as OMap
import Data.List.Extended qualified as LE
import Data.List.NonEmpty qualified as NE
import Data.Time.Clock (UTCTime)
import Data.Time.Clock (UTCTime, getCurrentTime)
import Database.PG.Query qualified as Q
import Hasura.Backends.Postgres.Connection
import Hasura.Backends.Postgres.DDL.Source.Version
@ -164,13 +164,14 @@ resolveDatabaseMetadata sourceMetadata sourceConfig sourceCustomization = runExc
map (_cfdFunction . _cfmDefinition) . OMap.elems . _tmComputedFields
-- | Initialise catalog tables for a source, including those required by the event delivery subsystem.
initCatalogForSource ::
forall m. MonadTx m => UTCTime -> m RecreateEventTriggers
initCatalogForSource migrationTime = do
prepareCatalog ::
(MonadIO m, MonadBaseControl IO m) =>
SourceConfig ('Postgres pgKind) ->
ExceptT QErr m RecreateEventTriggers
prepareCatalog sourceConfig = runTx (_pscExecCtx sourceConfig) Q.ReadWrite do
hdbCatalogExist <- doesSchemaExist "hdb_catalog"
eventLogTableExist <- doesTableExist "hdb_catalog" "event_log"
sourceVersionTableExist <- doesTableExist "hdb_catalog" "hdb_source_catalog_version"
if
-- Fresh database
| not hdbCatalogExist -> liftTx do
@ -219,6 +220,7 @@ initCatalogForSource migrationTime = do
case NE.nonEmpty neededMigrations of
Just nonEmptyNeededMigrations -> do
-- Migrations aren't empty. We need to update the catalog version after migrations
migrationTime <- liftIO getCurrentTime
liftTx $ traverse_ snd nonEmptyNeededMigrations
setCatalogVersion "43" migrationTime
Nothing ->

View File

@ -119,6 +119,7 @@ instance
) =>
BackendMetadata ('Postgres pgKind)
where
prepareCatalog = PG.prepareCatalog
buildComputedFieldInfo = PG.buildComputedFieldInfo
fetchAndValidateEnumValues = PG.fetchAndValidateEnumValues
resolveSourceConfig = PG.resolveSourceConfig

View File

@ -38,12 +38,6 @@ import Data.Proxy
import Data.Set qualified as S
import Data.Text.Extended
import Data.These (These (..))
import Data.Time.Clock (getCurrentTime)
import Database.PG.Query qualified as Q
import Hasura.Backends.MSSQL.Connection
import Hasura.Backends.MSSQL.DDL.Source qualified as MSSQL
import Hasura.Backends.Postgres.Connection
import Hasura.Backends.Postgres.DDL.Source (initCatalogForSource)
import Hasura.Base.Error
import Hasura.GraphQL.Execute.Types
import Hasura.GraphQL.Schema (buildGQLContext)
@ -97,7 +91,6 @@ import Hasura.SQL.Backend
import Hasura.SQL.BackendMap (BackendMap)
import Hasura.SQL.BackendMap qualified as BackendMap
import Hasura.SQL.Tag
import Hasura.SQL.Tag qualified as Tag
import Hasura.Server.Types
import Hasura.Session
import Hasura.Tracing qualified as Tracing
@ -175,7 +168,6 @@ newtype CacheRWT m a
MonadIO,
MonadReader r,
MonadError e,
MonadTx,
UserInfoM,
HasHttpManagerM,
MonadMetadataStorage,
@ -446,7 +438,6 @@ buildSchemaCacheRule logger env = proc (metadata, invalidationKeys) -> do
-< do
if numEventTriggers > 0
then do
migrationTime <- liftIO getCurrentTime
maintenanceMode <- _sccMaintenanceMode <$> askServerConfigCtx
eventingMode <- _sccEventingMode <$> askServerConfigCtx
readOnlyMode <- _sccReadOnlyMode <$> askServerConfigCtx
@ -459,18 +450,6 @@ buildSchemaCacheRule logger env = proc (metadata, invalidationKeys) -> do
-- when maintenance mode is enabled, don't perform any migrations
| maintenanceMode == (MaintenanceModeEnabled ()) -> pure RETDoNothing
| otherwise -> do
let initCatalogAction =
case backendTag @b of
Tag.PostgresVanillaTag -> do
runExceptT $ runTx (_pscExecCtx sourceConfig) Q.ReadWrite (initCatalogForSource migrationTime)
Tag.MSSQLTag -> do
runExceptT $
mssqlRunReadWrite (_mscExecCtx sourceConfig) MSSQL.initCatalogForSource
-- TODO: When event triggers are supported on new databases,
-- the initialization of the source catalog should also return
-- if the event triggers are to be re-created or not, essentially
-- replacing the `RETDoNothing` below
_ -> pure $ Right RETDoNothing
-- The `prepareCatalog` action is retried here because
-- in cloud there will be multiple workers (graphql-engine instances)
-- trying to migrate the source catalog, when needed. This introduces
@ -486,7 +465,7 @@ buildSchemaCacheRule logger env = proc (metadata, invalidationKeys) -> do
<> Retry.limitRetries 3
)
(const $ return . isLeft)
(const initCatalogAction)
(const $ runExceptT $ prepareCatalog @b sourceConfig)
else pure RETDoNothing
buildSource ::

View File

@ -83,9 +83,7 @@ class Backend b => BackendEventTrigger (b :: BackendType) where
-- | Ad-hoc function to set a retry for an undelivered event
setRetry ::
( MonadIO m,
MonadError QErr m
) =>
(MonadIO m, MonadError QErr m) =>
SourceConfig b ->
Event b ->
Time.UTCTime ->
@ -95,9 +93,7 @@ class Backend b => BackendEventTrigger (b :: BackendType) where
-- | @getMaintenanceModeVersion@ gets the source catalog version from the
-- source
getMaintenanceModeVersion ::
( MonadIO m,
MonadError QErr m
) =>
(MonadIO m, MonadError QErr m) =>
SourceConfig b ->
m MaintenanceModeVersion
@ -150,9 +146,7 @@ class Backend b => BackendEventTrigger (b :: BackendType) where
-- marks all the events related to the event trigger as archived.
-- See Note [Cleanup for dropped triggers]
dropTriggerAndArchiveEvents ::
( MonadIO m,
MonadError QErr m
) =>
(MonadIO m, MonadError QErr m) =>
SourceConfig b ->
TriggerName ->
TableName b ->
@ -166,9 +160,7 @@ class Backend b => BackendEventTrigger (b :: BackendType) where
-- case, we need to drop the trigger created by us earlier for the INSERT
-- trigger.
dropDanglingSQLTrigger ::
( MonadIO m,
MonadError QErr m
) =>
(MonadIO m, MonadError QErr m) =>
SourceConfig b ->
TriggerName ->
TableName b ->
@ -208,6 +200,13 @@ class Backend b => BackendEventTrigger (b :: BackendType) where
Maybe (PrimaryKey b (ColumnInfo b)) ->
m (Either QErr ())
--------------------------------------------------------------------------------
-- TODO: move those instances to 'Backend/*/Instances/Eventing' and create a
-- corresponding 'Instances.hs' file in this directory to import them, similarly
-- to how we import instances for other backend classes. This would
-- significantly reduce the number of files in the core engine that end up
-- depending / importing backend-specific files.
instance BackendEventTrigger ('Postgres 'Vanilla) where
insertManualEvent = PG.insertManualEvent
fetchUndeliveredEvents = PG.fetchUndeliveredEvents

View File

@ -121,4 +121,13 @@ class
TableName b ->
Either (ObjRelDef b) (ArrRelDef b) ->
m ()
validateRelationship = \_ _ _ -> pure ()
validateRelationship _ _ _ = pure ()
-- | Run all operations required to create, update, or migrate the internal
-- catalog used by the backend for internal bookkeeping, if any. The return
-- type indicates whether the performed operations subsequently require
-- re-creating event triggers.
prepareCatalog ::
(MonadIO m, MonadBaseControl IO m) =>
SourceConfig b ->
ExceptT QErr m RecreateEventTriggers