From 895f244a678260584671606d395e24bc5dbb14cd Mon Sep 17 00:00:00 2001
From: Alexis King
Date: Thu, 30 Jan 2020 17:55:09 -0600
Subject: [PATCH] Refactor common logic out of metadata processing

---
 server/src-lib/Hasura/RQL/DDL/Schema/Cache.hs | 52 +++++++------------
 .../Hasura/RQL/DDL/Schema/Cache/Common.hs     | 41 +++++++++++++--
 .../Hasura/RQL/DDL/Schema/Cache/Fields.hs     | 34 ++++--------
 3 files changed, 66 insertions(+), 61 deletions(-)

diff --git a/server/src-lib/Hasura/RQL/DDL/Schema/Cache.hs b/server/src-lib/Hasura/RQL/DDL/Schema/Cache.hs
index f4fc845a494..767f43bb0ff 100644
--- a/server/src-lib/Hasura/RQL/DDL/Schema/Cache.hs
+++ b/server/src-lib/Hasura/RQL/DDL/Schema/Cache.hs
@@ -207,7 +207,7 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
 
     -- remote schemas
     let remoteSchemaInvalidationKeys = Inc.selectD #_ikRemoteSchemas invalidationKeys
-    remoteSchemaMap <- buildRemoteSchemas -< (remoteSchemas, remoteSchemaInvalidationKeys)
+    remoteSchemaMap <- buildRemoteSchemas -< (remoteSchemaInvalidationKeys, remoteSchemas)
 
     returnA -< BuildOutputs
       { _boTables = tableCache
@@ -254,14 +254,7 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
       :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
         , Inc.ArrowCache m arr, MonadIO m, MonadTx m, MonadReader BuildReason m, HasSQLGenCtx m )
      => (TableCoreInfo, [CatalogEventTrigger]) `arr` EventTriggerInfoMap
-    buildTableEventTriggers = proc (tableInfo, eventTriggers) ->
-      (\infos -> M.catMaybes infos >- returnA) <-<
-        (| Inc.keyed (\_ duplicateEventTriggers -> do
-             maybeEventTrigger <- noDuplicates mkEventTriggerMetadataObject -< duplicateEventTriggers
-             (\info -> join info >- returnA) <-<
-               (| traverseA (\eventTrigger -> buildEventTrigger -< (tableInfo, eventTrigger))
-               |) maybeEventTrigger)
-        |) (M.groupOn _cetName eventTriggers)
+    buildTableEventTriggers = buildInfoMap _cetName mkEventTriggerMetadataObject buildEventTrigger
       where
         buildEventTrigger = proc (tableInfo, eventTrigger) -> do
           let CatalogEventTrigger qt trn configuration = eventTrigger
@@ -288,53 +281,44 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
     buildRemoteSchemas
       :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
         , Inc.ArrowCache m arr , MonadIO m, HasHttpManager m )
-      => ( [AddRemoteSchemaQuery]
-         , Inc.Dependency (HashMap RemoteSchemaName Inc.InvalidationKey)
-         ) `arr` HashMap RemoteSchemaName (AddRemoteSchemaQuery, RemoteSchemaCtx)
-    buildRemoteSchemas = proc (remoteSchemas, invalidationKeys) ->
-      -- TODO: Extract common code between this and buildTableEventTriggers
-      (\infos -> returnA -< M.catMaybes infos) <-<
-        (| Inc.keyed (\_ duplicateRemoteSchemas -> do
-             maybeRemoteSchema <- noDuplicates mkRemoteSchemaMetadataObject -< duplicateRemoteSchemas
-             (\info -> returnA -< join info) <-<
-               (| traverseA (\remoteSchema -> buildRemoteSchema -< (remoteSchema, invalidationKeys))
-               |) maybeRemoteSchema)
-        |) (M.groupOn _arsqName remoteSchemas)
+      => ( Inc.Dependency (HashMap RemoteSchemaName Inc.InvalidationKey)
+         , [AddRemoteSchemaQuery]
+         ) `arr` HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject)
+    buildRemoteSchemas =
+      buildInfoMapPreservingMetadata _arsqName mkRemoteSchemaMetadataObject buildRemoteSchema
       where
        -- We want to cache this call because it fetches the remote schema over HTTP, and we don’t
        -- want to re-run that if the remote schema definition hasn’t changed.
-        buildRemoteSchema = Inc.cache proc (remoteSchema, invalidationKeys) -> do
+        buildRemoteSchema = Inc.cache proc (invalidationKeys, remoteSchema) -> do
           Inc.dependOn -< Inc.selectKeyD (_arsqName remoteSchema) invalidationKeys
-          (| withRecordInconsistency (do
-               remoteGQLSchema <- liftEitherA <<< bindA -<
-                 runExceptT $ addRemoteSchemaP2Setup remoteSchema
-               returnA -< (remoteSchema, remoteGQLSchema))
+          (| withRecordInconsistency (liftEitherA <<< bindA -<
+               runExceptT $ addRemoteSchemaP2Setup remoteSchema)
           |) (mkRemoteSchemaMetadataObject remoteSchema)
 
     -- Builds the GraphQL schema and merges in remote schemas. This function is kind of gross, as
     -- it’s possible for the remote schema merging to fail, at which point we have to mark them
     -- inconsistent. This means we have to accumulate the consistent remote schemas as we go, in
-    -- addition to the build GraphQL context.
+    -- addition to the built GraphQL context.
     buildGQLSchema
      :: ( ArrowChoice arr, ArrowWriter (Seq InconsistentMetadata) arr, ArrowKleisli m arr
         , MonadError QErr m )
      => ( TableCache
         , FunctionCache
-         , HashMap RemoteSchemaName (AddRemoteSchemaQuery, RemoteSchemaCtx)
+         , HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject)
         ) `arr` (RemoteSchemaMap, GS.GCtxMap, GS.GCtx)
     buildGQLSchema = proc (tableCache, functionCache, remoteSchemas) -> do
       baseGQLSchema <- bindA -< GS.mkGCtxMap tableCache functionCache
-      (| foldlA' (\(remoteSchemaMap, gqlSchemas, remoteGQLSchemas) (remoteSchema, remoteGQLSchema) ->
+      (| foldlA' (\(remoteSchemaMap, gqlSchemas, remoteGQLSchemas)
+                   (remoteSchemaName, (remoteSchema, metadataObject)) ->
           (| withRecordInconsistency (do
-               let gqlSchema = convRemoteGCtx $ rscGCtx remoteGQLSchema
+               let gqlSchema = convRemoteGCtx $ rscGCtx remoteSchema
                mergedGQLSchemas <- bindErrorA -< mergeRemoteSchema gqlSchemas gqlSchema
                mergedRemoteGQLSchemas <- bindErrorA -< mergeGCtx remoteGQLSchemas gqlSchema
-               let mergedRemoteSchemaMap =
-                     M.insert (_arsqName remoteSchema) remoteGQLSchema remoteSchemaMap
+               let mergedRemoteSchemaMap = M.insert remoteSchemaName remoteSchema remoteSchemaMap
               returnA -< (mergedRemoteSchemaMap, mergedGQLSchemas, mergedRemoteGQLSchemas))
-          |) (mkRemoteSchemaMetadataObject remoteSchema)
+          |) metadataObject
           >-> (| onNothingA ((remoteSchemaMap, gqlSchemas, remoteGQLSchemas) >- returnA) |))
-      |) (M.empty, baseGQLSchema, GC.emptyGCtx) (M.elems remoteSchemas)
+      |) (M.empty, baseGQLSchema, GC.emptyGCtx) (M.toList remoteSchemas)
 
 -- | @'withMetadataCheck' cascade action@ runs @action@ and checks if the schema changed as a
diff --git a/server/src-lib/Hasura/RQL/DDL/Schema/Cache/Common.hs b/server/src-lib/Hasura/RQL/DDL/Schema/Cache/Common.hs
index fc008381565..f15183d6911 100644
--- a/server/src-lib/Hasura/RQL/DDL/Schema/Cache/Common.hs
+++ b/server/src-lib/Hasura/RQL/DDL/Schema/Cache/Common.hs
@@ -7,7 +7,7 @@ module Hasura.RQL.DDL.Schema.Cache.Common where
 
 import           Hasura.Prelude
 
-import qualified Data.HashMap.Strict          as M
+import qualified Data.HashMap.Strict.Extended as M
 import qualified Data.HashSet                 as HS
 import qualified Data.Sequence                as Seq
 
@@ -55,9 +55,9 @@ data BuildOutputs
   = BuildOutputs
   { _boTables        :: !TableCache
   , _boFunctions     :: !FunctionCache
-  , _boRemoteSchemas :: !(HashMap RemoteSchemaName (AddRemoteSchemaQuery, RemoteSchemaCtx))
-  -- ^ We preserve the 'AddRemoteSchemaQuery' from the original catalog metadata in the output so we
-  -- can reuse it later if we need to mark the remote schema inconsistent during GraphQL schema
+  , _boRemoteSchemas :: !(HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject))
+  -- ^ We preserve the 'MetadataObject' from the original catalog metadata in the output so we can
+  -- reuse it later if we need to mark the remote schema inconsistent during GraphQL schema
   -- generation (because of field conflicts).
   , _boAllowlist     :: !(HS.HashSet GQLQuery)
   } deriving (Show, Eq)
@@ -104,5 +104,38 @@ noDuplicates mkMetadataObject = proc values -> case values of
     returnA -< Nothing
 {-# INLINABLE noDuplicates #-}
 
+-- | Processes a list of catalog metadata into a map of processed information, marking any duplicate
+-- entries inconsistent.
+buildInfoMap
+  :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
+     , Eq k, Hashable k )
+  => (a -> k)
+  -> (a -> MetadataObject)
+  -> (e, a) `arr` Maybe b
+  -> (e, [a]) `arr` HashMap k b
+buildInfoMap extractKey mkMetadataObject buildInfo = proc (e, infos) ->
+      (M.groupOn extractKey infos >- returnA)
+  >-> (| Inc.keyed (\_ duplicateInfos ->
+           (duplicateInfos >- noDuplicates mkMetadataObject)
+       >-> (| traverseA (\info -> (e, info) >- buildInfo) |)
+       >-> (\info -> join info >- returnA)) |)
+  >-> (\infoMap -> M.catMaybes infoMap >- returnA)
+{-# INLINABLE buildInfoMap #-}
+
+-- | Like 'buildInfoMap', but includes each processed info’s associated 'MetadataObject' in the
+-- result. This is useful if the results will be further processed, and the 'MetadataObject' is
+-- still needed to mark the object inconsistent.
+buildInfoMapPreservingMetadata
+  :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
+     , Eq k, Hashable k )
+  => (a -> k)
+  -> (a -> MetadataObject)
+  -> (e, a) `arr` Maybe b
+  -> (e, [a]) `arr` HashMap k (b, MetadataObject)
+buildInfoMapPreservingMetadata extractKey mkMetadataObject buildInfo =
+  buildInfoMap extractKey mkMetadataObject proc (e, info) ->
+    ((e, info) >- buildInfo) >-> \result -> result <&> (, mkMetadataObject info) >- returnA
+{-# INLINABLE buildInfoMapPreservingMetadata #-}
+
 addTableContext :: QualifiedTable -> Text -> Text
 addTableContext tableName e = "in table " <> tableName <<> ": " <> e
diff --git a/server/src-lib/Hasura/RQL/DDL/Schema/Cache/Fields.hs b/server/src-lib/Hasura/RQL/DDL/Schema/Cache/Fields.hs
index ea28aa64299..ae6f3bc48a6 100644
--- a/server/src-lib/Hasura/RQL/DDL/Schema/Cache/Fields.hs
+++ b/server/src-lib/Hasura/RQL/DDL/Schema/Cache/Fields.hs
@@ -34,31 +34,19 @@ addNonColumnFields
      , [CatalogComputedField]
      ) `arr` FieldInfoMap FieldInfo
 addNonColumnFields = proc (rawTableInfo, columns, relationships, computedFields) -> do
-  let foreignKeys = _tciForeignKeys <$> rawTableInfo
-  relationshipInfos <-
-    (| Inc.keyed (\_ relationshipsByName -> do
-         maybeRelationship <- noDuplicates mkRelationshipMetadataObject -< relationshipsByName
-         (\info -> join info >- returnA) <-<
-           (| traverseA (\relationship -> do
-                info <- buildRelationship -< (foreignKeys, relationship)
-                returnA -< info <&> (, mkRelationshipMetadataObject relationship))
-           |) maybeRelationship)
-    |) (M.groupOn _crRelName relationships)
-
-  let trackedTableNames = HS.fromList $ M.keys rawTableInfo
-  computedFieldInfos <-
-    (| Inc.keyed (\_ computedFieldsByName -> do
-         maybeComputedField <- noDuplicates mkComputedFieldMetadataObject -< computedFieldsByName
-         (\info -> join info >- returnA) <-<
-           (| traverseA (\computedField -> do
-                info <- buildComputedField -< (trackedTableNames, computedField)
-                returnA -< info <&> (, mkComputedFieldMetadataObject computedField))
-           |) maybeComputedField)
-    |) (M.groupOn (_afcName . _cccComputedField) computedFields)
+  relationshipInfos
+    <- buildInfoMapPreservingMetadata _crRelName mkRelationshipMetadataObject buildRelationship
+    -< (_tciForeignKeys <$> rawTableInfo, relationships)
+  computedFieldInfos
+    <- buildInfoMapPreservingMetadata
+         (_afcName . _cccComputedField)
+         mkComputedFieldMetadataObject
+         buildComputedField
+    -< (HS.fromList $ M.keys rawTableInfo, computedFields)
 
   let mapKey f = M.fromList . map (first f) . M.toList
-      relationshipFields = mapKey fromRel $ M.catMaybes relationshipInfos
-      computedFieldFields = mapKey fromComputedField $ M.catMaybes computedFieldInfos
+      relationshipFields = mapKey fromRel relationshipInfos
+      computedFieldFields = mapKey fromComputedField computedFieldInfos
 
   -- First, check for conflicts between non-column fields, since we can raise a better error
   -- message in terms of the two metadata objects that define them.
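
Aside (not part of the patch): the arrow code above can be hard to follow without Hasura’s incremental build framework in hand. Below is a minimal, non-incremental sketch of the grouping and deduplication that buildInfoMap performs, written with plain functions instead of arrows. buildInfoMapModel and its helpers are hypothetical names; the real code also records an inconsistency for each duplicate or failed entry instead of silently dropping it, and memoizes work via Inc.keyed.

-- Hypothetical model, not the patch's code: plain-function analogue of
-- buildInfoMap, using only Data.HashMap.Strict from unordered-containers.
import           Data.Hashable       (Hashable)
import qualified Data.HashMap.Strict as M

buildInfoMapModel
  :: (Eq k, Hashable k)
  => (a -> k)        -- extract each entry's key, e.g. _cetName
  -> (a -> Maybe b)  -- build one entry; Nothing marks it inconsistent
  -> [a]
  -> M.HashMap k b
buildInfoMapModel extractKey buildInfo =
    M.mapMaybe processGroup . groupOnKey
  where
    -- group the raw entries by key, like M.groupOn in the patch
    groupOnKey = M.fromListWith (++) . map (\x -> (extractKey x, [x]))
    -- a key defined more than once is a duplicate, so the whole group is
    -- dropped; in the real code, noDuplicates also records the conflict
    processGroup [single] = buildInfo single
    processGroup _        = Nothing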
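In the same spirit, buildInfoMapPreservingMetadata simply pairs each successfully built value with the MetadataObject of the entry it came from, so that later stages (such as the remote schema merging in buildGQLSchema) can still mark the object inconsistent. A sketch in terms of the model above, again with hypothetical names:

-- Hypothetical model of buildInfoMapPreservingMetadata: identical to
-- buildInfoMapModel, but each result keeps its entry's metadata object.
buildInfoMapPreservingMetadataModel
  :: (Eq k, Hashable k)
  => (a -> k)
  -> (a -> metadata)   -- e.g. mkRemoteSchemaMetadataObject
  -> (a -> Maybe b)
  -> [a]
  -> M.HashMap k (b, metadata)
buildInfoMapPreservingMetadataModel extractKey mkMetadata buildInfo =
  buildInfoMapModel extractKey
    (\info -> (\built -> (built, mkMetadata info)) <$> buildInfo info)

This mirrors how the arrow version threads result <&> (, mkMetadataObject info) through buildInfoMap.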
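The buildGQLSchema comment describes accumulating the consistent remote schemas while merging, since any individual merge can fail. A rough model of that fold, where mergeOne is a stand-in for mergeRemoteSchema/mergeGCtx (hypothetical; the real code records an inconsistency against each schema’s MetadataObject on failure, and M is the Data.HashMap.Strict import from the first sketch):

-- Hypothetical model of buildGQLSchema's fold: try to merge each remote
-- schema into the accumulated result, keeping only the ones that merge
-- cleanly (the real code records failures against each MetadataObject).
foldMergeModel
  :: (Eq name, Hashable name)
  => (schema -> remote -> Either err schema)  -- stand-in merge step
  -> schema                                   -- base GraphQL schema
  -> [(name, remote)]                         -- M.toList of the schema map
  -> (M.HashMap name remote, schema)          -- consistent schemas + merged result
foldMergeModel mergeOne base = foldl step (M.empty, base)
  where
    step acc@(consistent, merged) (name, remote) =
      case mergeOne merged remote of
        Left _        -> acc  -- merge failed: skip, keep the accumulator
        Right merged' -> (M.insert name remote consistent, merged')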