Mirror of https://github.com/hasura/graphql-engine.git
Refactor common logic out of metadata processing
commit 895f244a67 (parent 8ef205fba5)
@@ -207,7 +207,7 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
     -- remote schemas
     let remoteSchemaInvalidationKeys = Inc.selectD #_ikRemoteSchemas invalidationKeys
-    remoteSchemaMap <- buildRemoteSchemas -< (remoteSchemas, remoteSchemaInvalidationKeys)
+    remoteSchemaMap <- buildRemoteSchemas -< (remoteSchemaInvalidationKeys, remoteSchemas)

     returnA -< BuildOutputs
       { _boTables = tableCache
@@ -254,14 +254,7 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
       :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
          , Inc.ArrowCache m arr, MonadIO m, MonadTx m, MonadReader BuildReason m, HasSQLGenCtx m )
       => (TableCoreInfo, [CatalogEventTrigger]) `arr` EventTriggerInfoMap
-    buildTableEventTriggers = proc (tableInfo, eventTriggers) ->
-      (\infos -> M.catMaybes infos >- returnA) <-<
-        (| Inc.keyed (\_ duplicateEventTriggers -> do
-             maybeEventTrigger <- noDuplicates mkEventTriggerMetadataObject -< duplicateEventTriggers
-             (\info -> join info >- returnA) <-<
-               (| traverseA (\eventTrigger -> buildEventTrigger -< (tableInfo, eventTrigger))
-               |) maybeEventTrigger)
-         |) (M.groupOn _cetName eventTriggers)
+    buildTableEventTriggers = buildInfoMap _cetName mkEventTriggerMetadataObject buildEventTrigger
       where
         buildEventTrigger = proc (tableInfo, eventTrigger) -> do
           let CatalogEventTrigger qt trn configuration = eventTrigger
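For readers unfamiliar with the arrow notation in the removed block, the pattern this commit factors out is easier to see in ordinary monadic Haskell. The following is a simplified, hypothetical analogue of buildInfoMap (the name buildInfoMapSimple and the MetadataObject stand-in are illustrative, not part of the codebase): group raw metadata entries by key, mark every key that occurs more than once as inconsistent, and run the build step only on the survivors.

    -- A simplified, monadic analogue of the pattern buildInfoMap factors out.
    -- MetadataObject here is a hypothetical stand-in for the real type.
    import           Control.Monad.Writer (Writer, tell)
    import           Data.Hashable        (Hashable)
    import qualified Data.HashMap.Strict  as M

    type MetadataObject = String

    buildInfoMapSimple
      :: (Eq k, Hashable k)
      => (a -> k)                                   -- extract the grouping key
      -> (a -> MetadataObject)                      -- describe an entry for error reporting
      -> (a -> Writer [MetadataObject] (Maybe b))   -- build one entry; Nothing if it failed
      -> [a]
      -> Writer [MetadataObject] (M.HashMap k b)
    buildInfoMapSimple extractKey mkMetadataObject buildInfo infos = do
      -- group the raw entries by their key, like M.groupOn in the real code
      let grouped = M.fromListWith (++) [ (extractKey a, [a]) | a <- infos ]
      results <- traverse processGroup grouped
      pure (M.mapMaybe id results)
      where
        -- exactly one entry for this key: build it
        processGroup [single]   = buildInfo single
        -- duplicates: record every colliding entry as inconsistent, build nothing
        processGroup duplicates = do
          tell (map mkMetadataObject duplicates)
          pure Nothing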
@@ -288,53 +281,44 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
     buildRemoteSchemas
       :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
          , Inc.ArrowCache m arr , MonadIO m, HasHttpManager m )
-      => ( [AddRemoteSchemaQuery]
-         , Inc.Dependency (HashMap RemoteSchemaName Inc.InvalidationKey)
-         ) `arr` HashMap RemoteSchemaName (AddRemoteSchemaQuery, RemoteSchemaCtx)
-    buildRemoteSchemas = proc (remoteSchemas, invalidationKeys) ->
-      -- TODO: Extract common code between this and buildTableEventTriggers
-      (\infos -> returnA -< M.catMaybes infos) <-<
-        (| Inc.keyed (\_ duplicateRemoteSchemas -> do
-             maybeRemoteSchema <- noDuplicates mkRemoteSchemaMetadataObject -< duplicateRemoteSchemas
-             (\info -> returnA -< join info) <-<
-               (| traverseA (\remoteSchema -> buildRemoteSchema -< (remoteSchema, invalidationKeys))
-               |) maybeRemoteSchema)
-         |) (M.groupOn _arsqName remoteSchemas)
+      => ( Inc.Dependency (HashMap RemoteSchemaName Inc.InvalidationKey)
+         , [AddRemoteSchemaQuery]
+         ) `arr` HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject)
+    buildRemoteSchemas =
+      buildInfoMapPreservingMetadata _arsqName mkRemoteSchemaMetadataObject buildRemoteSchema
       where
         -- We want to cache this call because it fetches the remote schema over HTTP, and we don’t
         -- want to re-run that if the remote schema definition hasn’t changed.
-        buildRemoteSchema = Inc.cache proc (remoteSchema, invalidationKeys) -> do
+        buildRemoteSchema = Inc.cache proc (invalidationKeys, remoteSchema) -> do
           Inc.dependOn -< Inc.selectKeyD (_arsqName remoteSchema) invalidationKeys
-          (| withRecordInconsistency (do
-               remoteGQLSchema <- liftEitherA <<< bindA -<
-                 runExceptT $ addRemoteSchemaP2Setup remoteSchema
-               returnA -< (remoteSchema, remoteGQLSchema))
+          (| withRecordInconsistency (liftEitherA <<< bindA -<
+               runExceptT $ addRemoteSchemaP2Setup remoteSchema)
           |) (mkRemoteSchemaMetadataObject remoteSchema)

     -- Builds the GraphQL schema and merges in remote schemas. This function is kind of gross, as
     -- it’s possible for the remote schema merging to fail, at which point we have to mark them
     -- inconsistent. This means we have to accumulate the consistent remote schemas as we go, in
-    -- addition to the build GraphQL context.
+    -- addition to the built GraphQL context.
     buildGQLSchema
       :: ( ArrowChoice arr, ArrowWriter (Seq InconsistentMetadata) arr, ArrowKleisli m arr
          , MonadError QErr m )
       => ( TableCache
          , FunctionCache
-         , HashMap RemoteSchemaName (AddRemoteSchemaQuery, RemoteSchemaCtx)
+         , HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject)
          ) `arr` (RemoteSchemaMap, GS.GCtxMap, GS.GCtx)
     buildGQLSchema = proc (tableCache, functionCache, remoteSchemas) -> do
       baseGQLSchema <- bindA -< GS.mkGCtxMap tableCache functionCache
-      (| foldlA' (\(remoteSchemaMap, gqlSchemas, remoteGQLSchemas) (remoteSchema, remoteGQLSchema) ->
+      (| foldlA' (\(remoteSchemaMap, gqlSchemas, remoteGQLSchemas)
+                   (remoteSchemaName, (remoteSchema, metadataObject)) ->
           (| withRecordInconsistency (do
-               let gqlSchema = convRemoteGCtx $ rscGCtx remoteGQLSchema
+               let gqlSchema = convRemoteGCtx $ rscGCtx remoteSchema
                mergedGQLSchemas <- bindErrorA -< mergeRemoteSchema gqlSchemas gqlSchema
                mergedRemoteGQLSchemas <- bindErrorA -< mergeGCtx remoteGQLSchemas gqlSchema
-               let mergedRemoteSchemaMap =
-                     M.insert (_arsqName remoteSchema) remoteGQLSchema remoteSchemaMap
+               let mergedRemoteSchemaMap = M.insert remoteSchemaName remoteSchema remoteSchemaMap
                returnA -< (mergedRemoteSchemaMap, mergedGQLSchemas, mergedRemoteGQLSchemas))
-           |) (mkRemoteSchemaMetadataObject remoteSchema)
+           |) metadataObject
           >-> (| onNothingA ((remoteSchemaMap, gqlSchemas, remoteGQLSchemas) >- returnA) |))
-      |) (M.empty, baseGQLSchema, GC.emptyGCtx) (M.elems remoteSchemas)
+      |) (M.empty, baseGQLSchema, GC.emptyGCtx) (M.toList remoteSchemas)


 -- | @'withMetadataCheck' cascade action@ runs @action@ and checks if the schema changed as a
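The Inc.cache comment above captures the key idea: re-fetching a remote schema over HTTP is only necessary when its invalidation key changes. Outside the incremental-build arrow machinery, the same idea looks like a keyed cache. Here is a minimal IO-based sketch under that assumption; fetchCached, SchemaCache, and the fetch action are hypothetical illustrations, not the real Inc.cache API.

    import           Data.IORef
    import qualified Data.HashMap.Strict as M

    type SchemaName      = String
    type InvalidationKey = Int
    type SchemaDef       = String  -- stand-in for a fetched remote GraphQL schema

    -- Each cache entry remembers the invalidation key it was built against.
    newtype SchemaCache =
      SchemaCache (IORef (M.HashMap SchemaName (InvalidationKey, SchemaDef)))

    fetchCached
      :: SchemaCache
      -> (SchemaName -> IO SchemaDef)  -- the (hypothetical) HTTP fetch
      -> SchemaName
      -> InvalidationKey
      -> IO SchemaDef
    fetchCached (SchemaCache ref) fetch name key = do
      cache <- readIORef ref
      case M.lookup name cache of
        -- cache hit: the stored result was built against the same invalidation key
        Just (cachedKey, def) | cachedKey == key -> pure def
        -- miss or stale key: re-run the expensive fetch and remember the result
        _ -> do
          def <- fetch name
          modifyIORef' ref (M.insert name (key, def))
          pure def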
@@ -7,7 +7,7 @@ module Hasura.RQL.DDL.Schema.Cache.Common where

 import           Hasura.Prelude

-import qualified Data.HashMap.Strict          as M
+import qualified Data.HashMap.Strict.Extended as M
 import qualified Data.HashSet                 as HS
 import qualified Data.Sequence                as Seq
@@ -55,9 +55,9 @@ data BuildOutputs
   = BuildOutputs
   { _boTables        :: !TableCache
   , _boFunctions     :: !FunctionCache
-  , _boRemoteSchemas :: !(HashMap RemoteSchemaName (AddRemoteSchemaQuery, RemoteSchemaCtx))
-  -- ^ We preserve the 'AddRemoteSchemaQuery' from the original catalog metadata in the output so we
-  -- can reuse it later if we need to mark the remote schema inconsistent during GraphQL schema
+  , _boRemoteSchemas :: !(HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject))
+  -- ^ We preserve the 'MetadataObject' from the original catalog metadata in the output so we can
+  -- reuse it later if we need to mark the remote schema inconsistent during GraphQL schema
   -- generation (because of field conflicts).
   , _boAllowlist     :: !(HS.HashSet GQLQuery)
   } deriving (Show, Eq)
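The new field's comment explains why a MetadataObject now travels alongside each RemoteSchemaCtx. A sketch of the consuming side, with simplified stand-in types (mergeAll, the merge step, and the String aliases are illustrative only): when a later merge fails, the preserved metadata object is what identifies which catalog entry to mark inconsistent.

    import qualified Data.HashMap.Strict as M

    type MetadataObject = String                      -- simplified stand-in
    newtype RemoteSchemaCtx = RemoteSchemaCtx String  -- simplified stand-in

    -- Try to merge every remote schema; a failure yields the preserved
    -- MetadataObject so the caller can record exactly which entry is bad.
    mergeAll
      :: (RemoteSchemaCtx -> Either String ())        -- hypothetical merge step
      -> M.HashMap String (RemoteSchemaCtx, MetadataObject)
      -> (M.HashMap String RemoteSchemaCtx, [MetadataObject])
    mergeAll merge = M.foldlWithKey' step (M.empty, [])
      where
        step (ok, bad) name (ctx, metadata) =
          case merge ctx of
            Right () -> (M.insert name ctx ok, bad)   -- consistent: keep the ctx
            Left _   -> (ok, metadata : bad)          -- failed: report via metadata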
@@ -104,5 +104,38 @@ noDuplicates mkMetadataObject = proc values -> case values of
       returnA -< Nothing
 {-# INLINABLE noDuplicates #-}

+-- | Processes a list of catalog metadata into a map of processed information, marking any duplicate
+-- entries inconsistent.
+buildInfoMap
+  :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
+     , Eq k, Hashable k )
+  => (a -> k)
+  -> (a -> MetadataObject)
+  -> (e, a) `arr` Maybe b
+  -> (e, [a]) `arr` HashMap k b
+buildInfoMap extractKey mkMetadataObject buildInfo = proc (e, infos) ->
+  (M.groupOn extractKey infos >- returnA)
+    >-> (| Inc.keyed (\_ duplicateInfos ->
+             (duplicateInfos >- noDuplicates mkMetadataObject)
+               >-> (| traverseA (\info -> (e, info) >- buildInfo) |)
+               >-> (\info -> join info >- returnA)) |)
+    >-> (\infoMap -> M.catMaybes infoMap >- returnA)
+{-# INLINABLE buildInfoMap #-}
+
+-- | Like 'buildInfoMap', but includes each processed info’s associated 'MetadataObject' in the result.
+-- This is useful if the results will be further processed, and the 'MetadataObject' is still needed
+-- to mark the object inconsistent.
+buildInfoMapPreservingMetadata
+  :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
+     , Eq k, Hashable k )
+  => (a -> k)
+  -> (a -> MetadataObject)
+  -> (e, a) `arr` Maybe b
+  -> (e, [a]) `arr` HashMap k (b, MetadataObject)
+buildInfoMapPreservingMetadata extractKey mkMetadataObject buildInfo =
+  buildInfoMap extractKey mkMetadataObject proc (e, info) ->
+    ((e, info) >- buildInfo) >-> \result -> result <&> (, mkMetadataObject info) >- returnA
+{-# INLINABLE buildInfoMapPreservingMetadata #-}
+
 addTableContext :: QualifiedTable -> Text -> Text
 addTableContext tableName e = "in table " <> tableName <<> ": " <> e
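The line `result <&> (, mkMetadataObject info)` combines TupleSections with <&> (flipped fmap, from Data.Functor). A standalone sketch of the idiom, using a hypothetical pairWithMeta name:

    {-# LANGUAGE TupleSections #-}
    import Data.Functor ((<&>))

    -- Pair a successful result with the metadata describing its source entry;
    -- a failed (Nothing) result stays Nothing.
    pairWithMeta :: (a -> meta) -> a -> Maybe b -> Maybe (b, meta)
    pairWithMeta mkMeta info result = result <&> (, mkMeta info)

    -- ghci> pairWithMeta show (42 :: Int) (Just "built")
    -- Just ("built","42")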
@@ -34,31 +34,19 @@ addNonColumnFields
      , [CatalogComputedField]
      ) `arr` FieldInfoMap FieldInfo
 addNonColumnFields = proc (rawTableInfo, columns, relationships, computedFields) -> do
-  let foreignKeys = _tciForeignKeys <$> rawTableInfo
-  relationshipInfos <-
-    (| Inc.keyed (\_ relationshipsByName -> do
-         maybeRelationship <- noDuplicates mkRelationshipMetadataObject -< relationshipsByName
-         (\info -> join info >- returnA) <-<
-           (| traverseA (\relationship -> do
-                info <- buildRelationship -< (foreignKeys, relationship)
-                returnA -< info <&> (, mkRelationshipMetadataObject relationship))
-           |) maybeRelationship)
-     |) (M.groupOn _crRelName relationships)
-
-  let trackedTableNames = HS.fromList $ M.keys rawTableInfo
-  computedFieldInfos <-
-    (| Inc.keyed (\_ computedFieldsByName -> do
-         maybeComputedField <- noDuplicates mkComputedFieldMetadataObject -< computedFieldsByName
-         (\info -> join info >- returnA) <-<
-           (| traverseA (\computedField -> do
-                info <- buildComputedField -< (trackedTableNames, computedField)
-                returnA -< info <&> (, mkComputedFieldMetadataObject computedField))
-           |) maybeComputedField)
-     |) (M.groupOn (_afcName . _cccComputedField) computedFields)
+  relationshipInfos
+    <- buildInfoMapPreservingMetadata _crRelName mkRelationshipMetadataObject buildRelationship
+    -< (_tciForeignKeys <$> rawTableInfo, relationships)
+  computedFieldInfos
+    <- buildInfoMapPreservingMetadata
+         (_afcName . _cccComputedField)
+         mkComputedFieldMetadataObject
+         buildComputedField
+    -< (HS.fromList $ M.keys rawTableInfo, computedFields)

   let mapKey f = M.fromList . map (first f) . M.toList
-      relationshipFields = mapKey fromRel $ M.catMaybes relationshipInfos
-      computedFieldFields = mapKey fromComputedField $ M.catMaybes computedFieldInfos
+      relationshipFields = mapKey fromRel relationshipInfos
+      computedFieldFields = mapKey fromComputedField computedFieldInfos

   -- First, check for conflicts between non-column fields, since we can raise a better error
   -- message in terms of the two metadata objects that define them.