Build the GraphQL context after pruning dangling dependents

fixes #3791
Alexis King 2020-01-29 14:15:53 -06:00
parent 64485a0fd6
commit 3cdb3841e6
7 changed files with 197 additions and 76 deletions

View File

@@ -1,27 +1,51 @@
-- | A simple implementation of /incremental build rules/, which can be used to avoid unnecessary
-- recomputation on incrementally-changing input. See 'Rule' for more details.
module Hasura.Incremental
( Rule
module Hasura.Incremental (
-- * The @Rule@ datatype
Rule
, Result
, build
, rebuild
, rebuildRule
, result
-- * Abstract interface
, ArrowDistribute(..)
, ArrowCache(..)
, MonadDepend(..)
, DependT
-- * Fine-grained dependencies
, Dependency
, Selector
, Select(Selector)
, selectD
, selectKeyD
, Cacheable(..)
, Accesses
-- * Cache invalidation
, InvalidationKey
, initialInvalidationKey
, invalidate
) where
import Hasura.Prelude
import Hasura.Incremental.Internal.Cache
import Hasura.Incremental.Internal.Dependency
import Hasura.Incremental.Internal.Rule
import Hasura.Incremental.Select
-- | A simple helper type that can be used to implement explicit cache invalidation. Internally,
-- each 'InvalidationKey' is a counter; 'initialInvalidationKey' starts the counter at 0 and
-- 'invalidate' increments it by 1. Two 'InvalidationKey's are equal iff they have the same internal
-- count, so depending on an 'InvalidationKey' provides a mechanism to force portions of the build
-- process to be re-executed by calling 'invalidate' before running the build.
newtype InvalidationKey = InvalidationKey Int
deriving (Show, Eq, Cacheable)
initialInvalidationKey :: InvalidationKey
initialInvalidationKey = InvalidationKey 0
invalidate :: InvalidationKey -> InvalidationKey
invalidate (InvalidationKey n) = InvalidationKey (n + 1)
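-- A quick illustration of the counter semantics (editor's sketch, not part of
-- this commit; it relies only on the derived 'Eq' instance above):
invalidationKeyDemo :: Bool
invalidationKeyDemo =
     initialInvalidationKey == initialInvalidationKey               -- equal: same count
  && invalidate initialInvalidationKey /= initialInvalidationKey    -- unequal once bumped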

View File

@@ -6,6 +6,7 @@ module Hasura.Incremental.Select
( Select(..)
, ConstS(..)
, selectKey
, FieldS(..)
, UniqueS
, newUniqueS
, DMapS(..)
@@ -25,6 +26,10 @@ import qualified Data.HashMap.Strict as M
import Control.Monad.Unique
import Data.GADT.Compare
import Data.Kind
import Data.Proxy (Proxy (..))
import GHC.OverloadedLabels (IsLabel (..))
import GHC.Records (HasField (..))
import GHC.TypeLits (KnownSymbol, sameSymbol, symbolVal)
import Unsafe.Coerce (unsafeCoerce)
-- | The 'Select' class provides a way to access subparts of a product type using a reified
@@ -34,10 +39,18 @@ import Unsafe.Coerce (unsafeCoerce)
--
-- This is useful to implement dependency tracking, since it’s possible to track in a reified form
-- exactly which parts of a data structure are used.
--
-- Instances of 'Select' can be automatically derived for record types (just define an empty
-- instance). The instance uses the magical 'HasField' constraints, and 'Selector's for the type can
-- be written using @OverloadedLabels@.
class (GCompare (Selector a)) => Select a where
type Selector a :: Type -> Type
select :: Selector a b -> a -> b
type Selector r = FieldS r
default select :: Selector a ~ FieldS a => Selector a b -> a -> b
select (FieldS (_ :: Proxy s)) = getField @s
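-- As the comment above describes, a record needs only an empty instance to get
-- a derived 'Select' (editor's sketch, not part of this commit; 'Point' is
-- hypothetical, and @OverloadedLabels@ is assumed to be enabled):
data Point = Point { pointX :: Int, pointY :: Int }
instance Select Point

selectPointX :: Point -> Int
selectPointX = select (#pointX :: FieldS Point Int)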
instance (Eq k, Ord k, Hashable k) => Select (HashMap k v) where
type Selector (HashMap k v) = ConstS k (Maybe v)
select (ConstS k) = M.lookup k
@@ -66,6 +79,29 @@ instance (Ord k) => GCompare (ConstS k a) where
EQ -> GEQ
GT -> GGT
data FieldS r a where
FieldS :: (KnownSymbol s, HasField s r a) => !(Proxy s) -> FieldS r a
instance (KnownSymbol s, HasField s r a) => IsLabel s (FieldS r a) where
fromLabel = FieldS (Proxy @s)
instance GEq (FieldS r) where
FieldS a `geq` FieldS b = case sameSymbol a b of
-- If two fields of the same record have the same name, then they fundamentally must
-- have the same type! However, unfortunately, `HasField` constraints use a functional
-- dependency to enforce this rather than a type family, and functional dependencies don’t
-- provide evidence, so we have to use `unsafeCoerce` here. Yuck!
Just Refl -> Just (unsafeCoerce Refl)
Nothing -> Nothing
instance GCompare (FieldS r) where
FieldS a `gcompare` FieldS b = case sameSymbol a b of
-- See note about `HasField` and `unsafeCoerce` above.
Just Refl -> unsafeCoerce GEQ
Nothing
| symbolVal a < symbolVal b -> GLT
| otherwise -> GGT
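-- The name comparison above, restated standalone (editor's sketch; assumes
-- 'Data.Maybe.isJust' is imported): 'sameSymbol' only decides whether the
-- field *names* match, and agreement of the field *types* then follows from
-- the 'HasField' functional dependency, which is why the instances above
-- must coerce.
fieldNamesMatch :: FieldS r a -> FieldS r b -> Bool
fieldNamesMatch (FieldS a) (FieldS b) = isJust (sameSymbol a b)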
-- | A 'UniqueS' is, as the name implies, a globally-unique 'Selector', which can be created using
-- 'newUniqueS'. If a value of type @'UniqueS' a@ is found to be equal (via 'geq') with another
-- value of type @'UniqueS' b@, then @a@ and @b@ must be the same type. This effectively allows the

View File

@@ -1,4 +1,5 @@
{-# LANGUAGE Arrows #-}
{-# LANGUAGE OverloadedLabels #-}
{-| Top-level functions concerned specifically with operations on the schema cache, such as
rebuilding it from the catalog and incorporating schema changes. See the module documentation for
@@ -60,8 +61,9 @@ buildRebuildableSchemaCache
=> m (RebuildableSchemaCache m)
buildRebuildableSchemaCache = do
catalogMetadata <- liftTx fetchCatalogData
result <- flip runReaderT CatalogSync $ Inc.build buildSchemaCacheRule (catalogMetadata, M.empty)
pure $ RebuildableSchemaCache (Inc.result result) M.empty (Inc.rebuildRule result)
result <- flip runReaderT CatalogSync $
Inc.build buildSchemaCacheRule (catalogMetadata, initialInvalidationKeys)
pure $ RebuildableSchemaCache (Inc.result result) initialInvalidationKeys (Inc.rebuildRule result)
newtype CacheRWT m a
= CacheRWT { unCacheRWT :: StateT (RebuildableSchemaCache m) m a }
@@ -79,50 +81,65 @@ instance (Monad m) => TableCoreInfoRM (CacheRWT m)
instance (Monad m) => CacheRM (CacheRWT m) where
askSchemaCache = CacheRWT $ gets lastBuiltSchemaCache
instance (MonadIO m, MonadTx m, MonadUnique m) => CacheRWM (CacheRWT m) where
instance (MonadIO m, MonadTx m) => CacheRWM (CacheRWT m) where
buildSchemaCacheWithOptions buildReason = CacheRWT do
RebuildableSchemaCache _ invalidationMap rule <- get
RebuildableSchemaCache _ invalidationKeys rule <- get
catalogMetadata <- liftTx fetchCatalogData
result <- lift $ flip runReaderT buildReason $ Inc.build rule (catalogMetadata, invalidationMap)
result <- lift $ flip runReaderT buildReason $ Inc.build rule (catalogMetadata, invalidationKeys)
let schemaCache = Inc.result result
prunedInvalidationMap = pruneInvalidationMap schemaCache invalidationMap
put $! RebuildableSchemaCache schemaCache prunedInvalidationMap (Inc.rebuildRule result)
prunedInvalidationKeys = pruneInvalidationKeys schemaCache invalidationKeys
put $! RebuildableSchemaCache schemaCache prunedInvalidationKeys (Inc.rebuildRule result)
where
pruneInvalidationMap schemaCache = M.filterWithKey \name _ ->
-- Prunes invalidation keys that no longer exist in the schema to avoid leaking memory by
-- hanging onto unnecessary keys.
pruneInvalidationKeys schemaCache = over ikRemoteSchemas $ M.filterWithKey \name _ ->
M.member name (scRemoteSchemas schemaCache)
invalidateCachedRemoteSchema name = CacheRWT do
unique <- newUnique
assign (rscInvalidationMap . at name) (Just unique)
invalidateCachedRemoteSchema name =
CacheRWT $ modifying (rscInvalidationMap . ikRemoteSchemas . at name) $
Just . maybe Inc.initialInvalidationKey Inc.invalidate
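-- The update above has simple "seed or bump" semantics; restated standalone
-- (editor's sketch, not part of this commit):
bumpInvalidationKey :: Maybe Inc.InvalidationKey -> Maybe Inc.InvalidationKey
bumpInvalidationKey = Just . maybe Inc.initialInvalidationKey Inc.invalidate
-- bumpInvalidationKey Nothing  gives Just initialInvalidationKey (first invalidation)
-- bumpInvalidationKey (Just k) gives Just (invalidate k)         (every one after)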
buildSchemaCacheRule
-- Note: by supplying BuildReason via MonadReader, it does not participate in caching, which is
-- what we want!
:: ( HasVersion, ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
, MonadIO m, MonadTx m, MonadReader BuildReason m, HasHttpManager m, HasSQLGenCtx m )
=> (CatalogMetadata, InvalidationMap) `arr` SchemaCache
buildSchemaCacheRule = proc inputs -> do
(outputs, collectedInfo) <- runWriterA buildAndCollectInfo -< inputs
=> (CatalogMetadata, InvalidationKeys) `arr` SchemaCache
buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
invalidationKeysDep <- Inc.newDependency -< invalidationKeys
-- Step 1: Process metadata and collect dependency information.
(outputs, collectedInfo) <-
runWriterA buildAndCollectInfo -< (catalogMetadata, invalidationKeysDep)
let (inconsistentObjects, unresolvedDependencies) = partitionCollectedInfo collectedInfo
(resolvedOutputs, extraInconsistentObjects, resolvedDependencies) <-
-- Step 2: Resolve dependency information and drop dangling dependents.
(resolvedOutputs, dependencyInconsistentObjects, resolvedDependencies) <-
resolveDependencies -< (outputs, unresolvedDependencies)
-- Step 3: Build the GraphQL schema.
((remoteSchemaMap, gqlSchema, remoteGQLSchema), gqlSchemaInconsistentObjects)
<- runWriterA buildGQLSchema
-< (_boTables resolvedOutputs, _boFunctions resolvedOutputs, _boRemoteSchemas resolvedOutputs)
returnA -< SchemaCache
{ scTables = _boTables resolvedOutputs
, scFunctions = _boFunctions resolvedOutputs
, scRemoteSchemas = _boRemoteSchemas resolvedOutputs
, scRemoteSchemas = remoteSchemaMap
, scAllowlist = _boAllowlist resolvedOutputs
, scGCtxMap = _boGCtxMap resolvedOutputs
, scDefaultRemoteGCtx = _boDefaultRemoteGCtx resolvedOutputs
, scGCtxMap = gqlSchema
, scDefaultRemoteGCtx = remoteGQLSchema
, scDepMap = resolvedDependencies
, scInconsistentObjs = inconsistentObjects <> extraInconsistentObjects
, scInconsistentObjs =
inconsistentObjects <> dependencyInconsistentObjects <> toList gqlSchemaInconsistentObjects
}
where
buildAndCollectInfo
:: ( HasVersion, ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
, ArrowWriter (Seq CollectedInfo) arr, MonadIO m, MonadTx m, MonadReader BuildReason m
, HasHttpManager m, HasSQLGenCtx m )
=> (CatalogMetadata, InvalidationMap) `arr` BuildOutputs
buildAndCollectInfo = proc (catalogMetadata, invalidationMap) -> do
=> (CatalogMetadata, Inc.Dependency InvalidationKeys) `arr` BuildOutputs
buildAndCollectInfo = proc (catalogMetadata, invalidationKeys) -> do
let CatalogMetadata tables relationships permissions
eventTriggers remoteSchemas functions allowlistDefs
computedFields = catalogMetadata
@@ -182,23 +199,15 @@ buildSchemaCacheRule = proc inputs -> do
& map (queryWithoutTypeNames . getGQLQuery . _lqQuery)
& HS.fromList
-- build GraphQL context with tables and functions
baseGQLSchema <- bindA -< GS.mkGCtxMap tableCache functionCache
-- remote schemas
let invalidatedRemoteSchemas = flip map remoteSchemas \remoteSchema ->
(M.lookup (_arsqName remoteSchema) invalidationMap, remoteSchema)
(remoteSchemaMap, gqlSchema, remoteGQLSchema) <-
(| foldlA' (\schemas schema -> (schemas, schema) >- addRemoteSchema)
|) (M.empty, baseGQLSchema, GC.emptyGCtx) invalidatedRemoteSchemas
let remoteSchemaInvalidationKeys = Inc.selectD #_ikRemoteSchemas invalidationKeys
remoteSchemaMap <- buildRemoteSchemas -< (remoteSchemas, remoteSchemaInvalidationKeys)
returnA -< BuildOutputs
{ _boTables = tableCache
, _boFunctions = functionCache
, _boRemoteSchemas = remoteSchemaMap
, _boAllowlist = allowList
, _boGCtxMap = gqlSchema
, _boDefaultRemoteGCtx = remoteGQLSchema
}
mkEventTriggerMetadataObject (CatalogEventTrigger qt trn configuration) =
@@ -206,6 +215,9 @@ buildSchemaCacheRule = proc inputs -> do
definition = object ["table" .= qt, "configuration" .= configuration]
in MetadataObject objectId definition
mkRemoteSchemaMetadataObject remoteSchema =
MetadataObject (MORemoteSchema (_arsqName remoteSchema)) (toJSON remoteSchema)
-- Given a map of table info, “folds in” another map of information, accumulating inconsistent
-- metadata objects for any entries in the second map that don’t appear in the first map. This
-- is used to “line up” the metadata for relationships, computed fields, permissions, etc. with
@@ -267,24 +279,57 @@ buildSchemaCacheRule = proc inputs -> do
when (buildReason == CatalogUpdate) $
mkAllTriggersQ triggerName tableName (M.elems tableColumns) triggerDefinition
addRemoteSchema
:: ( HasVersion, ArrowChoice arr, ArrowWriter (Seq CollectedInfo) arr, ArrowKleisli m arr
, MonadIO m, HasHttpManager m )
=> ( (RemoteSchemaMap, GS.GCtxMap, GS.GCtx)
, (Maybe InvalidationKey, AddRemoteSchemaQuery)
buildRemoteSchemas
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
, Inc.ArrowCache m arr, MonadIO m, HasHttpManager m )
=> ( [AddRemoteSchemaQuery]
, Inc.Dependency (HashMap RemoteSchemaName Inc.InvalidationKey)
) `arr` HashMap RemoteSchemaName (AddRemoteSchemaQuery, RemoteSchemaCtx)
buildRemoteSchemas = proc (remoteSchemas, invalidationKeys) ->
-- TODO: Extract common code between this and buildTableEventTriggers
(\infos -> returnA -< M.catMaybes infos) <-<
(| Inc.keyed (\_ duplicateRemoteSchemas -> do
maybeRemoteSchema <- noDuplicates mkRemoteSchemaMetadataObject -< duplicateRemoteSchemas
(\info -> returnA -< join info) <-<
(| traverseA (\remoteSchema -> buildRemoteSchema -< (remoteSchema, invalidationKeys))
|) maybeRemoteSchema)
|) (M.groupOn _arsqName remoteSchemas)
where
-- We want to cache this call because it fetches the remote schema over HTTP, and we don’t
-- want to re-run that if the remote schema definition hasn’t changed.
buildRemoteSchema = Inc.cache proc (remoteSchema, invalidationKeys) -> do
Inc.dependOn -< Inc.selectKeyD (_arsqName remoteSchema) invalidationKeys
(| withRecordInconsistency (do
remoteGQLSchema <- liftEitherA <<< bindA -<
runExceptT $ addRemoteSchemaP2Setup remoteSchema
returnA -< (remoteSchema, remoteGQLSchema))
|) (mkRemoteSchemaMetadataObject remoteSchema)
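-- The caching pattern above, reduced to its essentials (editor's sketch;
-- 'fetchOverHttp' is a placeholder and the exact constraints on
-- 'Inc.dependOn' are elided here, so treat it as illustrative only):
--
-- > cachedFetch
-- >   :: (Inc.ArrowCache m arr, ArrowKleisli m arr, MonadIO m)
-- >   => (Text, Inc.Dependency Inc.InvalidationKey) `arr` ByteString
-- > cachedFetch = Inc.cache proc (url, key) -> do
-- >   Inc.dependOn -< key                 -- re-run when the key is bumped...
-- >   bindA -< liftIO (fetchOverHttp url) -- ...otherwise reuse the cached result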
-- Builds the GraphQL schema and merges in remote schemas. This function is kind of gross, as
-- it’s possible for the remote schema merging to fail, at which point we have to mark them
-- inconsistent. This means we have to accumulate the consistent remote schemas as we go, in
-- addition to the built GraphQL context.
buildGQLSchema
:: ( ArrowChoice arr, ArrowWriter (Seq InconsistentMetadata) arr, ArrowKleisli m arr
, MonadError QErr m )
=> ( TableCache
, FunctionCache
, HashMap RemoteSchemaName (AddRemoteSchemaQuery, RemoteSchemaCtx)
) `arr` (RemoteSchemaMap, GS.GCtxMap, GS.GCtx)
addRemoteSchema = proc ((remoteSchemas, gCtxMap, defGCtx), (_, remoteSchema)) -> do
let name = _arsqName remoteSchema
(| onNothingA (returnA -< (remoteSchemas, gCtxMap, defGCtx)) |) <-<
(| withRecordInconsistency (case M.lookup name remoteSchemas of
Just _ -> throwA -< err400 AlreadyExists "duplicate definition for remote schema"
Nothing -> liftEitherA <<< bindA -< runExceptT do
rsCtx <- addRemoteSchemaP2Setup remoteSchema
let rGCtx = convRemoteGCtx $ rscGCtx rsCtx
mergedGCtxMap <- mergeRemoteSchema gCtxMap rGCtx
mergedDefGCtx <- mergeGCtx defGCtx rGCtx
pure (M.insert name rsCtx remoteSchemas, mergedGCtxMap, mergedDefGCtx))
|) (MetadataObject (MORemoteSchema name) (toJSON remoteSchema))
buildGQLSchema = proc (tableCache, functionCache, remoteSchemas) -> do
baseGQLSchema <- bindA -< GS.mkGCtxMap tableCache functionCache
(| foldlA' (\(remoteSchemaMap, gqlSchemas, remoteGQLSchemas) (remoteSchema, remoteGQLSchema) ->
(| withRecordInconsistency (do
let gqlSchema = convRemoteGCtx $ rscGCtx remoteGQLSchema
mergedGQLSchemas <- bindErrorA -< mergeRemoteSchema gqlSchemas gqlSchema
mergedRemoteGQLSchemas <- bindErrorA -< mergeGCtx remoteGQLSchemas gqlSchema
let mergedRemoteSchemaMap =
M.insert (_arsqName remoteSchema) remoteGQLSchema remoteSchemaMap
returnA -< (mergedRemoteSchemaMap, mergedGQLSchemas, mergedRemoteGQLSchemas))
|) (mkRemoteSchemaMetadataObject remoteSchema)
>-> (| onNothingA ((remoteSchemaMap, gqlSchemas, remoteGQLSchemas) >- returnA) |))
|) (M.empty, baseGQLSchema, GC.emptyGCtx) (M.elems remoteSchemas)
-- | @'withMetadataCheck' cascade action@ runs @action@ and checks if the schema changed as a
-- result. If it did, it checks to ensure the changes do not violate any integrity constraints, and

View File

@@ -11,9 +11,7 @@ import qualified Data.Sequence as Seq
import Control.Arrow.Extended
import Control.Lens
import Control.Monad.Unique
import qualified Hasura.GraphQL.Context as GC
import qualified Hasura.Incremental as Inc
import Hasura.RQL.Types
@@ -22,18 +20,24 @@ import Hasura.RQL.Types.QueryCollection
import Hasura.RQL.Types.Run
import Hasura.SQL.Types
-- | A map used to explicitly invalidate part of the build cache, which is most useful for external
-- resources (currently only remote schemas). The 'InvalidationKey' values it contains are used as
-- inputs to build rules, so setting an entry to a fresh 'InvalidationKey' forces it to be
-- re-executed.
type InvalidationMap = HashMap RemoteSchemaName InvalidationKey
type InvalidationKey = Unique
data InvalidationKeys = InvalidationKeys
{ _ikMetadata :: !Inc.InvalidationKey
-- ^ Invalidated by the @reload_metadata@ API.
, _ikRemoteSchemas :: !(HashMap RemoteSchemaName Inc.InvalidationKey)
-- ^ Invalidated by the @reload_remote_schema@ API.
} deriving (Eq, Generic)
instance Inc.Cacheable InvalidationKeys
instance Inc.Select InvalidationKeys
$(makeLenses ''InvalidationKeys)
initialInvalidationKeys :: InvalidationKeys
initialInvalidationKeys = InvalidationKeys Inc.initialInvalidationKey mempty
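-- Because 'InvalidationKeys' gets the empty 'Inc.Select' instance above, build
-- rules can depend on a single field via @OverloadedLabels@, as Cache.hs does;
-- restated standalone (editor's sketch, not part of this commit):
remoteSchemaInvalidationKeys
  :: Inc.Dependency InvalidationKeys
  -> Inc.Dependency (HashMap RemoteSchemaName Inc.InvalidationKey)
remoteSchemaInvalidationKeys = Inc.selectD #_ikRemoteSchemas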
data BuildInputs
= BuildInputs
{ _biReason :: !BuildReason
, _biCatalogMetadata :: !CatalogMetadata
, _biInvalidationMap :: !InvalidationMap
, _biInvalidationMap :: !InvalidationKeys
} deriving (Eq)
-- | The direct output of 'buildSchemaCacheRule'. Contains most of the things necessary to build a
@@ -41,20 +45,21 @@ data BuildInputs
-- 'MonadWriter' side channel.
data BuildOutputs
= BuildOutputs
{ _boTables :: !TableCache
, _boFunctions :: !FunctionCache
, _boRemoteSchemas :: !RemoteSchemaMap
, _boAllowlist :: !(HS.HashSet GQLQuery)
, _boGCtxMap :: !GC.GCtxMap
, _boDefaultRemoteGCtx :: !GC.GCtx
{ _boTables :: !TableCache
, _boFunctions :: !FunctionCache
, _boRemoteSchemas :: !(HashMap RemoteSchemaName (AddRemoteSchemaQuery, RemoteSchemaCtx))
-- ^ We preserve the 'AddRemoteSchemaQuery' from the original catalog metadata in the output so we
-- can reuse it later if we need to mark the remote schema inconsistent during GraphQL schema
-- generation (because of field conflicts).
, _boAllowlist :: !(HS.HashSet GQLQuery)
} deriving (Show, Eq)
$(makeLenses ''BuildOutputs)
data RebuildableSchemaCache m
= RebuildableSchemaCache
{ lastBuiltSchemaCache :: !SchemaCache
, _rscInvalidationMap :: !InvalidationMap
, _rscRebuild :: !(Inc.Rule (ReaderT BuildReason m) (CatalogMetadata, InvalidationMap) SchemaCache)
, _rscInvalidationMap :: !InvalidationKeys
, _rscRebuild :: !(Inc.Rule (ReaderT BuildReason m) (CatalogMetadata, InvalidationKeys) SchemaCache)
}
$(makeLenses ''RebuildableSchemaCache)

View File

@@ -22,7 +22,7 @@ type UrlFromEnv = Text
newtype RemoteSchemaName
= RemoteSchemaName
{ unRemoteSchemaName :: NonEmptyText }
deriving ( Show, Eq, Lift, Hashable, J.ToJSON, J.ToJSONKey
deriving ( Show, Eq, Ord, Lift, Hashable, J.ToJSON, J.ToJSONKey
, J.FromJSON, Q.ToPrepArg, Q.FromCol, DQuote, NFData
, Generic, Cacheable, Arbitrary
)

View File

@@ -26,6 +26,7 @@ import qualified Data.Sequence as Seq
import qualified Data.Text as T
import Control.Arrow.Extended
import Control.Lens
import Data.Aeson (toJSON)
import Data.List (nub)
@@ -44,6 +45,14 @@ data CollectedInfo
!SchemaObjId
!SchemaDependency
deriving (Show, Eq)
$(makePrisms ''CollectedInfo)
class AsInconsistentMetadata s where
_InconsistentMetadata :: Prism' s InconsistentMetadata
instance AsInconsistentMetadata InconsistentMetadata where
_InconsistentMetadata = id
instance AsInconsistentMetadata CollectedInfo where
_InconsistentMetadata = _CIInconsistency
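-- With the prism in place, inconsistency-recording arrows work in any writer
-- channel that can embed an 'InconsistentMetadata'. Editor's sketch (the
-- arrow itself is hypothetical):
reportMissingObject
  :: (ArrowWriter (Seq w) arr, AsInconsistentMetadata w)
  => MetadataObject `arr` ()
reportMissingObject = proc metadataObject ->
  recordInconsistency -< (metadataObject, "referenced object does not exist")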
partitionCollectedInfo
:: Seq CollectedInfo
@@ -55,12 +64,14 @@ partitionCollectedInfo =
let dependency = (metadataObject, objectId, schemaDependency)
in (inconsistencies, dependency:dependencies)
recordInconsistency :: (ArrowWriter (Seq CollectedInfo) arr) => (MetadataObject, Text) `arr` ()
recordInconsistency
:: (ArrowWriter (Seq w) arr, AsInconsistentMetadata w) => (MetadataObject, Text) `arr` ()
recordInconsistency = first (arr (:[])) >>> recordInconsistencies
recordInconsistencies :: (ArrowWriter (Seq CollectedInfo) arr) => ([MetadataObject], Text) `arr` ()
recordInconsistencies
:: (ArrowWriter (Seq w) arr, AsInconsistentMetadata w) => ([MetadataObject], Text) `arr` ()
recordInconsistencies = proc (metadataObjects, reason) ->
tellA -< Seq.fromList $ map (CIInconsistency . InconsistentObject reason) metadataObjects
tellA -< Seq.fromList $ map (review _InconsistentMetadata . InconsistentObject reason) metadataObjects
recordDependencies
:: (ArrowWriter (Seq CollectedInfo) arr)
@@ -69,7 +80,7 @@ recordDependencies = proc (metadataObject, schemaObjectId, dependencies) ->
tellA -< Seq.fromList $ map (CIDependency metadataObject schemaObjectId) dependencies
withRecordInconsistency
:: (ArrowChoice arr, ArrowWriter (Seq CollectedInfo) arr)
:: (ArrowChoice arr, ArrowWriter (Seq w) arr, AsInconsistentMetadata w)
=> ErrorA QErr arr (e, s) a
-> arr (e, (MetadataObject, s)) (Maybe a)
withRecordInconsistency f = proc (e, (metadataObject, s)) -> do

View File

@@ -90,7 +90,7 @@ class TestRemoteSchemaBasic:
q = mk_add_remote_q('simple 2', 'http://localhost:5000/hello-graphql')
st_code, resp = hge_ctx.v1q(q)
assert st_code == 400
assert resp['code'] == 'constraint-violation'
assert resp['code'] == 'unexpected'
def test_remove_schema_error(self, hge_ctx):
"""remove remote schema which is not added"""