Mirror of https://github.com/hasura/graphql-engine.git (synced 2025-01-05 22:34:22 +03:00)
Merge pull request #3798 from lexi-lambda/3759-3791-minor-metadata-build-bugfixes
Fix two minor bugs in the schema cache build process (fix #3759 and #3791)
Commit fb9498488f
@@ -46,7 +46,7 @@ runApp (HGEOptionsG rci hgeCmd) =
       execQuery queryBs
         & runHasSystemDefinedT (SystemDefined False)
         & runCacheRWT schemaCache
-        & fmap fst
+        & fmap (\(res, _, _) -> res)
       either printErrJExit (liftIO . BLC.putStrLn) res

     HCVersion -> liftIO $ putStrLn $ "Hasura GraphQL Engine: " ++ convertText currentVersion
@@ -1,27 +1,51 @@
 -- | A simple implementation of /incremental build rules/, which can be used to avoid unnecessary
 -- recomputation on incrementally-changing input. See 'Rule' for more details.
-module Hasura.Incremental
-  ( Rule
+module Hasura.Incremental (
+  -- * The @Rule@ datatype
+    Rule
   , Result
   , build
   , rebuild
   , rebuildRule
   , result

+  -- * Abstract interface
   , ArrowDistribute(..)
   , ArrowCache(..)
   , MonadDepend(..)
   , DependT

+  -- * Fine-grained dependencies
   , Dependency
-  , Selector
+  , Select(Selector)
   , selectD
   , selectKeyD
   , Cacheable(..)
   , Accesses

+  -- * Cache invalidation
+  , InvalidationKey
+  , initialInvalidationKey
+  , invalidate
   ) where

+import Hasura.Prelude
+
 import Hasura.Incremental.Internal.Cache
 import Hasura.Incremental.Internal.Dependency
 import Hasura.Incremental.Internal.Rule
 import Hasura.Incremental.Select

+-- | A simple helper type that can be used to implement explicit cache invalidation. Internally,
+-- each 'InvalidationKey' is a counter; 'initialInvalidationKey' starts the counter at 0 and
+-- 'invalidate' increments it by 1. Two 'InvalidationKey's are equal iff they have the same internal
+-- count, so depending on an 'InvalidationKey' provides a mechanism to force portions of the build
+-- process to be reexecuted by calling 'invalidate' before running the build.
+newtype InvalidationKey = InvalidationKey Int
+  deriving (Show, Eq, Cacheable)
+
+initialInvalidationKey :: InvalidationKey
+initialInvalidationKey = InvalidationKey 0
+
+invalidate :: InvalidationKey -> InvalidationKey
+invalidate (InvalidationKey n) = InvalidationKey (n + 1)
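Not part of the diff: a minimal sketch of the counter semantics the new InvalidationKey comment describes, using only the exports added above; demoInvalidation is an illustrative name, not code from this patch.

  import Hasura.Incremental (initialInvalidationKey, invalidate)

  -- Sketch: bumping a key yields a distinct value, so any rule that depends on it is
  -- re-executed on the next build, while an unchanged key lets the cached result be reused.
  demoInvalidation :: Bool
  demoInvalidation =
    let key  = initialInvalidationKey
        key' = invalidate key
    in key' /= key && invalidate key == invalidate key   -- evaluates to True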
@@ -6,6 +6,7 @@ module Hasura.Incremental.Select
   ( Select(..)
   , ConstS(..)
   , selectKey
+  , FieldS(..)
   , UniqueS
   , newUniqueS
   , DMapS(..)
@@ -25,6 +26,10 @@ import qualified Data.HashMap.Strict as M
 import Control.Monad.Unique
 import Data.GADT.Compare
 import Data.Kind
+import Data.Proxy (Proxy (..))
+import GHC.OverloadedLabels (IsLabel (..))
+import GHC.Records (HasField (..))
+import GHC.TypeLits (KnownSymbol, sameSymbol, symbolVal)
 import Unsafe.Coerce (unsafeCoerce)

 -- | The 'Select' class provides a way to access subparts of a product type using a reified
@@ -34,10 +39,18 @@ import Unsafe.Coerce (unsafeCoerce)
 --
 -- This is useful to implement dependency tracking, since it’s possible to track in a reified form
 -- exactly which parts of a data structure are used.
+--
+-- Instances of 'Select' can be automatically derived for record types (just define an empty
+-- instance). The instance uses the magical 'HasField' constraints, and 'Selector's for the type can
+-- be written using @OverloadedLabels@.
 class (GCompare (Selector a)) => Select a where
   type Selector a :: Type -> Type
   select :: Selector a b -> a -> b

+  type Selector r = FieldS r
+  default select :: Selector a ~ FieldS a => Selector a b -> a -> b
+  select (FieldS (_ :: Proxy s)) = getField @s

 instance (Eq k, Ord k, Hashable k) => Select (HashMap k v) where
   type Selector (HashMap k v) = ConstS k (Maybe v)
   select (ConstS k) = M.lookup k
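Not part of the diff: a sketch of the record-derivation pattern the new comment describes, assuming the FieldS machinery added in this file; Person and its fields are made-up names, and the empty instance relies on the class defaults shown above.

  {-# LANGUAGE OverloadedLabels #-}
  import Hasura.Incremental.Select (Select (..), FieldS)

  -- A hypothetical record; the empty instance picks up the FieldS-based defaults, so each
  -- field's selector can be written as an OverloadedLabels literal.
  data Person = Person { name :: String, age :: Int }
  instance Select Person

  personAge :: Person -> Int
  personAge = select (#age :: FieldS Person Int)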
@@ -66,6 +79,29 @@ instance (Ord k) => GCompare (ConstS k a) where
     EQ -> GEQ
     GT -> GGT

+data FieldS r a where
+  FieldS :: (KnownSymbol s, HasField s r a) => !(Proxy s) -> FieldS r a
+
+instance (KnownSymbol s, HasField s r a) => IsLabel s (FieldS r a) where
+  fromLabel = FieldS (Proxy @s)
+
+instance GEq (FieldS r) where
+  FieldS a `geq` FieldS b = case sameSymbol a b of
+    -- If two fields of the same record have the same name, then their fields fundamentally must
+    -- have the same type! However, unfortunately, `HasField` constraints use a functional
+    -- dependency to enforce this rather than a type family, and functional dependencies don’t
+    -- provide evidence, so we have to use `unsafeCoerce` here. Yuck!
+    Just Refl -> Just (unsafeCoerce Refl)
+    Nothing -> Nothing
+
+instance GCompare (FieldS r) where
+  FieldS a `gcompare` FieldS b = case sameSymbol a b of
+    -- See note about `HasField` and `unsafeCoerce` above.
+    Just Refl -> unsafeCoerce GEQ
+    Nothing
+      | symbolVal a < symbolVal b -> GLT
+      | otherwise -> GGT
+
 -- | A 'UniqueS' is, as the name implies, a globally-unique 'Selector', which can be created using
 -- 'newUniqueS'. If a value of type @'UniqueS' a@ is found to be equal (via 'geq') with another
 -- value of type @'UniqueS' b@, then @a@ and @b@ must be the same type. This effectively allows the
@@ -380,7 +380,7 @@ runExportMetadata _ =

 runReloadMetadata :: (QErrM m, CacheRWM m) => ReloadMetadata -> m EncJSON
 runReloadMetadata ReloadMetadata = do
-  buildSchemaCache
+  buildSchemaCacheWithOptions CatalogUpdate mempty { ciMetadata = True }
   return successMsg

 runDumpInternalState
@@ -13,6 +13,7 @@ import Hasura.Prelude

 import qualified Data.Aeson as J
 import qualified Data.HashMap.Strict as Map
+import qualified Data.HashSet as S
 import qualified Database.PG.Query as Q

 import Hasura.GraphQL.RemoteServer
@@ -87,8 +88,8 @@ runReloadRemoteSchema (RemoteSchemaNameQuery name) = do
   void $ onNothing (Map.lookup name rmSchemas) $
     throw400 NotExists $ "remote schema with name " <> name <<> " does not exist"

-  invalidateCachedRemoteSchema name
-  withNewInconsistentObjsCheck buildSchemaCache
+  let invalidations = mempty { ciRemoteSchemas = S.singleton name }
+  withNewInconsistentObjsCheck $ buildSchemaCacheWithOptions CatalogUpdate invalidations
   pure successMsg

 addRemoteSchemaToCatalog
@@ -1,4 +1,5 @@
 {-# LANGUAGE Arrows #-}
+{-# LANGUAGE OverloadedLabels #-}

 {-| Top-level functions concerned specifically with operations on the schema cache, such as
 rebuilding it from the catalog and incorporating schema changes. See the module documentation for
@@ -60,75 +61,97 @@ buildRebuildableSchemaCache
   => m (RebuildableSchemaCache m)
 buildRebuildableSchemaCache = do
   catalogMetadata <- liftTx fetchCatalogData
-  result <- flip runReaderT CatalogSync $ Inc.build buildSchemaCacheRule (catalogMetadata, M.empty)
-  pure $ RebuildableSchemaCache (Inc.result result) M.empty (Inc.rebuildRule result)
+  result <- flip runReaderT CatalogSync $
+    Inc.build buildSchemaCacheRule (catalogMetadata, initialInvalidationKeys)
+  pure $ RebuildableSchemaCache (Inc.result result) initialInvalidationKeys (Inc.rebuildRule result)

 newtype CacheRWT m a
-  = CacheRWT { unCacheRWT :: StateT (RebuildableSchemaCache m) m a }
+  -- The CacheInvalidations component of the state could actually be collected using WriterT, but
+  -- WriterT implementations prior to transformers-0.5.6.0 (which added
+  -- Control.Monad.Trans.Writer.CPS) are leaky, and we don’t have that yet.
+  = CacheRWT (StateT (RebuildableSchemaCache m, CacheInvalidations) m a)
   deriving
-    ( Functor, Applicative, Monad, MonadIO, MonadReader r, MonadError e, MonadWriter w, MonadTx
+    ( Functor, Applicative, Monad, MonadIO, MonadReader r, MonadError e, MonadTx
     , UserInfoM, HasHttpManager, HasSQLGenCtx, HasSystemDefined )

-runCacheRWT :: RebuildableSchemaCache m -> CacheRWT m a -> m (a, RebuildableSchemaCache m)
-runCacheRWT cache = flip runStateT cache . unCacheRWT
+runCacheRWT
+  :: Functor m
+  => RebuildableSchemaCache m -> CacheRWT m a -> m (a, RebuildableSchemaCache m, CacheInvalidations)
+runCacheRWT cache (CacheRWT m) =
+  runStateT m (cache, mempty) <&> \(v, (newCache, invalidations)) -> (v, newCache, invalidations)

 instance MonadTrans CacheRWT where
   lift = CacheRWT . lift

 instance (Monad m) => TableCoreInfoRM (CacheRWT m)
 instance (Monad m) => CacheRM (CacheRWT m) where
-  askSchemaCache = CacheRWT $ gets lastBuiltSchemaCache
+  askSchemaCache = CacheRWT $ gets (lastBuiltSchemaCache . fst)

-instance (MonadIO m, MonadTx m, MonadUnique m) => CacheRWM (CacheRWT m) where
-  buildSchemaCacheWithOptions buildReason = CacheRWT do
-    RebuildableSchemaCache _ invalidationMap rule <- get
+instance (MonadIO m, MonadTx m) => CacheRWM (CacheRWT m) where
+  buildSchemaCacheWithOptions buildReason invalidations = CacheRWT do
+    (RebuildableSchemaCache _ invalidationKeys rule, oldInvalidations) <- get
+    let newInvalidationKeys = invalidateKeys invalidations invalidationKeys
     catalogMetadata <- liftTx fetchCatalogData
-    result <- lift $ flip runReaderT buildReason $ Inc.build rule (catalogMetadata, invalidationMap)
+    result <- lift $ flip runReaderT buildReason $
+      Inc.build rule (catalogMetadata, newInvalidationKeys)
     let schemaCache = Inc.result result
-        prunedInvalidationMap = pruneInvalidationMap schemaCache invalidationMap
-    put $! RebuildableSchemaCache schemaCache prunedInvalidationMap (Inc.rebuildRule result)
+        prunedInvalidationKeys = pruneInvalidationKeys schemaCache newInvalidationKeys
+        !newCache = RebuildableSchemaCache schemaCache prunedInvalidationKeys (Inc.rebuildRule result)
+        !newInvalidations = oldInvalidations <> invalidations
+    put (newCache, newInvalidations)
     where
-      pruneInvalidationMap schemaCache = M.filterWithKey \name _ ->
+      -- Prunes invalidation keys that no longer exist in the schema to avoid leaking memory by
+      -- hanging onto unnecessary keys.
+      pruneInvalidationKeys schemaCache = over ikRemoteSchemas $ M.filterWithKey \name _ ->
        M.member name (scRemoteSchemas schemaCache)

-  invalidateCachedRemoteSchema name = CacheRWT do
-    unique <- newUnique
-    assign (rscInvalidationMap . at name) (Just unique)

 buildSchemaCacheRule
   -- Note: by supplying BuildReason via MonadReader, it does not participate in caching, which is
   -- what we want!
   :: ( HasVersion, ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
      , MonadIO m, MonadTx m, MonadReader BuildReason m, HasHttpManager m, HasSQLGenCtx m )
-  => (CatalogMetadata, InvalidationMap) `arr` SchemaCache
-buildSchemaCacheRule = proc inputs -> do
-  (outputs, collectedInfo) <- runWriterA buildAndCollectInfo -< inputs
+  => (CatalogMetadata, InvalidationKeys) `arr` SchemaCache
+buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
+  invalidationKeysDep <- Inc.newDependency -< invalidationKeys
+
+  -- Step 1: Process metadata and collect dependency information.
+  (outputs, collectedInfo) <-
+    runWriterA buildAndCollectInfo -< (catalogMetadata, invalidationKeysDep)
   let (inconsistentObjects, unresolvedDependencies) = partitionCollectedInfo collectedInfo
-  (resolvedOutputs, extraInconsistentObjects, resolvedDependencies) <-
+
+  -- Step 2: Resolve dependency information and drop dangling dependents.
+  (resolvedOutputs, dependencyInconsistentObjects, resolvedDependencies) <-
     resolveDependencies -< (outputs, unresolvedDependencies)
+
+  -- Step 3: Build the GraphQL schema.
+  ((remoteSchemaMap, gqlSchema, remoteGQLSchema), gqlSchemaInconsistentObjects)
+    <- runWriterA buildGQLSchema
+    -< (_boTables resolvedOutputs, _boFunctions resolvedOutputs, _boRemoteSchemas resolvedOutputs)

   returnA -< SchemaCache
     { scTables = _boTables resolvedOutputs
     , scFunctions = _boFunctions resolvedOutputs
-    , scRemoteSchemas = _boRemoteSchemas resolvedOutputs
+    , scRemoteSchemas = remoteSchemaMap
     , scAllowlist = _boAllowlist resolvedOutputs
-    , scGCtxMap = _boGCtxMap resolvedOutputs
-    , scDefaultRemoteGCtx = _boDefaultRemoteGCtx resolvedOutputs
+    , scGCtxMap = gqlSchema
+    , scDefaultRemoteGCtx = remoteGQLSchema
     , scDepMap = resolvedDependencies
-    , scInconsistentObjs = inconsistentObjects <> extraInconsistentObjects
+    , scInconsistentObjs =
+        inconsistentObjects <> dependencyInconsistentObjects <> toList gqlSchemaInconsistentObjects
     }
   where
     buildAndCollectInfo
-      :: ( HasVersion, ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
+      :: ( ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
          , ArrowWriter (Seq CollectedInfo) arr, MonadIO m, MonadTx m, MonadReader BuildReason m
          , HasHttpManager m, HasSQLGenCtx m )
-      => (CatalogMetadata, InvalidationMap) `arr` BuildOutputs
-    buildAndCollectInfo = proc (catalogMetadata, invalidationMap) -> do
+      => (CatalogMetadata, Inc.Dependency InvalidationKeys) `arr` BuildOutputs
+    buildAndCollectInfo = proc (catalogMetadata, invalidationKeys) -> do
       let CatalogMetadata tables relationships permissions
             eventTriggers remoteSchemas functions allowlistDefs
             computedFields = catalogMetadata

       -- tables
-      tableRawInfos <- buildTableCache -< tables
+      tableRawInfos <- buildTableCache -< (tables, Inc.selectD #_ikMetadata invalidationKeys)

       -- relationships and computed fields
       let relationshipsByTable = M.groupOn _crTable relationships
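Not part of the diff: runCacheRWT now returns the accumulated CacheInvalidations alongside the result and the rebuilt cache. A sketch of the calling pattern, with illustrative names:

  -- Sketch only: run a cache-modifying action, keep the rebuilt cache, and forward the
  -- collected invalidations (e.g. to recordSchemaUpdate) so other instances replay them.
  (result, newCache, invalidations) <- runCacheRWT oldCache someCacheAction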
@@ -182,23 +205,15 @@ buildSchemaCacheRule = proc inputs -> do
         & map (queryWithoutTypeNames . getGQLQuery . _lqQuery)
         & HS.fromList

-      -- build GraphQL context with tables and functions
-      baseGQLSchema <- bindA -< GS.mkGCtxMap tableCache functionCache
-
       -- remote schemas
-      let invalidatedRemoteSchemas = flip map remoteSchemas \remoteSchema ->
-            (M.lookup (_arsqName remoteSchema) invalidationMap, remoteSchema)
-      (remoteSchemaMap, gqlSchema, remoteGQLSchema) <-
-        (| foldlA' (\schemas schema -> (schemas, schema) >- addRemoteSchema)
-        |) (M.empty, baseGQLSchema, GC.emptyGCtx) invalidatedRemoteSchemas
+      let remoteSchemaInvalidationKeys = Inc.selectD #_ikRemoteSchemas invalidationKeys
+      remoteSchemaMap <- buildRemoteSchemas -< (remoteSchemaInvalidationKeys, remoteSchemas)

       returnA -< BuildOutputs
         { _boTables = tableCache
         , _boFunctions = functionCache
         , _boRemoteSchemas = remoteSchemaMap
         , _boAllowlist = allowList
-        , _boGCtxMap = gqlSchema
-        , _boDefaultRemoteGCtx = remoteGQLSchema
         }

     mkEventTriggerMetadataObject (CatalogEventTrigger qt trn configuration) =
@@ -206,6 +221,9 @@ buildSchemaCacheRule = proc inputs -> do
           definition = object ["table" .= qt, "configuration" .= configuration]
       in MetadataObject objectId definition

+    mkRemoteSchemaMetadataObject remoteSchema =
+      MetadataObject (MORemoteSchema (_arsqName remoteSchema)) (toJSON remoteSchema)
+
     -- Given a map of table info, “folds in” another map of information, accumulating inconsistent
     -- metadata objects for any entries in the second map that don’t appear in the first map. This
     -- is used to “line up” the metadata for relationships, computed fields, permissions, etc. with
@@ -236,14 +254,7 @@ buildSchemaCacheRule = proc inputs -> do
       :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
          , Inc.ArrowCache m arr, MonadIO m, MonadTx m, MonadReader BuildReason m, HasSQLGenCtx m )
       => (TableCoreInfo, [CatalogEventTrigger]) `arr` EventTriggerInfoMap
-    buildTableEventTriggers = proc (tableInfo, eventTriggers) ->
-      (\infos -> M.catMaybes infos >- returnA) <-<
-        (| Inc.keyed (\_ duplicateEventTriggers -> do
-             maybeEventTrigger <- noDuplicates mkEventTriggerMetadataObject -< duplicateEventTriggers
-             (\info -> join info >- returnA) <-<
-               (| traverseA (\eventTrigger -> buildEventTrigger -< (tableInfo, eventTrigger))
-               |) maybeEventTrigger)
-        |) (M.groupOn _cetName eventTriggers)
+    buildTableEventTriggers = buildInfoMap _cetName mkEventTriggerMetadataObject buildEventTrigger
       where
         buildEventTrigger = proc (tableInfo, eventTrigger) -> do
           let CatalogEventTrigger qt trn configuration = eventTrigger
@@ -268,24 +279,48 @@ buildSchemaCacheRule = proc inputs -> do
           liftTx $ delTriggerQ triggerName -- executes DROP IF EXISTS.. sql
           mkAllTriggersQ triggerName tableName (M.elems tableColumns) triggerDefinition

-    addRemoteSchema
-      :: ( HasVersion, ArrowChoice arr, ArrowWriter (Seq CollectedInfo) arr, ArrowKleisli m arr
-         , MonadIO m, HasHttpManager m )
-      => ( (RemoteSchemaMap, GS.GCtxMap, GS.GCtx)
-         , (Maybe InvalidationKey, AddRemoteSchemaQuery)
+    buildRemoteSchemas
+      :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
+         , Inc.ArrowCache m arr , MonadIO m, HasHttpManager m )
+      => ( Inc.Dependency (HashMap RemoteSchemaName Inc.InvalidationKey)
+         , [AddRemoteSchemaQuery]
+         ) `arr` HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject)
+    buildRemoteSchemas =
+      buildInfoMapPreservingMetadata _arsqName mkRemoteSchemaMetadataObject buildRemoteSchema
+      where
+        -- We want to cache this call because it fetches the remote schema over HTTP, and we don’t
+        -- want to re-run that if the remote schema definition hasn’t changed.
+        buildRemoteSchema = Inc.cache proc (invalidationKeys, remoteSchema) -> do
+          Inc.dependOn -< Inc.selectKeyD (_arsqName remoteSchema) invalidationKeys
+          (| withRecordInconsistency (liftEitherA <<< bindA -<
+               runExceptT $ addRemoteSchemaP2Setup remoteSchema)
+          |) (mkRemoteSchemaMetadataObject remoteSchema)

+    -- Builds the GraphQL schema and merges in remote schemas. This function is kind of gross, as
+    -- it’s possible for the remote schema merging to fail, at which point we have to mark them
+    -- inconsistent. This means we have to accumulate the consistent remote schemas as we go, in
+    -- addition to the built GraphQL context.
+    buildGQLSchema
+      :: ( ArrowChoice arr, ArrowWriter (Seq InconsistentMetadata) arr, ArrowKleisli m arr
+         , MonadError QErr m )
+      => ( TableCache
+         , FunctionCache
+         , HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject)
          ) `arr` (RemoteSchemaMap, GS.GCtxMap, GS.GCtx)
-    addRemoteSchema = proc ((remoteSchemas, gCtxMap, defGCtx), (_, remoteSchema)) -> do
-      let name = _arsqName remoteSchema
-      (| onNothingA (returnA -< (remoteSchemas, gCtxMap, defGCtx)) |) <-<
-        (| withRecordInconsistency (case M.lookup name remoteSchemas of
-             Just _ -> throwA -< err400 AlreadyExists "duplicate definition for remote schema"
-             Nothing -> liftEitherA <<< bindA -< runExceptT do
-               rsCtx <- addRemoteSchemaP2Setup remoteSchema
-               let rGCtx = convRemoteGCtx $ rscGCtx rsCtx
-               mergedGCtxMap <- mergeRemoteSchema gCtxMap rGCtx
-               mergedDefGCtx <- mergeGCtx defGCtx rGCtx
-               pure (M.insert name rsCtx remoteSchemas, mergedGCtxMap, mergedDefGCtx))
-        |) (MetadataObject (MORemoteSchema name) (toJSON remoteSchema))
+    buildGQLSchema = proc (tableCache, functionCache, remoteSchemas) -> do
+      baseGQLSchema <- bindA -< GS.mkGCtxMap tableCache functionCache
+      (| foldlA' (\(remoteSchemaMap, gqlSchemas, remoteGQLSchemas)
+                   (remoteSchemaName, (remoteSchema, metadataObject)) ->
+           (| withRecordInconsistency (do
+                let gqlSchema = convRemoteGCtx $ rscGCtx remoteSchema
+                mergedGQLSchemas <- bindErrorA -< mergeRemoteSchema gqlSchemas gqlSchema
+                mergedRemoteGQLSchemas <- bindErrorA -< mergeGCtx remoteGQLSchemas gqlSchema
+                let mergedRemoteSchemaMap = M.insert remoteSchemaName remoteSchema remoteSchemaMap
+                returnA -< (mergedRemoteSchemaMap, mergedGQLSchemas, mergedRemoteGQLSchemas))
+           |) metadataObject
+           >-> (| onNothingA ((remoteSchemaMap, gqlSchemas, remoteGQLSchemas) >- returnA) |))
+      |) (M.empty, baseGQLSchema, GC.emptyGCtx) (M.toList remoteSchemas)


 -- | @'withMetadataCheck' cascade action@ runs @action@ and checks if the schema changed as a
 -- result. If it did, it checks to ensure the changes do not violate any integrity constraints, and
|
@ -1,4 +1,5 @@
|
|||||||
{-# LANGUAGE Arrows #-}
|
{-# LANGUAGE Arrows #-}
|
||||||
|
{-# LANGUAGE RecordWildCards #-}
|
||||||
|
|
||||||
-- | Types/functions shared between modules that implement "Hasura.RQL.DDL.Schema.Cache". Other
|
-- | Types/functions shared between modules that implement "Hasura.RQL.DDL.Schema.Cache". Other
|
||||||
-- modules should not import this module directly.
|
-- modules should not import this module directly.
|
||||||
@@ -6,14 +7,13 @@ module Hasura.RQL.DDL.Schema.Cache.Common where

 import Hasura.Prelude

+import qualified Data.HashMap.Strict.Extended as M
 import qualified Data.HashSet as HS
 import qualified Data.Sequence as Seq

 import Control.Arrow.Extended
 import Control.Lens
-import Control.Monad.Unique

-import qualified Hasura.GraphQL.Context as GC
 import qualified Hasura.Incremental as Inc

 import Hasura.RQL.Types
@@ -22,18 +22,30 @@ import Hasura.RQL.Types.QueryCollection
 import Hasura.RQL.Types.Run
 import Hasura.SQL.Types

--- | A map used to explicitly invalidate part of the build cache, which is most useful for external
--- resources (currently only remote schemas). The 'InvalidationKey' values it contains are used as
--- inputs to build rules, so setting an entry to a fresh 'InvalidationKey' forces it to be
--- re-executed.
-type InvalidationMap = HashMap RemoteSchemaName InvalidationKey
-type InvalidationKey = Unique
+-- | 'InvalidationKeys' used to apply requested 'CacheInvalidations'.
+data InvalidationKeys = InvalidationKeys
+  { _ikMetadata :: !Inc.InvalidationKey
+  , _ikRemoteSchemas :: !(HashMap RemoteSchemaName Inc.InvalidationKey)
+  } deriving (Eq, Generic)
+instance Inc.Cacheable InvalidationKeys
+instance Inc.Select InvalidationKeys
+$(makeLenses ''InvalidationKeys)
+
+initialInvalidationKeys :: InvalidationKeys
+initialInvalidationKeys = InvalidationKeys Inc.initialInvalidationKey mempty
+
+invalidateKeys :: CacheInvalidations -> InvalidationKeys -> InvalidationKeys
+invalidateKeys CacheInvalidations{..} InvalidationKeys{..} = InvalidationKeys
+  { _ikMetadata = if ciMetadata then Inc.invalidate _ikMetadata else _ikMetadata
+  , _ikRemoteSchemas = foldl' (flip invalidateRemoteSchema) _ikRemoteSchemas ciRemoteSchemas }
+  where
+    invalidateRemoteSchema = M.alter $ Just . maybe Inc.initialInvalidationKey Inc.invalidate

 data BuildInputs
   = BuildInputs
   { _biReason :: !BuildReason
   , _biCatalogMetadata :: !CatalogMetadata
-  , _biInvalidationMap :: !InvalidationMap
+  , _biInvalidationMap :: !InvalidationKeys
   } deriving (Eq)

 -- | The direct output of 'buildSchemaCacheRule'. Contains most of the things necessary to build a
@@ -41,20 +53,21 @@ data BuildInputs
 -- 'MonadWriter' side channel.
 data BuildOutputs
   = BuildOutputs
   { _boTables :: !TableCache
   , _boFunctions :: !FunctionCache
-  , _boRemoteSchemas :: !RemoteSchemaMap
-  , _boAllowlist :: !(HS.HashSet GQLQuery)
-  , _boGCtxMap :: !GC.GCtxMap
-  , _boDefaultRemoteGCtx :: !GC.GCtx
+  , _boRemoteSchemas :: !(HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject))
+  -- ^ We preserve the 'MetadataObject' from the original catalog metadata in the output so we can
+  -- reuse it later if we need to mark the remote schema inconsistent during GraphQL schema
+  -- generation (because of field conflicts).
+  , _boAllowlist :: !(HS.HashSet GQLQuery)
   } deriving (Show, Eq)
 $(makeLenses ''BuildOutputs)

 data RebuildableSchemaCache m
   = RebuildableSchemaCache
   { lastBuiltSchemaCache :: !SchemaCache
-  , _rscInvalidationMap :: !InvalidationMap
-  , _rscRebuild :: !(Inc.Rule (ReaderT BuildReason m) (CatalogMetadata, InvalidationMap) SchemaCache)
+  , _rscInvalidationMap :: !InvalidationKeys
+  , _rscRebuild :: !(Inc.Rule (ReaderT BuildReason m) (CatalogMetadata, InvalidationKeys) SchemaCache)
   }
 $(makeLenses ''RebuildableSchemaCache)

@@ -91,5 +104,38 @@ noDuplicates mkMetadataObject = proc values -> case values of
     returnA -< Nothing
 {-# INLINABLE noDuplicates #-}

+-- | Processes a list of catalog metadata into a map of processed information, marking any duplicate
+-- entries inconsistent.
+buildInfoMap
+  :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
+     , Eq k, Hashable k )
+  => (a -> k)
+  -> (a -> MetadataObject)
+  -> (e, a) `arr` Maybe b
+  -> (e, [a]) `arr` HashMap k b
+buildInfoMap extractKey mkMetadataObject buildInfo = proc (e, infos) ->
+  (M.groupOn extractKey infos >- returnA)
+  >-> (| Inc.keyed (\_ duplicateInfos ->
+         (duplicateInfos >- noDuplicates mkMetadataObject)
+         >-> (| traverseA (\info -> (e, info) >- buildInfo) |)
+         >-> (\info -> join info >- returnA)) |)
+  >-> (\infoMap -> M.catMaybes infoMap >- returnA)
+{-# INLINABLE buildInfoMap #-}
+
+-- | Like 'buildInfo', but includes each processed info’s associated 'MetadataObject' in the result.
+-- This is useful if the results will be further processed, and the 'MetadataObject' is still needed
+-- to mark the object inconsistent.
+buildInfoMapPreservingMetadata
+  :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
+     , Eq k, Hashable k )
+  => (a -> k)
+  -> (a -> MetadataObject)
+  -> (e, a) `arr` Maybe b
+  -> (e, [a]) `arr` HashMap k (b, MetadataObject)
+buildInfoMapPreservingMetadata extractKey mkMetadataObject buildInfo =
+  buildInfoMap extractKey mkMetadataObject proc (e, info) ->
+    ((e, info) >- buildInfo) >-> \result -> result <&> (, mkMetadataObject info) >- returnA
+{-# INLINABLE buildInfoMapPreservingMetadata #-}
+
 addTableContext :: QualifiedTable -> Text -> Text
 addTableContext tableName e = "in table " <> tableName <<> ": " <> e
@@ -34,31 +34,19 @@ addNonColumnFields
      , [CatalogComputedField]
      ) `arr` FieldInfoMap FieldInfo
 addNonColumnFields = proc (rawTableInfo, columns, relationships, computedFields) -> do
-  let foreignKeys = _tciForeignKeys <$> rawTableInfo
-  relationshipInfos <-
-    (| Inc.keyed (\_ relationshipsByName -> do
-         maybeRelationship <- noDuplicates mkRelationshipMetadataObject -< relationshipsByName
-         (\info -> join info >- returnA) <-<
-           (| traverseA (\relationship -> do
-                info <- buildRelationship -< (foreignKeys, relationship)
-                returnA -< info <&> (, mkRelationshipMetadataObject relationship))
-           |) maybeRelationship)
-    |) (M.groupOn _crRelName relationships)
-
-  let trackedTableNames = HS.fromList $ M.keys rawTableInfo
-  computedFieldInfos <-
-    (| Inc.keyed (\_ computedFieldsByName -> do
-         maybeComputedField <- noDuplicates mkComputedFieldMetadataObject -< computedFieldsByName
-         (\info -> join info >- returnA) <-<
-           (| traverseA (\computedField -> do
-                info <- buildComputedField -< (trackedTableNames, computedField)
-                returnA -< info <&> (, mkComputedFieldMetadataObject computedField))
-           |) maybeComputedField)
-    |) (M.groupOn (_afcName . _cccComputedField) computedFields)
+  relationshipInfos
+    <- buildInfoMapPreservingMetadata _crRelName mkRelationshipMetadataObject buildRelationship
+    -< (_tciForeignKeys <$> rawTableInfo, relationships)
+  computedFieldInfos
+    <- buildInfoMapPreservingMetadata
+         (_afcName . _cccComputedField)
+         mkComputedFieldMetadataObject
+         buildComputedField
+    -< (HS.fromList $ M.keys rawTableInfo, computedFields)

   let mapKey f = M.fromList . map (first f) . M.toList
-      relationshipFields = mapKey fromRel $ M.catMaybes relationshipInfos
-      computedFieldFields = mapKey fromComputedField $ M.catMaybes computedFieldInfos
+      relationshipFields = mapKey fromRel relationshipInfos
+      computedFieldFields = mapKey fromComputedField computedFieldInfos

   -- First, check for conflicts between non-column fields, since we can raise a better error
   -- message in terms of the two metadata objects that define them.
@@ -292,10 +292,14 @@ buildTableCache
   :: forall arr m
    . ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
      , Inc.ArrowCache m arr, MonadTx m )
-  => [CatalogTable] `arr` M.HashMap QualifiedTable TableRawInfo
-buildTableCache = Inc.cache proc catalogTables -> do
+  => ( [CatalogTable]
+     , Inc.Dependency Inc.InvalidationKey
+     ) `arr` M.HashMap QualifiedTable TableRawInfo
+buildTableCache = Inc.cache proc (catalogTables, reloadMetadataInvalidationKey) -> do
   rawTableInfos <-
-    (| Inc.keyed (| withTable (\tables -> buildRawTableInfo <<< noDuplicateTables -< tables) |)
+    (| Inc.keyed (| withTable (\tables
+         -> (tables, reloadMetadataInvalidationKey)
+         >- first noDuplicateTables >>> buildRawTableInfo) |)
    |) (M.groupOnNE _ctName catalogTables)
   let rawTableCache = M.catMaybes rawTableInfos
       enumTables = flip M.mapMaybe rawTableCache \rawTableInfo ->
@@ -314,8 +318,13 @@ buildTableCache = Inc.cache proc catalogTables -> do
         _ -> throwA -< err400 AlreadyExists "duplication definition for table"

     -- Step 1: Build the raw table cache from metadata information.
-    buildRawTableInfo :: ErrorA QErr arr CatalogTable (TableCoreInfoG PGRawColumnInfo PGCol)
-    buildRawTableInfo = Inc.cache proc (CatalogTable name systemDefined isEnum config maybeInfo) -> do
+    buildRawTableInfo
+      :: ErrorA QErr arr
+       ( CatalogTable
+       , Inc.Dependency Inc.InvalidationKey
+       ) (TableCoreInfoG PGRawColumnInfo PGCol)
+    buildRawTableInfo = Inc.cache proc (catalogTable, reloadMetadataInvalidationKey) -> do
+      let CatalogTable name systemDefined isEnum config maybeInfo = catalogTable
      catalogInfo <-
        (| onNothingA (throwA -<
             err400 NotExists $ "no such table/view exists in postgres: " <>> name)
@@ -326,7 +335,11 @@ buildTableCache = Inc.cache proc catalogTables -> do
           primaryKey = _ctiPrimaryKey catalogInfo
       rawPrimaryKey <- liftEitherA -< traverse (resolvePrimaryKeyColumns columnMap) primaryKey
       enumValues <- if isEnum
-        then bindErrorA -< Just <$> fetchAndValidateEnumValues name rawPrimaryKey columns
+        then do
+          -- We want to make sure we reload enum values whenever someone explicitly calls
+          -- `reload_metadata`.
+          Inc.dependOn -< reloadMetadataInvalidationKey
+          bindErrorA -< Just <$> fetchAndValidateEnumValues name rawPrimaryKey columns
         else returnA -< Nothing

       returnA -< TableCoreInfo
@@ -22,7 +22,7 @@ type UrlFromEnv = Text
 newtype RemoteSchemaName
   = RemoteSchemaName
   { unRemoteSchemaName :: NonEmptyText }
-  deriving ( Show, Eq, Lift, Hashable, J.ToJSON, J.ToJSONKey
+  deriving ( Show, Eq, Ord, Lift, Hashable, J.ToJSON, J.ToJSONKey
            , J.FromJSON, Q.ToPrepArg, Q.FromCol, DQuote, NFData
            , Generic, Cacheable, Arbitrary
            )
@@ -13,6 +13,7 @@ module Hasura.RQL.Types.SchemaCache.Build

   , CacheRWM(..)
   , BuildReason(..)
+  , CacheInvalidations(..)
   , buildSchemaCache
   , buildSchemaCacheFor
   , buildSchemaCacheStrict
@@ -26,7 +27,10 @@ import qualified Data.Sequence as Seq
 import qualified Data.Text as T

 import Control.Arrow.Extended
+import Control.Lens
 import Data.Aeson (toJSON)
+import Data.Aeson.Casing
+import Data.Aeson.TH
 import Data.List (nub)

 import Hasura.RQL.Types.Error
@@ -44,6 +48,14 @@ data CollectedInfo
       !SchemaObjId
       !SchemaDependency
   deriving (Show, Eq)
+$(makePrisms ''CollectedInfo)
+
+class AsInconsistentMetadata s where
+  _InconsistentMetadata :: Prism' s InconsistentMetadata
+instance AsInconsistentMetadata InconsistentMetadata where
+  _InconsistentMetadata = id
+instance AsInconsistentMetadata CollectedInfo where
+  _InconsistentMetadata = _CIInconsistency

 partitionCollectedInfo
   :: Seq CollectedInfo
@@ -55,12 +67,14 @@ partitionCollectedInfo =
         let dependency = (metadataObject, objectId, schemaDependency)
         in (inconsistencies, dependency:dependencies)

-recordInconsistency :: (ArrowWriter (Seq CollectedInfo) arr) => (MetadataObject, Text) `arr` ()
+recordInconsistency
+  :: (ArrowWriter (Seq w) arr, AsInconsistentMetadata w) => (MetadataObject, Text) `arr` ()
 recordInconsistency = first (arr (:[])) >>> recordInconsistencies

-recordInconsistencies :: (ArrowWriter (Seq CollectedInfo) arr) => ([MetadataObject], Text) `arr` ()
+recordInconsistencies
+  :: (ArrowWriter (Seq w) arr, AsInconsistentMetadata w) => ([MetadataObject], Text) `arr` ()
 recordInconsistencies = proc (metadataObjects, reason) ->
-  tellA -< Seq.fromList $ map (CIInconsistency . InconsistentObject reason) metadataObjects
+  tellA -< Seq.fromList $ map (review _InconsistentMetadata . InconsistentObject reason) metadataObjects

 recordDependencies
   :: (ArrowWriter (Seq CollectedInfo) arr)
@@ -69,7 +83,7 @@ recordDependencies = proc (metadataObject, schemaObjectId, dependencies) ->
   tellA -< Seq.fromList $ map (CIDependency metadataObject schemaObjectId) dependencies

 withRecordInconsistency
-  :: (ArrowChoice arr, ArrowWriter (Seq CollectedInfo) arr)
+  :: (ArrowChoice arr, ArrowWriter (Seq w) arr, AsInconsistentMetadata w)
   => ErrorA QErr arr (e, s) a
   -> arr (e, (MetadataObject, s)) (Maybe a)
 withRecordInconsistency f = proc (e, (metadataObject, s)) -> do
@@ -85,8 +99,7 @@ withRecordInconsistency f = proc (e, (metadataObject, s)) -> do
 -- operations for triggering a schema cache rebuild

 class (CacheRM m) => CacheRWM m where
-  buildSchemaCacheWithOptions :: BuildReason -> m ()
-  invalidateCachedRemoteSchema :: RemoteSchemaName -> m ()
+  buildSchemaCacheWithOptions :: BuildReason -> CacheInvalidations -> m ()

 data BuildReason
   -- | The build was triggered by an update this instance made to the catalog (in the
@@ -99,12 +112,26 @@ data BuildReason
   | CatalogSync
   deriving (Show, Eq)

+data CacheInvalidations = CacheInvalidations
+  { ciMetadata :: !Bool
+  -- ^ Force reloading of all database information, including information not technically stored in
+  -- metadata (currently just enum values). Set by the @reload_metadata@ API.
+  , ciRemoteSchemas :: !(HashSet RemoteSchemaName)
+  -- ^ Force refetching of the given remote schemas, even if their definition has not changed. Set
+  -- by the @reload_remote_schema@ API.
+  }
+$(deriveJSON (aesonDrop 2 snakeCase) ''CacheInvalidations)
+
+instance Semigroup CacheInvalidations where
+  CacheInvalidations a1 b1 <> CacheInvalidations a2 b2 = CacheInvalidations (a1 || a2) (b1 <> b2)
+instance Monoid CacheInvalidations where
+  mempty = CacheInvalidations False mempty
+
 instance (CacheRWM m) => CacheRWM (ReaderT r m) where
-  buildSchemaCacheWithOptions = lift . buildSchemaCacheWithOptions
-  invalidateCachedRemoteSchema = lift . invalidateCachedRemoteSchema
+  buildSchemaCacheWithOptions a b = lift $ buildSchemaCacheWithOptions a b

 buildSchemaCache :: (CacheRWM m) => m ()
-buildSchemaCache = buildSchemaCacheWithOptions CatalogUpdate
+buildSchemaCache = buildSchemaCacheWithOptions CatalogUpdate mempty

 -- | Rebuilds the schema cache. If an object with the given object id became newly inconsistent,
 -- raises an error about it specifically. Otherwise, raises a generic metadata inconsistency error.
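Not part of the diff: how the two APIs mentioned above populate CacheInvalidations and how the Semigroup instance combines requests. someSchemaName is a placeholder value, and S refers to Data.HashSet as imported elsewhere in this patch.

  -- Sketch: reload_metadata sets the metadata flag, reload_remote_schema names one schema.
  metadataOnly, oneSchema :: CacheInvalidations
  metadataOnly = mempty { ciMetadata = True }
  oneSchema    = mempty { ciRemoteSchemas = S.singleton someSchemaName }

  -- (<>) ORs the flag and unions the sets, so combining both requests invalidates both:
  -- metadataOnly <> oneSchema == CacheInvalidations True (S.singleton someSchemaName)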
@@ -5,6 +5,7 @@ module Hasura.Server.App where

 import Control.Concurrent.MVar
 import Control.Exception (IOException, try)
+import Control.Lens (view, _2)
 import Control.Monad.Stateless
 import Data.Aeson hiding (json)
 import Data.Either (isRight)
@@ -457,9 +458,9 @@ mkWaiApp isoLevel logger sqlGenCtx enableAL pool ci httpManager mode corsCfg ena
   (cacheRef, cacheBuiltTime) <- do
     pgResp <- runExceptT $ peelRun runCtx pgExecCtxSer Q.ReadWrite $
       (,) <$> buildRebuildableSchemaCache <*> liftTx fetchLastUpdate
-    (schemaCache, time) <- liftIO $ either initErrExit return pgResp
+    (schemaCache, event) <- liftIO $ either initErrExit return pgResp
     scRef <- liftIO $ newIORef (schemaCache, initSchemaCacheVer)
-    return (scRef, snd <$> time)
+    return (scRef, view _2 <$> event)

   cacheLock <- liftIO $ newMVar ()
   planCache <- liftIO $ E.initPlanCache planCacheOptions
|
@ -16,9 +16,6 @@ module Hasura.Server.Migrate
|
|||||||
, dropCatalog
|
, dropCatalog
|
||||||
) where
|
) where
|
||||||
|
|
||||||
import Control.Monad.Unique
|
|
||||||
import Data.Time.Clock (UTCTime)
|
|
||||||
|
|
||||||
import Hasura.Prelude
|
import Hasura.Prelude
|
||||||
|
|
||||||
import qualified Data.Aeson as A
|
import qualified Data.Aeson as A
|
||||||
@@ -29,6 +26,10 @@ import qualified Database.PG.Query.Connection as Q
 import qualified Language.Haskell.TH.Lib as TH
 import qualified Language.Haskell.TH.Syntax as TH

+import Control.Lens (view, _2)
+import Control.Monad.Unique
+import Data.Time.Clock (UTCTime)
+
 import Hasura.Logging (Hasura, LogLevel (..), ToEngineLog (..))
 import Hasura.RQL.DDL.Relationship
 import Hasura.RQL.DDL.Schema
@@ -163,7 +164,7 @@ migrateCatalog migrationTime = do
     buildCacheAndRecreateSystemMetadata :: m (RebuildableSchemaCache m)
     buildCacheAndRecreateSystemMetadata = do
       schemaCache <- buildRebuildableSchemaCache
-      snd <$> runCacheRWT schemaCache recreateSystemMetadata
+      view _2 <$> runCacheRWT schemaCache recreateSystemMetadata

     -- the old 0.8 catalog version is non-integral, so we store it in the database as a string
     getCatalogVersion = liftTx $ runIdentity . Q.getRow <$> Q.withQE defaultTxErrorHandler
@@ -12,7 +12,7 @@ import Hasura.Prelude
 import qualified Data.Text as T

 latestCatalogVersion :: Integer
-latestCatalogVersion = 30
+latestCatalogVersion = 31

 latestCatalogVersionString :: T.Text
 latestCatalogVersionString = T.pack $ show latestCatalogVersion
@@ -150,23 +150,21 @@ $(deriveJSON
   ''RQLQueryV2
  )

-fetchLastUpdate :: Q.TxE QErr (Maybe (InstanceId, UTCTime))
-fetchLastUpdate = do
-  Q.withQE defaultTxErrorHandler
-    [Q.sql|
-      SELECT instance_id::text, occurred_at
-      FROM hdb_catalog.hdb_schema_update_event
-      ORDER BY occurred_at DESC LIMIT 1
-    |] () True
+fetchLastUpdate :: Q.TxE QErr (Maybe (InstanceId, UTCTime, CacheInvalidations))
+fetchLastUpdate = over (_Just._3) Q.getAltJ <$> Q.withQE defaultTxErrorHandler [Q.sql|
+  SELECT instance_id::text, occurred_at, invalidations
+  FROM hdb_catalog.hdb_schema_update_event
+  ORDER BY occurred_at DESC LIMIT 1
+  |] () True

-recordSchemaUpdate :: InstanceId -> Q.TxE QErr ()
-recordSchemaUpdate instanceId =
+recordSchemaUpdate :: InstanceId -> CacheInvalidations -> Q.TxE QErr ()
+recordSchemaUpdate instanceId invalidations =
   liftTx $ Q.unitQE defaultTxErrorHandler [Q.sql|
     INSERT INTO hdb_catalog.hdb_schema_update_event
-      (instance_id, occurred_at) VALUES ($1::uuid, DEFAULT)
+      (instance_id, occurred_at, invalidations) VALUES ($1::uuid, DEFAULT, $2::json)
     ON CONFLICT ((occurred_at IS NOT NULL))
-      DO UPDATE SET instance_id = $1::uuid, occurred_at = DEFAULT
-    |] (Identity instanceId) True
+      DO UPDATE SET instance_id = $1::uuid, occurred_at = DEFAULT, invalidations = $2::json
+    |] (instanceId, Q.AltJ invalidations) True

 runQuery
   :: (HasVersion, MonadIO m, MonadError QErr m)
@@ -184,12 +182,12 @@ runQuery pgExecCtx instanceId userInfo sc hMgr sqlGenCtx systemDefined query = d
   either throwError withReload resE
   where
     runCtx = RunCtx userInfo hMgr sqlGenCtx
-    withReload r = do
+    withReload (result, updatedCache, invalidations) = do
       when (queryModifiesSchemaCache query) $ do
-        e <- liftIO $ runExceptT $ runLazyTx pgExecCtx Q.ReadWrite
-          $ liftTx $ recordSchemaUpdate instanceId
+        e <- liftIO $ runExceptT $ runLazyTx pgExecCtx Q.ReadWrite $ liftTx $
+          recordSchemaUpdate instanceId invalidations
        liftEither e
-      return r
+      return (result, updatedCache)

 -- | A predicate that determines whether the given query might modify/rebuild the schema cache. If
 -- so, it needs to acquire the global lock on the schema cache so that other queries do not modify
|
@ -58,9 +58,10 @@ instance ToEngineLog SchemaSyncThreadLog Hasura where
|
|||||||
|
|
||||||
data EventPayload
|
data EventPayload
|
||||||
= EventPayload
|
= EventPayload
|
||||||
{ _epInstanceId :: !InstanceId
|
{ _epInstanceId :: !InstanceId
|
||||||
, _epOccurredAt :: !UTC.UTCTime
|
, _epOccurredAt :: !UTC.UTCTime
|
||||||
} deriving (Show, Eq)
|
, _epInvalidations :: !CacheInvalidations
|
||||||
|
}
|
||||||
$(deriveJSON (aesonDrop 3 snakeCase) ''EventPayload)
|
$(deriveJSON (aesonDrop 3 snakeCase) ''EventPayload)
|
||||||
|
|
||||||
data ThreadError
|
data ThreadError
|
||||||
@@ -136,9 +137,9 @@ listener sqlGenCtx pool logger httpMgr updateEventRef
       Just time -> (dbInstId /= instanceId) && accrdAt > time

     refreshCache Nothing = return ()
-    refreshCache (Just (dbInstId, accrdAt)) =
+    refreshCache (Just (dbInstId, accrdAt, invalidations)) =
       when (shouldRefresh dbInstId accrdAt) $
-        refreshSchemaCache sqlGenCtx pool logger httpMgr cacheRef
+        refreshSchemaCache sqlGenCtx pool logger httpMgr cacheRef invalidations
          threadType "schema cache reloaded after postgres listen init"

     notifyHandler = \case
@@ -179,7 +180,7 @@ processor sqlGenCtx pool logger httpMgr updateEventRef
     event <- STM.atomically getLatestEvent
     logInfo logger threadType $ object ["processed_event" .= event]
     when (shouldReload event) $
-      refreshSchemaCache sqlGenCtx pool logger httpMgr cacheRef
+      refreshSchemaCache sqlGenCtx pool logger httpMgr cacheRef (_epInvalidations event)
         threadType "schema cache reloaded"
   where
     -- checks if there is an event
@@ -202,15 +203,17 @@ refreshSchemaCache
   -> Logger Hasura
   -> HTTP.Manager
   -> SchemaCacheRef
+  -> CacheInvalidations
   -> ThreadType
   -> T.Text -> IO ()
-refreshSchemaCache sqlGenCtx pool logger httpManager cacheRef threadType msg = do
+refreshSchemaCache sqlGenCtx pool logger httpManager cacheRef invalidations threadType msg = do
   -- Reload schema cache from catalog
   resE <- liftIO $ runExceptT $ withSCUpdate cacheRef logger do
     rebuildableCache <- fst <$> liftIO (readIORef $ _scrCache cacheRef)
-    buildSchemaCacheWithOptions CatalogSync
+    ((), cache, _) <- buildSchemaCacheWithOptions CatalogSync invalidations
       & runCacheRWT rebuildableCache
       & peelRun runCtx pgCtx PG.ReadWrite
+    pure ((), cache)
   case resE of
     Left e -> logError logger threadType $ TEQueryError e
     Right () -> logInfo logger threadType $ object ["message" .= msg]
@@ -411,7 +411,8 @@ CREATE TABLE hdb_catalog.remote_schemas (

 CREATE TABLE hdb_catalog.hdb_schema_update_event (
   instance_id uuid NOT NULL,
-  occurred_at timestamptz NOT NULL DEFAULT NOW()
+  occurred_at timestamptz NOT NULL DEFAULT NOW(),
+  invalidations json NOT NULL
 );

 CREATE UNIQUE INDEX hdb_schema_update_event_one_row
@@ -422,13 +423,16 @@ $function$
   DECLARE
     instance_id uuid;
     occurred_at timestamptz;
+    invalidations json;
     curr_rec record;
   BEGIN
     instance_id = NEW.instance_id;
     occurred_at = NEW.occurred_at;
+    invalidations = NEW.invalidations;
     PERFORM pg_notify('hasura_schema_update', json_build_object(
       'instance_id', instance_id,
-      'occurred_at', occurred_at
+      'occurred_at', occurred_at,
+      'invalidations', invalidations
       )::text);
     RETURN curr_rec;
   END;
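The trigger above now includes the invalidations column in the payload it broadcasts on the hasura_schema_update channel. As an illustration only (using the postgresql-simple package and a placeholder connection string, not the engine's own pg-client plumbing), a standalone process could observe those notifications like this:

{-# LANGUAGE OverloadedStrings #-}

import           Control.Monad                             (forever)
import qualified Data.ByteString.Char8                     as BS
import           Database.PostgreSQL.Simple                (connectPostgreSQL, execute_)
import           Database.PostgreSQL.Simple.Notification   (getNotification, notificationData)

main :: IO ()
main = do
  conn <- connectPostgreSQL "postgres://localhost/hasura"  -- placeholder DSN
  _ <- execute_ conn "LISTEN hasura_schema_update"
  forever $ do
    n <- getNotification conn
    -- the payload is the json_build_object(...)::text produced by the trigger,
    -- including the new 'invalidations' key
    BS.putStrLn (notificationData n)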
@@ -657,9 +661,9 @@ CREATE VIEW hdb_catalog.hdb_computed_field_function AS
     FROM hdb_catalog.hdb_computed_field
 );

 CREATE OR REPLACE FUNCTION hdb_catalog.check_violation(msg text) RETURNS bool AS
 $$
 BEGIN
   RAISE check_violation USING message=msg;
 END;
 $$ LANGUAGE plpgsql;
server/src-rsr/migrations/30_to_31.sql (new file)
@@ -0,0 +1,22 @@
+TRUNCATE hdb_catalog.hdb_schema_update_event;
+ALTER TABLE hdb_catalog.hdb_schema_update_event ADD COLUMN invalidations json NOT NULL;
+CREATE OR REPLACE FUNCTION hdb_catalog.hdb_schema_update_event_notifier() RETURNS trigger AS
+$function$
+  DECLARE
+    instance_id uuid;
+    occurred_at timestamptz;
+    invalidations json;
+    curr_rec record;
+  BEGIN
+    instance_id = NEW.instance_id;
+    occurred_at = NEW.occurred_at;
+    invalidations = NEW.invalidations;
+    PERFORM pg_notify('hasura_schema_update', json_build_object(
+      'instance_id', instance_id,
+      'occurred_at', occurred_at,
+      'invalidations', invalidations
+      )::text);
+    RETURN curr_rec;
+  END;
+$function$
+LANGUAGE plpgsql;
server/src-rsr/migrations/31_to_30.sql (new file)
@@ -0,0 +1,19 @@
+TRUNCATE hdb_catalog.hdb_schema_update_event;
+CREATE OR REPLACE FUNCTION hdb_catalog.hdb_schema_update_event_notifier() RETURNS trigger AS
+$function$
+  DECLARE
+    instance_id uuid;
+    occurred_at timestamptz;
+    curr_rec record;
+  BEGIN
+    instance_id = NEW.instance_id;
+    occurred_at = NEW.occurred_at;
+    PERFORM pg_notify('hasura_schema_update', json_build_object(
+      'instance_id', instance_id,
+      'occurred_at', occurred_at
+      )::text);
+    RETURN curr_rec;
+  END;
+$function$
+LANGUAGE plpgsql;
+ALTER TABLE hdb_catalog.hdb_schema_update_event DROP COLUMN invalidations;
@@ -36,11 +36,10 @@ instance (MonadBase IO m) => TableCoreInfoRM (CacheRefT m)
 instance (MonadBase IO m) => CacheRM (CacheRefT m) where
   askSchemaCache = CacheRefT (fmap lastBuiltSchemaCache . readMVar)

-instance (MonadIO m, MonadBaseControl IO m, MonadTx m, MonadUnique m) => CacheRWM (CacheRefT m) where
-  buildSchemaCacheWithOptions options = CacheRefT $ flip modifyMVar \schemaCache ->
-    swap <$> runCacheRWT schemaCache (buildSchemaCacheWithOptions options)
-  invalidateCachedRemoteSchema name = CacheRefT $ flip modifyMVar \schemaCache ->
-    swap <$> runCacheRWT schemaCache (invalidateCachedRemoteSchema name)
+instance (MonadIO m, MonadBaseControl IO m, MonadTx m) => CacheRWM (CacheRefT m) where
+  buildSchemaCacheWithOptions reason invalidations = CacheRefT $ flip modifyMVar \schemaCache -> do
+    ((), cache, _) <- runCacheRWT schemaCache (buildSchemaCacheWithOptions reason invalidations)
+    pure (cache, ())

 instance Example (CacheRefT m ()) where
   type Arg (CacheRefT m ()) = CacheRefT m :~> IO
@@ -0,0 +1,55 @@
+# This is a regression test for #3791.
+- description: Setup enum table, create relationship, and insert invalid enum value
+  url: /v1/query
+  status: 200
+  query:
+    type: bulk
+    args:
+    - type: set_table_is_enum
+      args:
+        table: weekdays
+        is_enum: true
+    - type: create_object_relationship
+      args:
+        table: employees
+        name: favorite_color_object
+        using:
+          foreign_key_constraint_on: favorite_color
+    - type: run_sql
+      args:
+        sql: INSERT INTO colors (value, comment) VALUES ('illegal+graphql+identifier', '')
+    - type: reload_metadata
+      args: {}
+
+- description: Query inconsistent objects
+  url: /v1/query
+  status: 200
+  response:
+    is_consistent: false
+    inconsistent_objects:
+    - definition:
+        schema: public
+        name: colors
+      reason: the table "colors" cannot be used as an enum because the value
+        "illegal+graphql+identifier" is not a valid GraphQL enum value name
+      type: table
+    - definition:
+        using:
+          foreign_key_constraint_on: favorite_color
+        name: favorite_color_object
+        comment:
+        table:
+          schema: public
+          name: employees
+      reason: table "colors" is not tracked
+      type: object_relation
+  query:
+    type: get_inconsistent_metadata
+    args: {}
+
+- description: Drop inconsistent objects
+  url: /v1/query
+  status: 200
+  query:
+    type: drop_inconsistent_metadata
+    args: {}
@@ -90,7 +90,7 @@ class TestRemoteSchemaBasic:
         q = mk_add_remote_q('simple 2', 'http://localhost:5000/hello-graphql')
         st_code, resp = hge_ctx.v1q(q)
         assert st_code == 400
-        assert resp['code'] == 'constraint-violation'
+        assert resp['code'] == 'unexpected'

     def test_remove_schema_error(self, hge_ctx):
         """remove remote schema which is not added"""
@@ -679,6 +679,9 @@ class TestSetTableIsEnum(DefaultTestQueries):
     def test_add_test_schema_enum_table(self, hge_ctx):
         check_query_f(hge_ctx, self.dir() + '/add_test_schema_enum_table.yaml')

+    def test_relationship_with_inconsistent_enum_table(self, hge_ctx):
+        check_query_f(hge_ctx, self.dir() + '/relationship_with_inconsistent_enum_table.yaml')
+
 class TestSetTableCustomFields(DefaultTestQueries):
     @classmethod
     def dir(cls):
@@ -735,5 +738,3 @@ class TestBulkQuery(DefaultTestQueries):

     def test_run_bulk_with_select_and_reads(self, hge_ctx):
         check_query_f(hge_ctx, self.dir() + '/select_with_reads.yaml')
-
-