Mirror of https://github.com/hasura/graphql-engine.git (synced 2024-12-15 17:31:56 +03:00)

Merge pull request #3798 from lexi-lambda/3759-3791-minor-metadata-build-bugfixes

Fix two minor bugs in the schema cache build process (fix #3759 and #3791)

This commit is contained in commit fb9498488f.
@@ -46,7 +46,7 @@ runApp (HGEOptionsG rci hgeCmd) =
execQuery queryBs
& runHasSystemDefinedT (SystemDefined False)
& runCacheRWT schemaCache
& fmap fst
& fmap (\(res, _, _) -> res)
either printErrJExit (liftIO . BLC.putStrLn) res

HCVersion -> liftIO $ putStrLn $ "Hasura GraphQL Engine: " ++ convertText currentVersion
@@ -1,27 +1,51 @@
-- | A simple implementation of /incremental build rules/, which can be used to avoid unnecessary
-- recomputation on incrementally-changing input. See 'Rule' for more details.
module Hasura.Incremental
( Rule
module Hasura.Incremental (
-- * The @Rule@ datatype
Rule
, Result
, build
, rebuild
, rebuildRule
, result

-- * Abstract interface
, ArrowDistribute(..)
, ArrowCache(..)
, MonadDepend(..)
, DependT

-- * Fine-grained dependencies
, Dependency
, Selector
, Select(Selector)
, selectD
, selectKeyD
, Cacheable(..)
, Accesses

-- * Cache invalidation
, InvalidationKey
, initialInvalidationKey
, invalidate
) where

import Hasura.Prelude

import Hasura.Incremental.Internal.Cache
import Hasura.Incremental.Internal.Dependency
import Hasura.Incremental.Internal.Rule
import Hasura.Incremental.Select

-- | A simple helper type that can be used to implement explicit cache invalidation. Internally,
-- each 'InvalidationKey' is a counter; 'initialInvalidationKey' starts the counter at 0 and
-- 'invalidate' increments it by 1. Two 'InvalidationKey's are equal iff they have the same internal
-- count, so depending on an 'InvalidationKey' provides a mechanism to force portions of the build
-- process to be reexecuted by calling 'invalidate' before running the build.
newtype InvalidationKey = InvalidationKey Int
deriving (Show, Eq, Cacheable)

initialInvalidationKey :: InvalidationKey
initialInvalidationKey = InvalidationKey 0

invalidate :: InvalidationKey -> InvalidationKey
invalidate (InvalidationKey n) = InvalidationKey (n + 1)
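For orientation, a minimal sketch (not part of the diff) of how the definitions above behave: starting from initialInvalidationKey, each invalidate call yields a key that compares unequal to its predecessor, which is what forces a dependent build step to re-run.

demoInvalidation :: (Bool, Bool)
demoInvalidation =
  let k0 = initialInvalidationKey   -- InvalidationKey 0
      k1 = invalidate k0            -- InvalidationKey 1
  in (k0 == k0, k0 == k1)           -- (True, False): only an unchanged key lets a cached result be reused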
@@ -6,6 +6,7 @@ module Hasura.Incremental.Select
( Select(..)
, ConstS(..)
, selectKey
, FieldS(..)
, UniqueS
, newUniqueS
, DMapS(..)
@@ -25,6 +26,10 @@ import qualified Data.HashMap.Strict as M
import Control.Monad.Unique
import Data.GADT.Compare
import Data.Kind
import Data.Proxy (Proxy (..))
import GHC.OverloadedLabels (IsLabel (..))
import GHC.Records (HasField (..))
import GHC.TypeLits (KnownSymbol, sameSymbol, symbolVal)
import Unsafe.Coerce (unsafeCoerce)

-- | The 'Select' class provides a way to access subparts of a product type using a reified
@@ -34,10 +39,18 @@ import Unsafe.Coerce (unsafeCoerce)
--
-- This is useful to implement dependency tracking, since it’s possible to track in a reified form
-- exactly which parts of a data structure are used.
--
-- Instances of 'Select' can be automatically derived for record types (just define an empty
-- instance). The instance uses the magical 'HasField' constraints, and 'Selector's for the type can
-- be written using @OverloadedLabels@.
class (GCompare (Selector a)) => Select a where
type Selector a :: Type -> Type
select :: Selector a b -> a -> b

type Selector r = FieldS r
default select :: Selector a ~ FieldS a => Selector a b -> a -> b
select (FieldS (_ :: Proxy s)) = getField @s

instance (Eq k, Ord k, Hashable k) => Select (HashMap k v) where
type Selector (HashMap k v) = ConstS k (Maybe v)
select (ConstS k) = M.lookup k
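As an aside (not part of the commit), a minimal sketch of the empty-instance deriving described above, using a hypothetical Widget record; it assumes OverloadedLabels is enabled and Text is in scope (e.g. via Hasura.Prelude). The #wName label resolves to a FieldS selector through the IsLabel instance shown a few lines further down in this diff.

data Widget = Widget { wName :: Text, wSize :: Int }
instance Select Widget   -- empty instance: Selector Widget defaults to FieldS Widget

widgetName :: Widget -> Text
widgetName = select #wName   -- the HasField constraint is solved automatically for record fields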
@@ -66,6 +79,29 @@ instance (Ord k) => GCompare (ConstS k a) where
EQ -> GEQ
GT -> GGT

data FieldS r a where
FieldS :: (KnownSymbol s, HasField s r a) => !(Proxy s) -> FieldS r a

instance (KnownSymbol s, HasField s r a) => IsLabel s (FieldS r a) where
fromLabel = FieldS (Proxy @s)

instance GEq (FieldS r) where
FieldS a `geq` FieldS b = case sameSymbol a b of
-- If two fields of the same record have the same name, then their fields fundamentally must
-- have the same type! However, unfortunately, `HasField` constraints use a functional
-- dependency to enforce this rather than a type family, and functional dependencies don’t
-- provide evidence, so we have to use `unsafeCoerce` here. Yuck!
Just Refl -> Just (unsafeCoerce Refl)
Nothing -> Nothing

instance GCompare (FieldS r) where
FieldS a `gcompare` FieldS b = case sameSymbol a b of
-- See note about `HasField` and `unsafeCoerce` above.
Just Refl -> unsafeCoerce GEQ
Nothing
| symbolVal a < symbolVal b -> GLT
| otherwise -> GGT

-- | A 'UniqueS' is, as the name implies, a globally-unique 'Selector', which can be created using
-- 'newUniqueS'. If a value of type @'UniqueS' a@ is found to be equal (via 'geq') with another
-- value of type @'UniqueS' b@, then @a@ and @b@ must be the same type. This effectively allows the
@@ -380,7 +380,7 @@ runExportMetadata _ =

runReloadMetadata :: (QErrM m, CacheRWM m) => ReloadMetadata -> m EncJSON
runReloadMetadata ReloadMetadata = do
buildSchemaCache
buildSchemaCacheWithOptions CatalogUpdate mempty { ciMetadata = True }
return successMsg

runDumpInternalState
@@ -13,6 +13,7 @@ import Hasura.Prelude

import qualified Data.Aeson as J
import qualified Data.HashMap.Strict as Map
import qualified Data.HashSet as S
import qualified Database.PG.Query as Q

import Hasura.GraphQL.RemoteServer
@@ -87,8 +88,8 @@ runReloadRemoteSchema (RemoteSchemaNameQuery name) = do
void $ onNothing (Map.lookup name rmSchemas) $
throw400 NotExists $ "remote schema with name " <> name <<> " does not exist"

invalidateCachedRemoteSchema name
withNewInconsistentObjsCheck buildSchemaCache
let invalidations = mempty { ciRemoteSchemas = S.singleton name }
withNewInconsistentObjsCheck $ buildSchemaCacheWithOptions CatalogUpdate invalidations
pure successMsg

addRemoteSchemaToCatalog
@@ -1,4 +1,5 @@
{-# LANGUAGE Arrows #-}
{-# LANGUAGE OverloadedLabels #-}

{-| Top-level functions concerned specifically with operations on the schema cache, such as
rebuilding it from the catalog and incorporating schema changes. See the module documentation for
@@ -60,75 +61,97 @@ buildRebuildableSchemaCache
=> m (RebuildableSchemaCache m)
buildRebuildableSchemaCache = do
catalogMetadata <- liftTx fetchCatalogData
result <- flip runReaderT CatalogSync $ Inc.build buildSchemaCacheRule (catalogMetadata, M.empty)
pure $ RebuildableSchemaCache (Inc.result result) M.empty (Inc.rebuildRule result)
result <- flip runReaderT CatalogSync $
Inc.build buildSchemaCacheRule (catalogMetadata, initialInvalidationKeys)
pure $ RebuildableSchemaCache (Inc.result result) initialInvalidationKeys (Inc.rebuildRule result)

newtype CacheRWT m a
= CacheRWT { unCacheRWT :: StateT (RebuildableSchemaCache m) m a }
-- The CacheInvalidations component of the state could actually be collected using WriterT, but
-- WriterT implementations prior to transformers-0.5.6.0 (which added
-- Control.Monad.Trans.Writer.CPS) are leaky, and we don’t have that yet.
= CacheRWT (StateT (RebuildableSchemaCache m, CacheInvalidations) m a)
deriving
( Functor, Applicative, Monad, MonadIO, MonadReader r, MonadError e, MonadWriter w, MonadTx
( Functor, Applicative, Monad, MonadIO, MonadReader r, MonadError e, MonadTx
, UserInfoM, HasHttpManager, HasSQLGenCtx, HasSystemDefined )

runCacheRWT :: RebuildableSchemaCache m -> CacheRWT m a -> m (a, RebuildableSchemaCache m)
runCacheRWT cache = flip runStateT cache . unCacheRWT
runCacheRWT
:: Functor m
=> RebuildableSchemaCache m -> CacheRWT m a -> m (a, RebuildableSchemaCache m, CacheInvalidations)
runCacheRWT cache (CacheRWT m) =
runStateT m (cache, mempty) <&> \(v, (newCache, invalidations)) -> (v, newCache, invalidations)

instance MonadTrans CacheRWT where
lift = CacheRWT . lift

instance (Monad m) => TableCoreInfoRM (CacheRWT m)
instance (Monad m) => CacheRM (CacheRWT m) where
askSchemaCache = CacheRWT $ gets lastBuiltSchemaCache
askSchemaCache = CacheRWT $ gets (lastBuiltSchemaCache . fst)

instance (MonadIO m, MonadTx m, MonadUnique m) => CacheRWM (CacheRWT m) where
buildSchemaCacheWithOptions buildReason = CacheRWT do
RebuildableSchemaCache _ invalidationMap rule <- get
instance (MonadIO m, MonadTx m) => CacheRWM (CacheRWT m) where
buildSchemaCacheWithOptions buildReason invalidations = CacheRWT do
(RebuildableSchemaCache _ invalidationKeys rule, oldInvalidations) <- get
let newInvalidationKeys = invalidateKeys invalidations invalidationKeys
catalogMetadata <- liftTx fetchCatalogData
result <- lift $ flip runReaderT buildReason $ Inc.build rule (catalogMetadata, invalidationMap)
result <- lift $ flip runReaderT buildReason $
Inc.build rule (catalogMetadata, newInvalidationKeys)
let schemaCache = Inc.result result
prunedInvalidationMap = pruneInvalidationMap schemaCache invalidationMap
put $! RebuildableSchemaCache schemaCache prunedInvalidationMap (Inc.rebuildRule result)
prunedInvalidationKeys = pruneInvalidationKeys schemaCache newInvalidationKeys
!newCache = RebuildableSchemaCache schemaCache prunedInvalidationKeys (Inc.rebuildRule result)
!newInvalidations = oldInvalidations <> invalidations
put (newCache, newInvalidations)
where
pruneInvalidationMap schemaCache = M.filterWithKey \name _ ->
-- Prunes invalidation keys that no longer exist in the schema to avoid leaking memory by
-- hanging onto unnecessary keys.
pruneInvalidationKeys schemaCache = over ikRemoteSchemas $ M.filterWithKey \name _ ->
M.member name (scRemoteSchemas schemaCache)

invalidateCachedRemoteSchema name = CacheRWT do
unique <- newUnique
assign (rscInvalidationMap . at name) (Just unique)

buildSchemaCacheRule
-- Note: by supplying BuildReason via MonadReader, it does not participate in caching, which is
-- what we want!
:: ( HasVersion, ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
, MonadIO m, MonadTx m, MonadReader BuildReason m, HasHttpManager m, HasSQLGenCtx m )
=> (CatalogMetadata, InvalidationMap) `arr` SchemaCache
buildSchemaCacheRule = proc inputs -> do
(outputs, collectedInfo) <- runWriterA buildAndCollectInfo -< inputs
=> (CatalogMetadata, InvalidationKeys) `arr` SchemaCache
buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
invalidationKeysDep <- Inc.newDependency -< invalidationKeys

-- Step 1: Process metadata and collect dependency information.
(outputs, collectedInfo) <-
runWriterA buildAndCollectInfo -< (catalogMetadata, invalidationKeysDep)
let (inconsistentObjects, unresolvedDependencies) = partitionCollectedInfo collectedInfo
(resolvedOutputs, extraInconsistentObjects, resolvedDependencies) <-

-- Step 2: Resolve dependency information and drop dangling dependents.
(resolvedOutputs, dependencyInconsistentObjects, resolvedDependencies) <-
resolveDependencies -< (outputs, unresolvedDependencies)

-- Step 3: Build the GraphQL schema.
((remoteSchemaMap, gqlSchema, remoteGQLSchema), gqlSchemaInconsistentObjects)
<- runWriterA buildGQLSchema
-< (_boTables resolvedOutputs, _boFunctions resolvedOutputs, _boRemoteSchemas resolvedOutputs)

returnA -< SchemaCache
{ scTables = _boTables resolvedOutputs
, scFunctions = _boFunctions resolvedOutputs
, scRemoteSchemas = _boRemoteSchemas resolvedOutputs
, scRemoteSchemas = remoteSchemaMap
, scAllowlist = _boAllowlist resolvedOutputs
, scGCtxMap = _boGCtxMap resolvedOutputs
, scDefaultRemoteGCtx = _boDefaultRemoteGCtx resolvedOutputs
, scGCtxMap = gqlSchema
, scDefaultRemoteGCtx = remoteGQLSchema
, scDepMap = resolvedDependencies
, scInconsistentObjs = inconsistentObjects <> extraInconsistentObjects
, scInconsistentObjs =
inconsistentObjects <> dependencyInconsistentObjects <> toList gqlSchemaInconsistentObjects
}
where
buildAndCollectInfo
:: ( HasVersion, ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
, ArrowWriter (Seq CollectedInfo) arr, MonadIO m, MonadTx m, MonadReader BuildReason m
, HasHttpManager m, HasSQLGenCtx m )
=> (CatalogMetadata, InvalidationMap) `arr` BuildOutputs
buildAndCollectInfo = proc (catalogMetadata, invalidationMap) -> do
=> (CatalogMetadata, Inc.Dependency InvalidationKeys) `arr` BuildOutputs
buildAndCollectInfo = proc (catalogMetadata, invalidationKeys) -> do
let CatalogMetadata tables relationships permissions
eventTriggers remoteSchemas functions allowlistDefs
computedFields = catalogMetadata

-- tables
tableRawInfos <- buildTableCache -< tables
tableRawInfos <- buildTableCache -< (tables, Inc.selectD #_ikMetadata invalidationKeys)

-- relationships and computed fields
let relationshipsByTable = M.groupOn _crTable relationships
@@ -182,23 +205,15 @@ buildSchemaCacheRule = proc inputs -> do
& map (queryWithoutTypeNames . getGQLQuery . _lqQuery)
& HS.fromList

-- build GraphQL context with tables and functions
baseGQLSchema <- bindA -< GS.mkGCtxMap tableCache functionCache

-- remote schemas
let invalidatedRemoteSchemas = flip map remoteSchemas \remoteSchema ->
(M.lookup (_arsqName remoteSchema) invalidationMap, remoteSchema)
(remoteSchemaMap, gqlSchema, remoteGQLSchema) <-
(| foldlA' (\schemas schema -> (schemas, schema) >- addRemoteSchema)
|) (M.empty, baseGQLSchema, GC.emptyGCtx) invalidatedRemoteSchemas
let remoteSchemaInvalidationKeys = Inc.selectD #_ikRemoteSchemas invalidationKeys
remoteSchemaMap <- buildRemoteSchemas -< (remoteSchemaInvalidationKeys, remoteSchemas)

returnA -< BuildOutputs
{ _boTables = tableCache
, _boFunctions = functionCache
, _boRemoteSchemas = remoteSchemaMap
, _boAllowlist = allowList
, _boGCtxMap = gqlSchema
, _boDefaultRemoteGCtx = remoteGQLSchema
}

mkEventTriggerMetadataObject (CatalogEventTrigger qt trn configuration) =
@@ -206,6 +221,9 @@ buildSchemaCacheRule = proc inputs -> do
definition = object ["table" .= qt, "configuration" .= configuration]
in MetadataObject objectId definition

mkRemoteSchemaMetadataObject remoteSchema =
MetadataObject (MORemoteSchema (_arsqName remoteSchema)) (toJSON remoteSchema)

-- Given a map of table info, “folds in” another map of information, accumulating inconsistent
-- metadata objects for any entries in the second map that don’t appear in the first map. This
-- is used to “line up” the metadata for relationships, computed fields, permissions, etc. with
@@ -236,14 +254,7 @@ buildSchemaCacheRule = proc inputs -> do
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
, Inc.ArrowCache m arr, MonadIO m, MonadTx m, MonadReader BuildReason m, HasSQLGenCtx m )
=> (TableCoreInfo, [CatalogEventTrigger]) `arr` EventTriggerInfoMap
buildTableEventTriggers = proc (tableInfo, eventTriggers) ->
(\infos -> M.catMaybes infos >- returnA) <-<
(| Inc.keyed (\_ duplicateEventTriggers -> do
maybeEventTrigger <- noDuplicates mkEventTriggerMetadataObject -< duplicateEventTriggers
(\info -> join info >- returnA) <-<
(| traverseA (\eventTrigger -> buildEventTrigger -< (tableInfo, eventTrigger))
|) maybeEventTrigger)
|) (M.groupOn _cetName eventTriggers)
buildTableEventTriggers = buildInfoMap _cetName mkEventTriggerMetadataObject buildEventTrigger
where
buildEventTrigger = proc (tableInfo, eventTrigger) -> do
let CatalogEventTrigger qt trn configuration = eventTrigger
@@ -268,24 +279,48 @@ buildSchemaCacheRule = proc inputs -> do
liftTx $ delTriggerQ triggerName -- executes DROP IF EXISTS.. sql
mkAllTriggersQ triggerName tableName (M.elems tableColumns) triggerDefinition

addRemoteSchema
:: ( HasVersion, ArrowChoice arr, ArrowWriter (Seq CollectedInfo) arr, ArrowKleisli m arr
, MonadIO m, HasHttpManager m )
=> ( (RemoteSchemaMap, GS.GCtxMap, GS.GCtx)
, (Maybe InvalidationKey, AddRemoteSchemaQuery)
buildRemoteSchemas
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
, Inc.ArrowCache m arr , MonadIO m, HasHttpManager m )
=> ( Inc.Dependency (HashMap RemoteSchemaName Inc.InvalidationKey)
, [AddRemoteSchemaQuery]
) `arr` HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject)
buildRemoteSchemas =
buildInfoMapPreservingMetadata _arsqName mkRemoteSchemaMetadataObject buildRemoteSchema
where
-- We want to cache this call because it fetches the remote schema over HTTP, and we don’t
-- want to re-run that if the remote schema definition hasn’t changed.
buildRemoteSchema = Inc.cache proc (invalidationKeys, remoteSchema) -> do
Inc.dependOn -< Inc.selectKeyD (_arsqName remoteSchema) invalidationKeys
(| withRecordInconsistency (liftEitherA <<< bindA -<
runExceptT $ addRemoteSchemaP2Setup remoteSchema)
|) (mkRemoteSchemaMetadataObject remoteSchema)

-- Builds the GraphQL schema and merges in remote schemas. This function is kind of gross, as
-- it’s possible for the remote schema merging to fail, at which point we have to mark them
-- inconsistent. This means we have to accumulate the consistent remote schemas as we go, in
-- addition to the built GraphQL context.
buildGQLSchema
:: ( ArrowChoice arr, ArrowWriter (Seq InconsistentMetadata) arr, ArrowKleisli m arr
, MonadError QErr m )
=> ( TableCache
, FunctionCache
, HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject)
) `arr` (RemoteSchemaMap, GS.GCtxMap, GS.GCtx)
addRemoteSchema = proc ((remoteSchemas, gCtxMap, defGCtx), (_, remoteSchema)) -> do
let name = _arsqName remoteSchema
(| onNothingA (returnA -< (remoteSchemas, gCtxMap, defGCtx)) |) <-<
(| withRecordInconsistency (case M.lookup name remoteSchemas of
Just _ -> throwA -< err400 AlreadyExists "duplicate definition for remote schema"
Nothing -> liftEitherA <<< bindA -< runExceptT do
rsCtx <- addRemoteSchemaP2Setup remoteSchema
let rGCtx = convRemoteGCtx $ rscGCtx rsCtx
mergedGCtxMap <- mergeRemoteSchema gCtxMap rGCtx
mergedDefGCtx <- mergeGCtx defGCtx rGCtx
pure (M.insert name rsCtx remoteSchemas, mergedGCtxMap, mergedDefGCtx))
|) (MetadataObject (MORemoteSchema name) (toJSON remoteSchema))
buildGQLSchema = proc (tableCache, functionCache, remoteSchemas) -> do
baseGQLSchema <- bindA -< GS.mkGCtxMap tableCache functionCache
(| foldlA' (\(remoteSchemaMap, gqlSchemas, remoteGQLSchemas)
(remoteSchemaName, (remoteSchema, metadataObject)) ->
(| withRecordInconsistency (do
let gqlSchema = convRemoteGCtx $ rscGCtx remoteSchema
mergedGQLSchemas <- bindErrorA -< mergeRemoteSchema gqlSchemas gqlSchema
mergedRemoteGQLSchemas <- bindErrorA -< mergeGCtx remoteGQLSchemas gqlSchema
let mergedRemoteSchemaMap = M.insert remoteSchemaName remoteSchema remoteSchemaMap
returnA -< (mergedRemoteSchemaMap, mergedGQLSchemas, mergedRemoteGQLSchemas))
|) metadataObject
>-> (| onNothingA ((remoteSchemaMap, gqlSchemas, remoteGQLSchemas) >- returnA) |))
|) (M.empty, baseGQLSchema, GC.emptyGCtx) (M.toList remoteSchemas)

-- | @'withMetadataCheck' cascade action@ runs @action@ and checks if the schema changed as a
-- result. If it did, it checks to ensure the changes do not violate any integrity constraints, and
@@ -1,4 +1,5 @@
{-# LANGUAGE Arrows #-}
{-# LANGUAGE RecordWildCards #-}

-- | Types/functions shared between modules that implement "Hasura.RQL.DDL.Schema.Cache". Other
-- modules should not import this module directly.
@@ -6,14 +7,13 @@ module Hasura.RQL.DDL.Schema.Cache.Common where

import Hasura.Prelude

import qualified Data.HashMap.Strict.Extended as M
import qualified Data.HashSet as HS
import qualified Data.Sequence as Seq

import Control.Arrow.Extended
import Control.Lens
import Control.Monad.Unique

import qualified Hasura.GraphQL.Context as GC
import qualified Hasura.Incremental as Inc

import Hasura.RQL.Types
@@ -22,18 +22,30 @@ import Hasura.RQL.Types.QueryCollection
import Hasura.RQL.Types.Run
import Hasura.SQL.Types

-- | A map used to explicitly invalidate part of the build cache, which is most useful for external
-- resources (currently only remote schemas). The 'InvalidationKey' values it contains are used as
-- inputs to build rules, so setting an entry to a fresh 'InvalidationKey' forces it to be
-- re-executed.
type InvalidationMap = HashMap RemoteSchemaName InvalidationKey
type InvalidationKey = Unique
-- | 'InvalidationKeys' used to apply requested 'CacheInvalidations'.
data InvalidationKeys = InvalidationKeys
{ _ikMetadata :: !Inc.InvalidationKey
, _ikRemoteSchemas :: !(HashMap RemoteSchemaName Inc.InvalidationKey)
} deriving (Eq, Generic)
instance Inc.Cacheable InvalidationKeys
instance Inc.Select InvalidationKeys
$(makeLenses ''InvalidationKeys)

initialInvalidationKeys :: InvalidationKeys
initialInvalidationKeys = InvalidationKeys Inc.initialInvalidationKey mempty

invalidateKeys :: CacheInvalidations -> InvalidationKeys -> InvalidationKeys
invalidateKeys CacheInvalidations{..} InvalidationKeys{..} = InvalidationKeys
{ _ikMetadata = if ciMetadata then Inc.invalidate _ikMetadata else _ikMetadata
, _ikRemoteSchemas = foldl' (flip invalidateRemoteSchema) _ikRemoteSchemas ciRemoteSchemas }
where
invalidateRemoteSchema = M.alter $ Just . maybe Inc.initialInvalidationKey Inc.invalidate
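As an illustration (not part of the commit), a reload_metadata-style request maps onto these keys as follows; the sketch assumes the qualified HS (Data.HashSet) import already present in this module and takes the remote schema name from the caller.

bumpForReload :: RemoteSchemaName -> InvalidationKeys
bumpForReload name = invalidateKeys
  CacheInvalidations { ciMetadata = True, ciRemoteSchemas = HS.singleton name }
  initialInvalidationKeys
-- _ikMetadata is invalidated and the entry for the given remote schema is created or bumped;
-- every other remote schema key is left untouched.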

data BuildInputs
= BuildInputs
{ _biReason :: !BuildReason
, _biCatalogMetadata :: !CatalogMetadata
, _biInvalidationMap :: !InvalidationMap
, _biInvalidationMap :: !InvalidationKeys
} deriving (Eq)

-- | The direct output of 'buildSchemaCacheRule'. Contains most of the things necessary to build a
@@ -41,20 +53,21 @@ data BuildInputs
-- 'MonadWriter' side channel.
data BuildOutputs
= BuildOutputs
{ _boTables :: !TableCache
, _boFunctions :: !FunctionCache
, _boRemoteSchemas :: !RemoteSchemaMap
, _boAllowlist :: !(HS.HashSet GQLQuery)
, _boGCtxMap :: !GC.GCtxMap
, _boDefaultRemoteGCtx :: !GC.GCtx
{ _boTables :: !TableCache
, _boFunctions :: !FunctionCache
, _boRemoteSchemas :: !(HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject))
-- ^ We preserve the 'MetadataObject' from the original catalog metadata in the output so we can
-- reuse it later if we need to mark the remote schema inconsistent during GraphQL schema
-- generation (because of field conflicts).
, _boAllowlist :: !(HS.HashSet GQLQuery)
} deriving (Show, Eq)
$(makeLenses ''BuildOutputs)

data RebuildableSchemaCache m
= RebuildableSchemaCache
{ lastBuiltSchemaCache :: !SchemaCache
, _rscInvalidationMap :: !InvalidationMap
, _rscRebuild :: !(Inc.Rule (ReaderT BuildReason m) (CatalogMetadata, InvalidationMap) SchemaCache)
, _rscInvalidationMap :: !InvalidationKeys
, _rscRebuild :: !(Inc.Rule (ReaderT BuildReason m) (CatalogMetadata, InvalidationKeys) SchemaCache)
}
$(makeLenses ''RebuildableSchemaCache)

@@ -91,5 +104,38 @@ noDuplicates mkMetadataObject = proc values -> case values of
returnA -< Nothing
{-# INLINABLE noDuplicates #-}

-- | Processes a list of catalog metadata into a map of processed information, marking any duplicate
-- entries inconsistent.
buildInfoMap
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
, Eq k, Hashable k )
=> (a -> k)
-> (a -> MetadataObject)
-> (e, a) `arr` Maybe b
-> (e, [a]) `arr` HashMap k b
buildInfoMap extractKey mkMetadataObject buildInfo = proc (e, infos) ->
(M.groupOn extractKey infos >- returnA)
>-> (| Inc.keyed (\_ duplicateInfos ->
(duplicateInfos >- noDuplicates mkMetadataObject)
>-> (| traverseA (\info -> (e, info) >- buildInfo) |)
>-> (\info -> join info >- returnA)) |)
>-> (\infoMap -> M.catMaybes infoMap >- returnA)
{-# INLINABLE buildInfoMap #-}

-- | Like 'buildInfo', but includes each processed info’s associated 'MetadataObject' in the result.
-- This is useful if the results will be further processed, and the 'MetadataObject' is still needed
-- to mark the object inconsistent.
buildInfoMapPreservingMetadata
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
, Eq k, Hashable k )
=> (a -> k)
-> (a -> MetadataObject)
-> (e, a) `arr` Maybe b
-> (e, [a]) `arr` HashMap k (b, MetadataObject)
buildInfoMapPreservingMetadata extractKey mkMetadataObject buildInfo =
buildInfoMap extractKey mkMetadataObject proc (e, info) ->
((e, info) >- buildInfo) >-> \result -> result <&> (, mkMetadataObject info) >- returnA
{-# INLINABLE buildInfoMapPreservingMetadata #-}

addTableContext :: QualifiedTable -> Text -> Text
addTableContext tableName e = "in table " <> tableName <<> ": " <> e
@@ -34,31 +34,19 @@ addNonColumnFields
, [CatalogComputedField]
) `arr` FieldInfoMap FieldInfo
addNonColumnFields = proc (rawTableInfo, columns, relationships, computedFields) -> do
let foreignKeys = _tciForeignKeys <$> rawTableInfo
relationshipInfos <-
(| Inc.keyed (\_ relationshipsByName -> do
maybeRelationship <- noDuplicates mkRelationshipMetadataObject -< relationshipsByName
(\info -> join info >- returnA) <-<
(| traverseA (\relationship -> do
info <- buildRelationship -< (foreignKeys, relationship)
returnA -< info <&> (, mkRelationshipMetadataObject relationship))
|) maybeRelationship)
|) (M.groupOn _crRelName relationships)

let trackedTableNames = HS.fromList $ M.keys rawTableInfo
computedFieldInfos <-
(| Inc.keyed (\_ computedFieldsByName -> do
maybeComputedField <- noDuplicates mkComputedFieldMetadataObject -< computedFieldsByName
(\info -> join info >- returnA) <-<
(| traverseA (\computedField -> do
info <- buildComputedField -< (trackedTableNames, computedField)
returnA -< info <&> (, mkComputedFieldMetadataObject computedField))
|) maybeComputedField)
|) (M.groupOn (_afcName . _cccComputedField) computedFields)
relationshipInfos
<- buildInfoMapPreservingMetadata _crRelName mkRelationshipMetadataObject buildRelationship
-< (_tciForeignKeys <$> rawTableInfo, relationships)
computedFieldInfos
<- buildInfoMapPreservingMetadata
(_afcName . _cccComputedField)
mkComputedFieldMetadataObject
buildComputedField
-< (HS.fromList $ M.keys rawTableInfo, computedFields)

let mapKey f = M.fromList . map (first f) . M.toList
relationshipFields = mapKey fromRel $ M.catMaybes relationshipInfos
computedFieldFields = mapKey fromComputedField $ M.catMaybes computedFieldInfos
relationshipFields = mapKey fromRel relationshipInfos
computedFieldFields = mapKey fromComputedField computedFieldInfos

-- First, check for conflicts between non-column fields, since we can raise a better error
-- message in terms of the two metadata objects that define them.
@@ -292,10 +292,14 @@ buildTableCache
:: forall arr m
. ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
, Inc.ArrowCache m arr, MonadTx m )
=> [CatalogTable] `arr` M.HashMap QualifiedTable TableRawInfo
buildTableCache = Inc.cache proc catalogTables -> do
=> ( [CatalogTable]
, Inc.Dependency Inc.InvalidationKey
) `arr` M.HashMap QualifiedTable TableRawInfo
buildTableCache = Inc.cache proc (catalogTables, reloadMetadataInvalidationKey) -> do
rawTableInfos <-
(| Inc.keyed (| withTable (\tables -> buildRawTableInfo <<< noDuplicateTables -< tables) |)
(| Inc.keyed (| withTable (\tables
-> (tables, reloadMetadataInvalidationKey)
>- first noDuplicateTables >>> buildRawTableInfo) |)
|) (M.groupOnNE _ctName catalogTables)
let rawTableCache = M.catMaybes rawTableInfos
enumTables = flip M.mapMaybe rawTableCache \rawTableInfo ->
@@ -314,8 +318,13 @@ buildTableCache = Inc.cache proc catalogTables -> do
_ -> throwA -< err400 AlreadyExists "duplication definition for table"

-- Step 1: Build the raw table cache from metadata information.
buildRawTableInfo :: ErrorA QErr arr CatalogTable (TableCoreInfoG PGRawColumnInfo PGCol)
buildRawTableInfo = Inc.cache proc (CatalogTable name systemDefined isEnum config maybeInfo) -> do
buildRawTableInfo
:: ErrorA QErr arr
( CatalogTable
, Inc.Dependency Inc.InvalidationKey
) (TableCoreInfoG PGRawColumnInfo PGCol)
buildRawTableInfo = Inc.cache proc (catalogTable, reloadMetadataInvalidationKey) -> do
let CatalogTable name systemDefined isEnum config maybeInfo = catalogTable
catalogInfo <-
(| onNothingA (throwA -<
err400 NotExists $ "no such table/view exists in postgres: " <>> name)
@@ -326,7 +335,11 @@ buildTableCache = Inc.cache proc catalogTables -> do
primaryKey = _ctiPrimaryKey catalogInfo
rawPrimaryKey <- liftEitherA -< traverse (resolvePrimaryKeyColumns columnMap) primaryKey
enumValues <- if isEnum
then bindErrorA -< Just <$> fetchAndValidateEnumValues name rawPrimaryKey columns
then do
-- We want to make sure we reload enum values whenever someone explicitly calls
-- `reload_metadata`.
Inc.dependOn -< reloadMetadataInvalidationKey
bindErrorA -< Just <$> fetchAndValidateEnumValues name rawPrimaryKey columns
else returnA -< Nothing

returnA -< TableCoreInfo
@@ -22,7 +22,7 @@ type UrlFromEnv = Text
newtype RemoteSchemaName
= RemoteSchemaName
{ unRemoteSchemaName :: NonEmptyText }
deriving ( Show, Eq, Lift, Hashable, J.ToJSON, J.ToJSONKey
deriving ( Show, Eq, Ord, Lift, Hashable, J.ToJSON, J.ToJSONKey
, J.FromJSON, Q.ToPrepArg, Q.FromCol, DQuote, NFData
, Generic, Cacheable, Arbitrary
)
@@ -13,6 +13,7 @@ module Hasura.RQL.Types.SchemaCache.Build

, CacheRWM(..)
, BuildReason(..)
, CacheInvalidations(..)
, buildSchemaCache
, buildSchemaCacheFor
, buildSchemaCacheStrict
@@ -26,7 +27,10 @@ import qualified Data.Sequence as Seq
import qualified Data.Text as T

import Control.Arrow.Extended
import Control.Lens
import Data.Aeson (toJSON)
import Data.Aeson.Casing
import Data.Aeson.TH
import Data.List (nub)

import Hasura.RQL.Types.Error
@@ -44,6 +48,14 @@ data CollectedInfo
!SchemaObjId
!SchemaDependency
deriving (Show, Eq)
$(makePrisms ''CollectedInfo)

class AsInconsistentMetadata s where
_InconsistentMetadata :: Prism' s InconsistentMetadata
instance AsInconsistentMetadata InconsistentMetadata where
_InconsistentMetadata = id
instance AsInconsistentMetadata CollectedInfo where
_InconsistentMetadata = _CIInconsistency

partitionCollectedInfo
:: Seq CollectedInfo
@@ -55,12 +67,14 @@ partitionCollectedInfo =
let dependency = (metadataObject, objectId, schemaDependency)
in (inconsistencies, dependency:dependencies)

recordInconsistency :: (ArrowWriter (Seq CollectedInfo) arr) => (MetadataObject, Text) `arr` ()
recordInconsistency
:: (ArrowWriter (Seq w) arr, AsInconsistentMetadata w) => (MetadataObject, Text) `arr` ()
recordInconsistency = first (arr (:[])) >>> recordInconsistencies

recordInconsistencies :: (ArrowWriter (Seq CollectedInfo) arr) => ([MetadataObject], Text) `arr` ()
recordInconsistencies
:: (ArrowWriter (Seq w) arr, AsInconsistentMetadata w) => ([MetadataObject], Text) `arr` ()
recordInconsistencies = proc (metadataObjects, reason) ->
tellA -< Seq.fromList $ map (CIInconsistency . InconsistentObject reason) metadataObjects
tellA -< Seq.fromList $ map (review _InconsistentMetadata . InconsistentObject reason) metadataObjects

recordDependencies
:: (ArrowWriter (Seq CollectedInfo) arr)
@@ -69,7 +83,7 @@ recordDependencies = proc (metadataObject, schemaObjectId, dependencies) ->
tellA -< Seq.fromList $ map (CIDependency metadataObject schemaObjectId) dependencies

withRecordInconsistency
:: (ArrowChoice arr, ArrowWriter (Seq CollectedInfo) arr)
:: (ArrowChoice arr, ArrowWriter (Seq w) arr, AsInconsistentMetadata w)
=> ErrorA QErr arr (e, s) a
-> arr (e, (MetadataObject, s)) (Maybe a)
withRecordInconsistency f = proc (e, (metadataObject, s)) -> do
@@ -85,8 +99,7 @@ withRecordInconsistency f = proc (e, (metadataObject, s)) -> do
-- operations for triggering a schema cache rebuild

class (CacheRM m) => CacheRWM m where
buildSchemaCacheWithOptions :: BuildReason -> m ()
invalidateCachedRemoteSchema :: RemoteSchemaName -> m ()
buildSchemaCacheWithOptions :: BuildReason -> CacheInvalidations -> m ()

data BuildReason
-- | The build was triggered by an update this instance made to the catalog (in the
@@ -99,12 +112,26 @@ data BuildReason
| CatalogSync
deriving (Show, Eq)

data CacheInvalidations = CacheInvalidations
{ ciMetadata :: !Bool
-- ^ Force reloading of all database information, including information not technically stored in
-- metadata (currently just enum values). Set by the @reload_metadata@ API.
, ciRemoteSchemas :: !(HashSet RemoteSchemaName)
-- ^ Force refetching of the given remote schemas, even if their definition has not changed. Set
-- by the @reload_remote_schema@ API.
}
$(deriveJSON (aesonDrop 2 snakeCase) ''CacheInvalidations)

instance Semigroup CacheInvalidations where
CacheInvalidations a1 b1 <> CacheInvalidations a2 b2 = CacheInvalidations (a1 || a2) (b1 <> b2)
instance Monoid CacheInvalidations where
mempty = CacheInvalidations False mempty
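As an aside (not part of the commit): with the aesonDrop 2 snakeCase options above, a value serialises under "metadata" and "remote_schemas" keys, e.g. {"metadata": true, "remote_schemas": ["my_remote"]}, and the Semigroup instance ORs the metadata flag while unioning the schema sets. A rough sketch, assuming Data.HashSet is available qualified as HS:

combinedInvalidations :: RemoteSchemaName -> CacheInvalidations
combinedInvalidations name =
  CacheInvalidations True mempty <> CacheInvalidations False (HS.singleton name)
  -- evaluates to CacheInvalidations True (HS.singleton name)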

instance (CacheRWM m) => CacheRWM (ReaderT r m) where
buildSchemaCacheWithOptions = lift . buildSchemaCacheWithOptions
invalidateCachedRemoteSchema = lift . invalidateCachedRemoteSchema
buildSchemaCacheWithOptions a b = lift $ buildSchemaCacheWithOptions a b

buildSchemaCache :: (CacheRWM m) => m ()
buildSchemaCache = buildSchemaCacheWithOptions CatalogUpdate
buildSchemaCache = buildSchemaCacheWithOptions CatalogUpdate mempty

-- | Rebuilds the schema cache. If an object with the given object id became newly inconsistent,
-- raises an error about it specifically. Otherwise, raises a generic metadata inconsistency error.
@@ -5,6 +5,7 @@ module Hasura.Server.App where

import Control.Concurrent.MVar
import Control.Exception (IOException, try)
import Control.Lens (view, _2)
import Control.Monad.Stateless
import Data.Aeson hiding (json)
import Data.Either (isRight)
@@ -457,9 +458,9 @@ mkWaiApp isoLevel logger sqlGenCtx enableAL pool ci httpManager mode corsCfg ena
(cacheRef, cacheBuiltTime) <- do
pgResp <- runExceptT $ peelRun runCtx pgExecCtxSer Q.ReadWrite $
(,) <$> buildRebuildableSchemaCache <*> liftTx fetchLastUpdate
(schemaCache, time) <- liftIO $ either initErrExit return pgResp
(schemaCache, event) <- liftIO $ either initErrExit return pgResp
scRef <- liftIO $ newIORef (schemaCache, initSchemaCacheVer)
return (scRef, snd <$> time)
return (scRef, view _2 <$> event)

cacheLock <- liftIO $ newMVar ()
planCache <- liftIO $ E.initPlanCache planCacheOptions
@@ -16,9 +16,6 @@ module Hasura.Server.Migrate
, dropCatalog
) where

import Control.Monad.Unique
import Data.Time.Clock (UTCTime)

import Hasura.Prelude

import qualified Data.Aeson as A
@@ -29,6 +26,10 @@ import qualified Database.PG.Query.Connection as Q
import qualified Language.Haskell.TH.Lib as TH
import qualified Language.Haskell.TH.Syntax as TH

import Control.Lens (view, _2)
import Control.Monad.Unique
import Data.Time.Clock (UTCTime)

import Hasura.Logging (Hasura, LogLevel (..), ToEngineLog (..))
import Hasura.RQL.DDL.Relationship
import Hasura.RQL.DDL.Schema
@@ -163,7 +164,7 @@ migrateCatalog migrationTime = do
buildCacheAndRecreateSystemMetadata :: m (RebuildableSchemaCache m)
buildCacheAndRecreateSystemMetadata = do
schemaCache <- buildRebuildableSchemaCache
snd <$> runCacheRWT schemaCache recreateSystemMetadata
view _2 <$> runCacheRWT schemaCache recreateSystemMetadata

-- the old 0.8 catalog version is non-integral, so we store it in the database as a string
getCatalogVersion = liftTx $ runIdentity . Q.getRow <$> Q.withQE defaultTxErrorHandler
@@ -12,7 +12,7 @@ import Hasura.Prelude
import qualified Data.Text as T

latestCatalogVersion :: Integer
latestCatalogVersion = 30
latestCatalogVersion = 31

latestCatalogVersionString :: T.Text
latestCatalogVersionString = T.pack $ show latestCatalogVersion
@@ -150,23 +150,21 @@ $(deriveJSON
''RQLQueryV2
)

fetchLastUpdate :: Q.TxE QErr (Maybe (InstanceId, UTCTime))
fetchLastUpdate = do
Q.withQE defaultTxErrorHandler
[Q.sql|
SELECT instance_id::text, occurred_at
FROM hdb_catalog.hdb_schema_update_event
ORDER BY occurred_at DESC LIMIT 1
|] () True
fetchLastUpdate :: Q.TxE QErr (Maybe (InstanceId, UTCTime, CacheInvalidations))
fetchLastUpdate = over (_Just._3) Q.getAltJ <$> Q.withQE defaultTxErrorHandler [Q.sql|
SELECT instance_id::text, occurred_at, invalidations
FROM hdb_catalog.hdb_schema_update_event
ORDER BY occurred_at DESC LIMIT 1
|] () True

recordSchemaUpdate :: InstanceId -> Q.TxE QErr ()
recordSchemaUpdate instanceId =
recordSchemaUpdate :: InstanceId -> CacheInvalidations -> Q.TxE QErr ()
recordSchemaUpdate instanceId invalidations =
liftTx $ Q.unitQE defaultTxErrorHandler [Q.sql|
INSERT INTO hdb_catalog.hdb_schema_update_event
(instance_id, occurred_at) VALUES ($1::uuid, DEFAULT)
(instance_id, occurred_at, invalidations) VALUES ($1::uuid, DEFAULT, $2::json)
ON CONFLICT ((occurred_at IS NOT NULL))
DO UPDATE SET instance_id = $1::uuid, occurred_at = DEFAULT
|] (Identity instanceId) True
DO UPDATE SET instance_id = $1::uuid, occurred_at = DEFAULT, invalidations = $2::json
|] (instanceId, Q.AltJ invalidations) True

runQuery
:: (HasVersion, MonadIO m, MonadError QErr m)
@@ -184,12 +182,12 @@ runQuery pgExecCtx instanceId userInfo sc hMgr sqlGenCtx systemDefined query = d
either throwError withReload resE
where
runCtx = RunCtx userInfo hMgr sqlGenCtx
withReload r = do
withReload (result, updatedCache, invalidations) = do
when (queryModifiesSchemaCache query) $ do
e <- liftIO $ runExceptT $ runLazyTx pgExecCtx Q.ReadWrite
$ liftTx $ recordSchemaUpdate instanceId
e <- liftIO $ runExceptT $ runLazyTx pgExecCtx Q.ReadWrite $ liftTx $
recordSchemaUpdate instanceId invalidations
liftEither e
return r
return (result, updatedCache)

-- | A predicate that determines whether the given query might modify/rebuild the schema cache. If
-- so, it needs to acquire the global lock on the schema cache so that other queries do not modify
@@ -58,9 +58,10 @@ instance ToEngineLog SchemaSyncThreadLog Hasura where

data EventPayload
= EventPayload
{ _epInstanceId :: !InstanceId
, _epOccurredAt :: !UTC.UTCTime
} deriving (Show, Eq)
{ _epInstanceId :: !InstanceId
, _epOccurredAt :: !UTC.UTCTime
, _epInvalidations :: !CacheInvalidations
}
$(deriveJSON (aesonDrop 3 snakeCase) ''EventPayload)

data ThreadError
@@ -136,9 +137,9 @@ listener sqlGenCtx pool logger httpMgr updateEventRef
Just time -> (dbInstId /= instanceId) && accrdAt > time

refreshCache Nothing = return ()
refreshCache (Just (dbInstId, accrdAt)) =
refreshCache (Just (dbInstId, accrdAt, invalidations)) =
when (shouldRefresh dbInstId accrdAt) $
refreshSchemaCache sqlGenCtx pool logger httpMgr cacheRef
refreshSchemaCache sqlGenCtx pool logger httpMgr cacheRef invalidations
threadType "schema cache reloaded after postgres listen init"

notifyHandler = \case
@@ -179,7 +180,7 @@ processor sqlGenCtx pool logger httpMgr updateEventRef
event <- STM.atomically getLatestEvent
logInfo logger threadType $ object ["processed_event" .= event]
when (shouldReload event) $
refreshSchemaCache sqlGenCtx pool logger httpMgr cacheRef
refreshSchemaCache sqlGenCtx pool logger httpMgr cacheRef (_epInvalidations event)
threadType "schema cache reloaded"
where
-- checks if there is an event
@@ -202,15 +203,17 @@ refreshSchemaCache
-> Logger Hasura
-> HTTP.Manager
-> SchemaCacheRef
-> CacheInvalidations
-> ThreadType
-> T.Text -> IO ()
refreshSchemaCache sqlGenCtx pool logger httpManager cacheRef threadType msg = do
refreshSchemaCache sqlGenCtx pool logger httpManager cacheRef invalidations threadType msg = do
-- Reload schema cache from catalog
resE <- liftIO $ runExceptT $ withSCUpdate cacheRef logger do
rebuildableCache <- fst <$> liftIO (readIORef $ _scrCache cacheRef)
buildSchemaCacheWithOptions CatalogSync
((), cache, _) <- buildSchemaCacheWithOptions CatalogSync invalidations
& runCacheRWT rebuildableCache
& peelRun runCtx pgCtx PG.ReadWrite
pure ((), cache)
case resE of
Left e -> logError logger threadType $ TEQueryError e
Right () -> logInfo logger threadType $ object ["message" .= msg]
@@ -411,7 +411,8 @@ CREATE TABLE hdb_catalog.remote_schemas (

CREATE TABLE hdb_catalog.hdb_schema_update_event (
instance_id uuid NOT NULL,
occurred_at timestamptz NOT NULL DEFAULT NOW()
occurred_at timestamptz NOT NULL DEFAULT NOW(),
invalidations json NOT NULL
);

CREATE UNIQUE INDEX hdb_schema_update_event_one_row
@@ -422,13 +423,16 @@ $function$
DECLARE
instance_id uuid;
occurred_at timestamptz;
invalidations json;
curr_rec record;
BEGIN
instance_id = NEW.instance_id;
occurred_at = NEW.occurred_at;
invalidations = NEW.invalidations;
PERFORM pg_notify('hasura_schema_update', json_build_object(
'instance_id', instance_id,
'occurred_at', occurred_at
'occurred_at', occurred_at,
'invalidations', invalidations
)::text);
RETURN curr_rec;
END;
@@ -657,9 +661,9 @@ CREATE VIEW hdb_catalog.hdb_computed_field_function AS
FROM hdb_catalog.hdb_computed_field
);

CREATE OR REPLACE FUNCTION hdb_catalog.check_violation(msg text) RETURNS bool AS
$$
BEGIN
RAISE check_violation USING message=msg;
END;
$$ LANGUAGE plpgsql;
server/src-rsr/migrations/30_to_31.sql (new file, 22 lines)
@@ -0,0 +1,22 @@
TRUNCATE hdb_catalog.hdb_schema_update_event;
ALTER TABLE hdb_catalog.hdb_schema_update_event ADD COLUMN invalidations json NOT NULL;
CREATE OR REPLACE FUNCTION hdb_catalog.hdb_schema_update_event_notifier() RETURNS trigger AS
$function$
DECLARE
instance_id uuid;
occurred_at timestamptz;
invalidations json;
curr_rec record;
BEGIN
instance_id = NEW.instance_id;
occurred_at = NEW.occurred_at;
invalidations = NEW.invalidations;
PERFORM pg_notify('hasura_schema_update', json_build_object(
'instance_id', instance_id,
'occurred_at', occurred_at,
'invalidations', invalidations
)::text);
RETURN curr_rec;
END;
$function$
LANGUAGE plpgsql;
server/src-rsr/migrations/31_to_30.sql (new file, 19 lines)
@@ -0,0 +1,19 @@
TRUNCATE hdb_catalog.hdb_schema_update_event;
CREATE OR REPLACE FUNCTION hdb_catalog.hdb_schema_update_event_notifier() RETURNS trigger AS
$function$
DECLARE
instance_id uuid;
occurred_at timestamptz;
curr_rec record;
BEGIN
instance_id = NEW.instance_id;
occurred_at = NEW.occurred_at;
PERFORM pg_notify('hasura_schema_update', json_build_object(
'instance_id', instance_id,
'occurred_at', occurred_at
)::text);
RETURN curr_rec;
END;
$function$
LANGUAGE plpgsql;
ALTER TABLE hdb_catalog.hdb_schema_update_event DROP COLUMN invalidations;
@@ -36,11 +36,10 @@ instance (MonadBase IO m) => TableCoreInfoRM (CacheRefT m)
instance (MonadBase IO m) => CacheRM (CacheRefT m) where
askSchemaCache = CacheRefT (fmap lastBuiltSchemaCache . readMVar)

instance (MonadIO m, MonadBaseControl IO m, MonadTx m, MonadUnique m) => CacheRWM (CacheRefT m) where
buildSchemaCacheWithOptions options = CacheRefT $ flip modifyMVar \schemaCache ->
swap <$> runCacheRWT schemaCache (buildSchemaCacheWithOptions options)
invalidateCachedRemoteSchema name = CacheRefT $ flip modifyMVar \schemaCache ->
swap <$> runCacheRWT schemaCache (invalidateCachedRemoteSchema name)
instance (MonadIO m, MonadBaseControl IO m, MonadTx m) => CacheRWM (CacheRefT m) where
buildSchemaCacheWithOptions reason invalidations = CacheRefT $ flip modifyMVar \schemaCache -> do
((), cache, _) <- runCacheRWT schemaCache (buildSchemaCacheWithOptions reason invalidations)
pure (cache, ())

instance Example (CacheRefT m ()) where
type Arg (CacheRefT m ()) = CacheRefT m :~> IO
@@ -0,0 +1,55 @@
# This is a regression test for #3791.
- description: Setup enum table, create relationship, and insert invalid enum value
  url: /v1/query
  status: 200
  query:
    type: bulk
    args:
    - type: set_table_is_enum
      args:
        table: weekdays
        is_enum: true
    - type: create_object_relationship
      args:
        table: employees
        name: favorite_color_object
        using:
          foreign_key_constraint_on: favorite_color
    - type: run_sql
      args:
        sql: INSERT INTO colors (value, comment) VALUES ('illegal+graphql+identifier', '')
    - type: reload_metadata
      args: {}

- description: Query inconsistent objects
  url: /v1/query
  status: 200
  response:
    is_consistent: false
    inconsistent_objects:
    - definition:
        schema: public
        name: colors
      reason: the table "colors" cannot be used as an enum because the value
        "illegal+graphql+identifier" is not a valid GraphQL enum value name
      type: table
    - definition:
        using:
          foreign_key_constraint_on: favorite_color
        name: favorite_color_object
        comment:
        table:
          schema: public
          name: employees
      reason: table "colors" is not tracked
      type: object_relation
  query:
    type: get_inconsistent_metadata
    args: {}

- description: Drop inconsistent objects
  url: /v1/query
  status: 200
  query:
    type: drop_inconsistent_metadata
    args: {}
@@ -90,7 +90,7 @@ class TestRemoteSchemaBasic:
        q = mk_add_remote_q('simple 2', 'http://localhost:5000/hello-graphql')
        st_code, resp = hge_ctx.v1q(q)
        assert st_code == 400
        assert resp['code'] == 'constraint-violation'
        assert resp['code'] == 'unexpected'

    def test_remove_schema_error(self, hge_ctx):
        """remove remote schema which is not added"""
@@ -679,6 +679,9 @@ class TestSetTableIsEnum(DefaultTestQueries):
    def test_add_test_schema_enum_table(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/add_test_schema_enum_table.yaml')

    def test_relationship_with_inconsistent_enum_table(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/relationship_with_inconsistent_enum_table.yaml')

class TestSetTableCustomFields(DefaultTestQueries):
    @classmethod
    def dir(cls):
@@ -735,5 +738,3 @@ class TestBulkQuery(DefaultTestQueries):

    def test_run_bulk_with_select_and_reads(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/select_with_reads.yaml')