mirror of https://github.com/hasura/graphql-engine.git
Explicitly invalidate enum values when metadata is reloaded
This fixes #3759. While we’re at it, this also improves the way invalidations are synced across instances, so that enums and remote schemas are appropriately reloaded by the schema syncing process.
commit 8ef205fba5 (parent 3cdb3841e6)
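At the core of the change is a CacheInvalidations value that schema-modifying API calls produce and that is recorded alongside each schema update event. Before the diff, a minimal, self-contained sketch of its accumulation semantics — Text stands in for RemoteSchemaName and the remote schema name is made up; the real definitions appear in the hunks below:

{-# LANGUAGE OverloadedStrings #-}

import           Data.HashSet (HashSet)
import qualified Data.HashSet as S
import           Data.Text (Text)

-- Mirrors the CacheInvalidations type introduced below, with Text standing
-- in for RemoteSchemaName.
data CacheInvalidations = CacheInvalidations
  { ciMetadata      :: Bool          -- force reloading all database information
  , ciRemoteSchemas :: HashSet Text  -- force refetching these remote schemas
  } deriving (Show)

instance Semigroup CacheInvalidations where
  CacheInvalidations a1 b1 <> CacheInvalidations a2 b2 =
    CacheInvalidations (a1 || a2) (b1 <> b2)

instance Monoid CacheInvalidations where
  mempty = CacheInvalidations False mempty

main :: IO ()
main = do
  -- reload_metadata requests a metadata invalidation (see runReloadMetadata)…
  let fromReloadMetadata     = mempty { ciMetadata = True }
  -- …and reload_remote_schema requests one schema refetch (see runReloadRemoteSchema).
  let fromReloadRemoteSchema = mempty { ciRemoteSchemas = S.singleton "my_schema" }
  -- Invalidations accumulated while serving a request merge monoidally before
  -- being broadcast to other instances via hdb_catalog.hdb_schema_update_event.
  print (fromReloadMetadata <> fromReloadRemoteSchema)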
@@ -46,7 +46,7 @@ runApp (HGEOptionsG rci hgeCmd) =
       execQuery queryBs
         & runHasSystemDefinedT (SystemDefined False)
         & runCacheRWT schemaCache
-        & fmap fst
+        & fmap (\(res, _, _) -> res)
     either printErrJExit (liftIO . BLC.putStrLn) res

   HCVersion -> liftIO $ putStrLn $ "Hasura GraphQL Engine: " ++ convertText currentVersion
@@ -380,7 +380,7 @@ runExportMetadata _ =

 runReloadMetadata :: (QErrM m, CacheRWM m) => ReloadMetadata -> m EncJSON
 runReloadMetadata ReloadMetadata = do
-  buildSchemaCache
+  buildSchemaCacheWithOptions CatalogUpdate mempty { ciMetadata = True }
   return successMsg

 runDumpInternalState
@@ -13,6 +13,7 @@ import Hasura.Prelude

 import qualified Data.Aeson as J
 import qualified Data.HashMap.Strict as Map
+import qualified Data.HashSet as S
 import qualified Database.PG.Query as Q

 import Hasura.GraphQL.RemoteServer
@@ -87,8 +88,8 @@ runReloadRemoteSchema (RemoteSchemaNameQuery name) = do
   void $ onNothing (Map.lookup name rmSchemas) $
     throw400 NotExists $ "remote schema with name " <> name <<> " does not exist"

-  invalidateCachedRemoteSchema name
-  withNewInconsistentObjsCheck buildSchemaCache
+  let invalidations = mempty { ciRemoteSchemas = S.singleton name }
+  withNewInconsistentObjsCheck $ buildSchemaCacheWithOptions CatalogUpdate invalidations
   pure successMsg

 addRemoteSchemaToCatalog
@@ -66,39 +66,45 @@ buildRebuildableSchemaCache = do
   pure $ RebuildableSchemaCache (Inc.result result) initialInvalidationKeys (Inc.rebuildRule result)

 newtype CacheRWT m a
-  = CacheRWT { unCacheRWT :: StateT (RebuildableSchemaCache m) m a }
+  -- The CacheInvalidations component of the state could actually be collected using WriterT, but
+  -- WriterT implementations prior to transformers-0.5.6.0 (which added
+  -- Control.Monad.Trans.Writer.CPS) are leaky, and we don’t have that yet.
+  = CacheRWT (StateT (RebuildableSchemaCache m, CacheInvalidations) m a)
   deriving
-    ( Functor, Applicative, Monad, MonadIO, MonadReader r, MonadError e, MonadWriter w, MonadTx
+    ( Functor, Applicative, Monad, MonadIO, MonadReader r, MonadError e, MonadTx
     , UserInfoM, HasHttpManager, HasSQLGenCtx, HasSystemDefined )

-runCacheRWT :: RebuildableSchemaCache m -> CacheRWT m a -> m (a, RebuildableSchemaCache m)
-runCacheRWT cache = flip runStateT cache . unCacheRWT
+runCacheRWT
+  :: Functor m
+  => RebuildableSchemaCache m -> CacheRWT m a -> m (a, RebuildableSchemaCache m, CacheInvalidations)
+runCacheRWT cache (CacheRWT m) =
+  runStateT m (cache, mempty) <&> \(v, (newCache, invalidations)) -> (v, newCache, invalidations)

 instance MonadTrans CacheRWT where
   lift = CacheRWT . lift

 instance (Monad m) => TableCoreInfoRM (CacheRWT m)
 instance (Monad m) => CacheRM (CacheRWT m) where
-  askSchemaCache = CacheRWT $ gets lastBuiltSchemaCache
+  askSchemaCache = CacheRWT $ gets (lastBuiltSchemaCache . fst)

 instance (MonadIO m, MonadTx m) => CacheRWM (CacheRWT m) where
-  buildSchemaCacheWithOptions buildReason = CacheRWT do
-    RebuildableSchemaCache _ invalidationKeys rule <- get
+  buildSchemaCacheWithOptions buildReason invalidations = CacheRWT do
+    (RebuildableSchemaCache _ invalidationKeys rule, oldInvalidations) <- get
+    let newInvalidationKeys = invalidateKeys invalidations invalidationKeys
     catalogMetadata <- liftTx fetchCatalogData
-    result <- lift $ flip runReaderT buildReason $ Inc.build rule (catalogMetadata, invalidationKeys)
+    result <- lift $ flip runReaderT buildReason $
+      Inc.build rule (catalogMetadata, newInvalidationKeys)
     let schemaCache = Inc.result result
-        prunedInvalidationKeys = pruneInvalidationKeys schemaCache invalidationKeys
-    put $! RebuildableSchemaCache schemaCache prunedInvalidationKeys (Inc.rebuildRule result)
+        prunedInvalidationKeys = pruneInvalidationKeys schemaCache newInvalidationKeys
+        !newCache = RebuildableSchemaCache schemaCache prunedInvalidationKeys (Inc.rebuildRule result)
+        !newInvalidations = oldInvalidations <> invalidations
+    put (newCache, newInvalidations)
     where
       -- Prunes invalidation keys that no longer exist in the schema to avoid leaking memory by
      -- hanging onto unnecessary keys.
      pruneInvalidationKeys schemaCache = over ikRemoteSchemas $ M.filterWithKey \name _ ->
        M.member name (scRemoteSchemas schemaCache)

-  invalidateCachedRemoteSchema name =
-    CacheRWT $ modifying (rscInvalidationMap . ikRemoteSchemas . at name) $
-      Just . maybe Inc.initialInvalidationKey Inc.invalidate
-
 buildSchemaCacheRule
   -- Note: by supplying BuildReason via MonadReader, it does not participate in caching, which is
   -- what we want!
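The rewritten CacheRWT threads the accumulated invalidations through StateT rather than WriterT, per the comment in the hunk above. A stripped-down, runnable model of that pattern — Int and [String] stand in for the real cache and invalidation types, and the helper names are illustrative:

import Control.Monad.Trans.State.Strict (StateT, modify', runStateT)
import Data.Functor ((<&>))

type Cache         = Int       -- stands in for RebuildableSchemaCache
type Invalidations = [String]  -- stands in for CacheInvalidations (a monoid)

-- State carries the cache plus the invalidations collected so far; the pair
-- plays the role WriterT would, without WriterT's space leak.
type CacheRWT m a = StateT (Cache, Invalidations) m a

-- Same shape as the new runCacheRWT: start with no invalidations and return
-- the result, the final cache, and everything that was collected.
runCacheRWT :: Functor m => Cache -> CacheRWT m a -> m (a, Cache, Invalidations)
runCacheRWT cache m =
  runStateT m (cache, mempty) <&> \(v, (newCache, invs)) -> (v, newCache, invs)

-- A stand-in for buildSchemaCacheWithOptions: rebuild the cache and append
-- the invalidations requested by this rebuild.
rebuild :: Monad m => Invalidations -> CacheRWT m ()
rebuild invs = modify' (\(cache, old) -> (cache + 1, old <> invs))

main :: IO ()
main = do
  ((), cache, invs) <- runCacheRWT 0 (rebuild ["metadata"] >> rebuild ["remote_schema"])
  print (cache, invs)  -- (2,["metadata","remote_schema"])

Callers adapt to the widened return type as the surrounding hunks show: Main.hs keeps only the result with fmap (\(res, _, _) -> res), while Migrate.hs keeps only the cache with view _2.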
@@ -145,7 +151,7 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
       computedFields = catalogMetadata

   -- tables
-  tableRawInfos <- buildTableCache -< tables
+  tableRawInfos <- buildTableCache -< (tables, Inc.selectD #_ikMetadata invalidationKeys)

   -- relationships and computed fields
   let relationshipsByTable = M.groupOn _crTable relationships
@@ -1,4 +1,5 @@
 {-# LANGUAGE Arrows #-}
+{-# LANGUAGE RecordWildCards #-}

 -- | Types/functions shared between modules that implement "Hasura.RQL.DDL.Schema.Cache". Other
 -- modules should not import this module directly.
@@ -6,6 +7,7 @@ module Hasura.RQL.DDL.Schema.Cache.Common where

 import Hasura.Prelude

+import qualified Data.HashMap.Strict as M
 import qualified Data.HashSet as HS
 import qualified Data.Sequence as Seq

@@ -20,11 +22,10 @@ import Hasura.RQL.Types.QueryCollection
 import Hasura.RQL.Types.Run
 import Hasura.SQL.Types

+-- | 'InvalidationKeys' used to apply requested 'CacheInvalidations'.
 data InvalidationKeys = InvalidationKeys
   { _ikMetadata :: !Inc.InvalidationKey
-  -- ^ Invalidated by the @reload_metadata@ API.
   , _ikRemoteSchemas :: !(HashMap RemoteSchemaName Inc.InvalidationKey)
-  -- ^ Invalidated by the @reload_remote_schema@ API.
   } deriving (Eq, Generic)
 instance Inc.Cacheable InvalidationKeys
 instance Inc.Select InvalidationKeys
@@ -33,6 +34,13 @@ $(makeLenses ''InvalidationKeys)
 initialInvalidationKeys :: InvalidationKeys
 initialInvalidationKeys = InvalidationKeys Inc.initialInvalidationKey mempty

+invalidateKeys :: CacheInvalidations -> InvalidationKeys -> InvalidationKeys
+invalidateKeys CacheInvalidations{..} InvalidationKeys{..} = InvalidationKeys
+  { _ikMetadata = if ciMetadata then Inc.invalidate _ikMetadata else _ikMetadata
+  , _ikRemoteSchemas = foldl' (flip invalidateRemoteSchema) _ikRemoteSchemas ciRemoteSchemas }
+  where
+    invalidateRemoteSchema = M.alter $ Just . maybe Inc.initialInvalidationKey Inc.invalidate
+
 data BuildInputs
   = BuildInputs
   { _biReason :: !BuildReason
@@ -292,10 +292,14 @@ buildTableCache
   :: forall arr m
   . ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
     , Inc.ArrowCache m arr, MonadTx m )
-  => [CatalogTable] `arr` M.HashMap QualifiedTable TableRawInfo
-buildTableCache = Inc.cache proc catalogTables -> do
+  => ( [CatalogTable]
+     , Inc.Dependency Inc.InvalidationKey
+     ) `arr` M.HashMap QualifiedTable TableRawInfo
+buildTableCache = Inc.cache proc (catalogTables, reloadMetadataInvalidationKey) -> do
   rawTableInfos <-
-    (| Inc.keyed (| withTable (\tables -> buildRawTableInfo <<< noDuplicateTables -< tables) |)
+    (| Inc.keyed (| withTable (\tables
+         -> (tables, reloadMetadataInvalidationKey)
+         >- first noDuplicateTables >>> buildRawTableInfo) |)
     |) (M.groupOnNE _ctName catalogTables)
   let rawTableCache = M.catMaybes rawTableInfos
       enumTables = flip M.mapMaybe rawTableCache \rawTableInfo ->
@@ -314,8 +318,13 @@ buildTableCache = Inc.cache proc catalogTables -> do
       _ -> throwA -< err400 AlreadyExists "duplication definition for table"

     -- Step 1: Build the raw table cache from metadata information.
-    buildRawTableInfo :: ErrorA QErr arr CatalogTable (TableCoreInfoG PGRawColumnInfo PGCol)
-    buildRawTableInfo = Inc.cache proc (CatalogTable name systemDefined isEnum config maybeInfo) -> do
+    buildRawTableInfo
+      :: ErrorA QErr arr
+       ( CatalogTable
+       , Inc.Dependency Inc.InvalidationKey
+       ) (TableCoreInfoG PGRawColumnInfo PGCol)
+    buildRawTableInfo = Inc.cache proc (catalogTable, reloadMetadataInvalidationKey) -> do
+      let CatalogTable name systemDefined isEnum config maybeInfo = catalogTable
       catalogInfo <-
         (| onNothingA (throwA -<
              err400 NotExists $ "no such table/view exists in postgres: " <>> name)
@@ -326,7 +335,11 @@ buildTableCache = Inc.cache proc catalogTables -> do
           primaryKey = _ctiPrimaryKey catalogInfo
       rawPrimaryKey <- liftEitherA -< traverse (resolvePrimaryKeyColumns columnMap) primaryKey
       enumValues <- if isEnum
-        then bindErrorA -< Just <$> fetchAndValidateEnumValues name rawPrimaryKey columns
+        then do
+          -- We want to make sure we reload enum values whenever someone explicitly calls
+          -- `reload_metadata`.
+          Inc.dependOn -< reloadMetadataInvalidationKey
+          bindErrorA -< Just <$> fetchAndValidateEnumValues name rawPrimaryKey columns
         else returnA -< Nothing

       returnA -< TableCoreInfo
@@ -13,6 +13,7 @@ module Hasura.RQL.Types.SchemaCache.Build

   , CacheRWM(..)
   , BuildReason(..)
+  , CacheInvalidations(..)
   , buildSchemaCache
   , buildSchemaCacheFor
   , buildSchemaCacheStrict
@@ -28,6 +29,8 @@ import qualified Data.Text as T
 import Control.Arrow.Extended
 import Control.Lens
 import Data.Aeson (toJSON)
+import Data.Aeson.Casing
+import Data.Aeson.TH
 import Data.List (nub)

 import Hasura.RQL.Types.Error
@@ -96,8 +99,7 @@ withRecordInconsistency f = proc (e, (metadataObject, s)) -> do
 -- operations for triggering a schema cache rebuild

 class (CacheRM m) => CacheRWM m where
-  buildSchemaCacheWithOptions :: BuildReason -> m ()
-  invalidateCachedRemoteSchema :: RemoteSchemaName -> m ()
+  buildSchemaCacheWithOptions :: BuildReason -> CacheInvalidations -> m ()

 data BuildReason
   -- | The build was triggered by an update this instance made to the catalog (in the
@@ -110,12 +112,26 @@ data BuildReason
   | CatalogSync
   deriving (Show, Eq)

+data CacheInvalidations = CacheInvalidations
+  { ciMetadata :: !Bool
+  -- ^ Force reloading of all database information, including information not technically stored in
+  -- metadata (currently just enum values). Set by the @reload_metadata@ API.
+  , ciRemoteSchemas :: !(HashSet RemoteSchemaName)
+  -- ^ Force refetching of the given remote schemas, even if their definition has not changed. Set
+  -- by the @reload_remote_schema@ API.
+  }
+$(deriveJSON (aesonDrop 2 snakeCase) ''CacheInvalidations)
+
+instance Semigroup CacheInvalidations where
+  CacheInvalidations a1 b1 <> CacheInvalidations a2 b2 = CacheInvalidations (a1 || a2) (b1 <> b2)
+instance Monoid CacheInvalidations where
+  mempty = CacheInvalidations False mempty
+
 instance (CacheRWM m) => CacheRWM (ReaderT r m) where
-  buildSchemaCacheWithOptions = lift . buildSchemaCacheWithOptions
-  invalidateCachedRemoteSchema = lift . invalidateCachedRemoteSchema
+  buildSchemaCacheWithOptions a b = lift $ buildSchemaCacheWithOptions a b

 buildSchemaCache :: (CacheRWM m) => m ()
-buildSchemaCache = buildSchemaCacheWithOptions CatalogUpdate
+buildSchemaCache = buildSchemaCacheWithOptions CatalogUpdate mempty

 -- | Rebuilds the schema cache. If an object with the given object id became newly inconsistent,
 -- raises an error about it specifically. Otherwise, raises a generic metadata inconsistency error.
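Because CacheInvalidations derives its JSON instances with aesonDrop 2 snakeCase, the value stored in the new invalidations column (and carried in the sync payload) uses metadata and remote_schemas keys. A small sketch of the wire shape, with RemoteSchemaName approximated by Text and a made-up schema name:

{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE TemplateHaskell   #-}

import           Data.Aeson (encode)
import           Data.Aeson.Casing (aesonDrop, snakeCase)
import           Data.Aeson.TH (deriveJSON)
import qualified Data.ByteString.Lazy.Char8 as BL
import           Data.HashSet (HashSet)
import qualified Data.HashSet as S
import           Data.Text (Text)

data CacheInvalidations = CacheInvalidations
  { ciMetadata      :: !Bool
  , ciRemoteSchemas :: !(HashSet Text)  -- RemoteSchemaName in the engine
  }
$(deriveJSON (aesonDrop 2 snakeCase) ''CacheInvalidations)

main :: IO ()
main = BL.putStrLn $ encode $ CacheInvalidations True (S.singleton "my_schema")
-- prints: {"metadata":true,"remote_schemas":["my_schema"]}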
@@ -5,6 +5,7 @@ module Hasura.Server.App where

 import Control.Concurrent.MVar
 import Control.Exception (IOException, try)
+import Control.Lens (view, _2)
 import Control.Monad.Stateless
 import Data.Aeson hiding (json)
 import Data.Either (isRight)
@@ -461,9 +462,9 @@ mkWaiApp isoLevel logger sqlGenCtx enableAL pool ci httpManager mode corsCfg ena
   (cacheRef, cacheBuiltTime) <- do
     pgResp <- runExceptT $ peelRun runCtx pgExecCtxSer Q.ReadWrite $
       (,) <$> buildRebuildableSchemaCache <*> liftTx fetchLastUpdate
-    (schemaCache, time) <- liftIO $ either initErrExit return pgResp
+    (schemaCache, event) <- liftIO $ either initErrExit return pgResp
     scRef <- liftIO $ newIORef (schemaCache, initSchemaCacheVer)
-    return (scRef, snd <$> time)
+    return (scRef, view _2 <$> event)

   cacheLock <- liftIO $ newMVar ()
   planCache <- liftIO $ E.initPlanCache planCacheOptions
@@ -16,9 +16,6 @@ module Hasura.Server.Migrate
   , dropCatalog
   ) where

-import Control.Monad.Unique
-import Data.Time.Clock (UTCTime)
-
 import Hasura.Prelude

 import qualified Data.Aeson as A
@@ -29,6 +26,10 @@ import qualified Database.PG.Query.Connection as Q
 import qualified Language.Haskell.TH.Lib as TH
 import qualified Language.Haskell.TH.Syntax as TH

+import Control.Lens (view, _2)
+import Control.Monad.Unique
+import Data.Time.Clock (UTCTime)
+
 import Hasura.Logging (Hasura, LogLevel (..), ToEngineLog (..))
 import Hasura.RQL.DDL.Relationship
 import Hasura.RQL.DDL.Schema
@@ -163,7 +164,7 @@ migrateCatalog migrationTime = do
     buildCacheAndRecreateSystemMetadata :: m (RebuildableSchemaCache m)
     buildCacheAndRecreateSystemMetadata = do
       schemaCache <- buildRebuildableSchemaCache
-      snd <$> runCacheRWT schemaCache recreateSystemMetadata
+      view _2 <$> runCacheRWT schemaCache recreateSystemMetadata

     -- the old 0.8 catalog version is non-integral, so we store it in the database as a string
     getCatalogVersion = liftTx $ runIdentity . Q.getRow <$> Q.withQE defaultTxErrorHandler
@@ -12,7 +12,7 @@ import Hasura.Prelude
 import qualified Data.Text as T

 latestCatalogVersion :: Integer
-latestCatalogVersion = 30
+latestCatalogVersion = 31

 latestCatalogVersionString :: T.Text
 latestCatalogVersionString = T.pack $ show latestCatalogVersion
@@ -150,23 +150,21 @@ $(deriveJSON
   ''RQLQueryV2
  )

-fetchLastUpdate :: Q.TxE QErr (Maybe (InstanceId, UTCTime))
-fetchLastUpdate = do
-  Q.withQE defaultTxErrorHandler
-    [Q.sql|
-      SELECT instance_id::text, occurred_at
-      FROM hdb_catalog.hdb_schema_update_event
-      ORDER BY occurred_at DESC LIMIT 1
-    |] () True
+fetchLastUpdate :: Q.TxE QErr (Maybe (InstanceId, UTCTime, CacheInvalidations))
+fetchLastUpdate = over (_Just._3) Q.getAltJ <$> Q.withQE defaultTxErrorHandler [Q.sql|
+  SELECT instance_id::text, occurred_at, invalidations
+  FROM hdb_catalog.hdb_schema_update_event
+  ORDER BY occurred_at DESC LIMIT 1
+  |] () True

-recordSchemaUpdate :: InstanceId -> Q.TxE QErr ()
-recordSchemaUpdate instanceId =
+recordSchemaUpdate :: InstanceId -> CacheInvalidations -> Q.TxE QErr ()
+recordSchemaUpdate instanceId invalidations =
   liftTx $ Q.unitQE defaultTxErrorHandler [Q.sql|
     INSERT INTO hdb_catalog.hdb_schema_update_event
-      (instance_id, occurred_at) VALUES ($1::uuid, DEFAULT)
+      (instance_id, occurred_at, invalidations) VALUES ($1::uuid, DEFAULT, $2::json)
     ON CONFLICT ((occurred_at IS NOT NULL))
-    DO UPDATE SET instance_id = $1::uuid, occurred_at = DEFAULT
-    |] (Identity instanceId) True
+    DO UPDATE SET instance_id = $1::uuid, occurred_at = DEFAULT, invalidations = $2::json
+    |] (instanceId, Q.AltJ invalidations) True

 runQuery
   :: (HasVersion, MonadIO m, MonadError QErr m)
@@ -184,12 +182,12 @@ runQuery pgExecCtx instanceId userInfo sc hMgr sqlGenCtx systemDefined query = do
   either throwError withReload resE
   where
     runCtx = RunCtx userInfo hMgr sqlGenCtx
-    withReload r = do
+    withReload (result, updatedCache, invalidations) = do
       when (queryModifiesSchemaCache query) $ do
-        e <- liftIO $ runExceptT $ runLazyTx pgExecCtx Q.ReadWrite
-          $ liftTx $ recordSchemaUpdate instanceId
+        e <- liftIO $ runExceptT $ runLazyTx pgExecCtx Q.ReadWrite $ liftTx $
+          recordSchemaUpdate instanceId invalidations
         liftEither e
-      return r
+      return (result, updatedCache)

 -- | A predicate that determines whether the given query might modify/rebuild the schema cache. If
 -- so, it needs to acquire the global lock on the schema cache so that other queries do not modify
@@ -58,9 +58,10 @@ instance ToEngineLog SchemaSyncThreadLog Hasura where

 data EventPayload
   = EventPayload
   { _epInstanceId :: !InstanceId
   , _epOccurredAt :: !UTC.UTCTime
-  } deriving (Show, Eq)
+  , _epInvalidations :: !CacheInvalidations
+  }
 $(deriveJSON (aesonDrop 3 snakeCase) ''EventPayload)

 data ThreadError
@@ -136,9 +137,9 @@ listener sqlGenCtx pool logger httpMgr updateEventRef
         Just time -> (dbInstId /= instanceId) && accrdAt > time

     refreshCache Nothing = return ()
-    refreshCache (Just (dbInstId, accrdAt)) =
+    refreshCache (Just (dbInstId, accrdAt, invalidations)) =
       when (shouldRefresh dbInstId accrdAt) $
-        refreshSchemaCache sqlGenCtx pool logger httpMgr cacheRef
+        refreshSchemaCache sqlGenCtx pool logger httpMgr cacheRef invalidations
           threadType "schema cache reloaded after postgres listen init"

     notifyHandler = \case
@@ -179,7 +180,7 @@ processor sqlGenCtx pool logger httpMgr updateEventRef
     event <- STM.atomically getLatestEvent
     logInfo logger threadType $ object ["processed_event" .= event]
     when (shouldReload event) $
-      refreshSchemaCache sqlGenCtx pool logger httpMgr cacheRef
+      refreshSchemaCache sqlGenCtx pool logger httpMgr cacheRef (_epInvalidations event)
        threadType "schema cache reloaded"
   where
     -- checks if there is an event
@@ -202,15 +203,17 @@ refreshSchemaCache
   -> Logger Hasura
   -> HTTP.Manager
   -> SchemaCacheRef
+  -> CacheInvalidations
   -> ThreadType
   -> T.Text -> IO ()
-refreshSchemaCache sqlGenCtx pool logger httpManager cacheRef threadType msg = do
+refreshSchemaCache sqlGenCtx pool logger httpManager cacheRef invalidations threadType msg = do
   -- Reload schema cache from catalog
   resE <- liftIO $ runExceptT $ withSCUpdate cacheRef logger do
     rebuildableCache <- fst <$> liftIO (readIORef $ _scrCache cacheRef)
-    buildSchemaCacheWithOptions CatalogSync
+    ((), cache, _) <- buildSchemaCacheWithOptions CatalogSync invalidations
       & runCacheRWT rebuildableCache
       & peelRun runCtx pgCtx PG.ReadWrite
+    pure ((), cache)
   case resE of
     Left e -> logError logger threadType $ TEQueryError e
     Right () -> logInfo logger threadType $ object ["message" .= msg]
@@ -411,7 +411,8 @@ CREATE TABLE hdb_catalog.remote_schemas (

 CREATE TABLE hdb_catalog.hdb_schema_update_event (
   instance_id uuid NOT NULL,
-  occurred_at timestamptz NOT NULL DEFAULT NOW()
+  occurred_at timestamptz NOT NULL DEFAULT NOW(),
+  invalidations json NOT NULL
 );

 CREATE UNIQUE INDEX hdb_schema_update_event_one_row
@@ -422,13 +423,16 @@ $function$
   DECLARE
     instance_id uuid;
     occurred_at timestamptz;
+    invalidations json;
     curr_rec record;
   BEGIN
     instance_id = NEW.instance_id;
     occurred_at = NEW.occurred_at;
+    invalidations = NEW.invalidations;
     PERFORM pg_notify('hasura_schema_update', json_build_object(
       'instance_id', instance_id,
-      'occurred_at', occurred_at
+      'occurred_at', occurred_at,
+      'invalidations', invalidations
       )::text);
     RETURN curr_rec;
   END;
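Putting the two halves together: the trigger above embeds the invalidations in the pg_notify payload, and the schema sync listener decodes that payload into EventPayload. A sketch of the decoding side with simplified field types — InstanceId and UTCTime approximated by Text, the payload values illustrative; the real instances come from aesonDrop 3 snakeCase as in the diff above:

{-# LANGUAGE OverloadedStrings #-}

import           Data.Aeson (FromJSON (..), eitherDecode, withObject, (.:))
import qualified Data.ByteString.Lazy.Char8 as BL
import           Data.Text (Text)

-- Simplified mirrors of EventPayload and CacheInvalidations.
data EventPayload = EventPayload
  { epInstanceId    :: Text           -- InstanceId in the engine
  , epOccurredAt    :: Text           -- UTCTime in the engine
  , epInvalidations :: Invalidations
  } deriving (Show)

data Invalidations = Invalidations
  { invMetadata      :: Bool
  , invRemoteSchemas :: [Text]
  } deriving (Show)

instance FromJSON EventPayload where
  parseJSON = withObject "EventPayload" $ \o -> EventPayload
    <$> o .: "instance_id"
    <*> o .: "occurred_at"
    <*> o .: "invalidations"

instance FromJSON Invalidations where
  parseJSON = withObject "Invalidations" $ \o -> Invalidations
    <$> o .: "metadata"
    <*> o .: "remote_schemas"

main :: IO ()
main = do
  -- An illustrative payload, shaped like what hdb_schema_update_event_notifier emits.
  let payload :: BL.ByteString
      payload = "{\"instance_id\":\"some-uuid\",\"occurred_at\":\"2020-01-30T00:00:00Z\",\"invalidations\":{\"metadata\":true,\"remote_schemas\":[\"my_schema\"]}}"
  print (eitherDecode payload :: Either String EventPayload)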
@@ -657,9 +661,9 @@ CREATE VIEW hdb_catalog.hdb_computed_field_function AS
     FROM hdb_catalog.hdb_computed_field
 );

 CREATE OR REPLACE FUNCTION hdb_catalog.check_violation(msg text) RETURNS bool AS
 $$
   BEGIN
     RAISE check_violation USING message=msg;
   END;
 $$ LANGUAGE plpgsql;
server/src-rsr/migrations/30_to_31.sql (new file, 22 lines)
@@ -0,0 +1,22 @@
+TRUNCATE hdb_catalog.hdb_schema_update_event;
+ALTER TABLE hdb_catalog.hdb_schema_update_event ADD COLUMN invalidations json NOT NULL;
+CREATE OR REPLACE FUNCTION hdb_catalog.hdb_schema_update_event_notifier() RETURNS trigger AS
+$function$
+  DECLARE
+    instance_id uuid;
+    occurred_at timestamptz;
+    invalidations json;
+    curr_rec record;
+  BEGIN
+    instance_id = NEW.instance_id;
+    occurred_at = NEW.occurred_at;
+    invalidations = NEW.invalidations;
+    PERFORM pg_notify('hasura_schema_update', json_build_object(
+      'instance_id', instance_id,
+      'occurred_at', occurred_at,
+      'invalidations', invalidations
+      )::text);
+    RETURN curr_rec;
+  END;
+$function$
+LANGUAGE plpgsql;
server/src-rsr/migrations/31_to_30.sql (new file, 19 lines)
@@ -0,0 +1,19 @@
+TRUNCATE hdb_catalog.hdb_schema_update_event;
+CREATE OR REPLACE FUNCTION hdb_catalog.hdb_schema_update_event_notifier() RETURNS trigger AS
+$function$
+  DECLARE
+    instance_id uuid;
+    occurred_at timestamptz;
+    curr_rec record;
+  BEGIN
+    instance_id = NEW.instance_id;
+    occurred_at = NEW.occurred_at;
+    PERFORM pg_notify('hasura_schema_update', json_build_object(
+      'instance_id', instance_id,
+      'occurred_at', occurred_at
+      )::text);
+    RETURN curr_rec;
+  END;
+$function$
+LANGUAGE plpgsql;
+ALTER TABLE hdb_catalog.hdb_schema_update_event DROP COLUMN invalidations;
@@ -36,11 +36,10 @@ instance (MonadBase IO m) => TableCoreInfoRM (CacheRefT m)
 instance (MonadBase IO m) => CacheRM (CacheRefT m) where
   askSchemaCache = CacheRefT (fmap lastBuiltSchemaCache . readMVar)

-instance (MonadIO m, MonadBaseControl IO m, MonadTx m, MonadUnique m) => CacheRWM (CacheRefT m) where
-  buildSchemaCacheWithOptions options = CacheRefT $ flip modifyMVar \schemaCache ->
-    swap <$> runCacheRWT schemaCache (buildSchemaCacheWithOptions options)
-  invalidateCachedRemoteSchema name = CacheRefT $ flip modifyMVar \schemaCache ->
-    swap <$> runCacheRWT schemaCache (invalidateCachedRemoteSchema name)
+instance (MonadIO m, MonadBaseControl IO m, MonadTx m) => CacheRWM (CacheRefT m) where
+  buildSchemaCacheWithOptions reason invalidations = CacheRefT $ flip modifyMVar \schemaCache -> do
+    ((), cache, _) <- runCacheRWT schemaCache (buildSchemaCacheWithOptions reason invalidations)
+    pure (cache, ())

 instance Example (CacheRefT m ()) where
   type Arg (CacheRefT m ()) = CacheRefT m :~> IO
@@ -0,0 +1,55 @@
+# This is a regression test for #3791.
+- description: Setup enum table, create relationship, and insert invalid enum value
+  url: /v1/query
+  status: 200
+  query:
+    type: bulk
+    args:
+    - type: set_table_is_enum
+      args:
+        table: weekdays
+        is_enum: true
+    - type: create_object_relationship
+      args:
+        table: employees
+        name: favorite_color_object
+        using:
+          foreign_key_constraint_on: favorite_color
+    - type: run_sql
+      args:
+        sql: INSERT INTO colors (value, comment) VALUES ('illegal+graphql+identifier', '')
+    - type: reload_metadata
+      args: {}
+
+- description: Query inconsistent objects
+  url: /v1/query
+  status: 200
+  response:
+    is_consistent: false
+    inconsistent_objects:
+    - definition:
+        schema: public
+        name: colors
+      reason: the table "colors" cannot be used as an enum because the value
+        "illegal+graphql+identifier" is not a valid GraphQL enum value name
+      type: table
+    - definition:
+        using:
+          foreign_key_constraint_on: favorite_color
+        name: favorite_color_object
+        comment:
+        table:
+          schema: public
+          name: employees
+      reason: table "colors" is not tracked
+      type: object_relation
+  query:
+    type: get_inconsistent_metadata
+    args: {}
+
+- description: Drop inconsistent objects
+  url: /v1/query
+  status: 200
+  query:
+    type: drop_inconsistent_metadata
+    args: {}
@@ -679,6 +679,9 @@ class TestSetTableIsEnum(DefaultTestQueries):
     def test_add_test_schema_enum_table(self, hge_ctx):
         check_query_f(hge_ctx, self.dir() + '/add_test_schema_enum_table.yaml')

+    def test_relationship_with_inconsistent_enum_table(self, hge_ctx):
+        check_query_f(hge_ctx, self.dir() + '/relationship_with_inconsistent_enum_table.yaml')
+
 class TestSetTableCustomFields(DefaultTestQueries):
     @classmethod
     def dir(cls):
@@ -735,5 +738,3 @@ class TestBulkQuery(DefaultTestQueries):

     def test_run_bulk_with_select_and_reads(self, hge_ctx):
         check_query_f(hge_ctx, self.dir() + '/select_with_reads.yaml')
-
-