{-# LANGUAGE Arrows #-}
{-# LANGUAGE OverloadedLabels #-}

{-| Top-level functions concerned specifically with operations on the schema cache, such as
rebuilding it from the catalog and incorporating schema changes. See the module documentation for
"Hasura.RQL.DDL.Schema" for more details.

__Note__: this module is __mutually recursive__ with other @Hasura.RQL.DDL.Schema.*@ modules, which
both define pieces of the implementation of building the schema cache and define handlers that
trigger schema cache rebuilds. -}
module Hasura.RQL.DDL.Schema.Cache
  ( RebuildableSchemaCache
  , lastBuiltSchemaCache
  , buildRebuildableSchemaCache
  , CacheRWT
  , runCacheRWT
  , withMetadataCheck
  ) where

import Hasura.Prelude

import qualified Data.HashMap.Strict.Extended as M
import qualified Data.HashSet as HS
import qualified Data.Text as T

import qualified Data.Environment as Env
import qualified Database.PG.Query as Q

import Control.Arrow.Extended
import Control.Lens hiding ((.=))
import Control.Monad.Unique
import Data.Aeson
import Data.List (nub)

import qualified Hasura.GraphQL.Context as GC
import qualified Hasura.GraphQL.RelaySchema as Relay
import qualified Hasura.GraphQL.Schema as GS
import qualified Hasura.GraphQL.Validate.Types as VT
import qualified Hasura.Incremental as Inc
import qualified Language.GraphQL.Draft.Syntax as G

import Hasura.Db
import Hasura.GraphQL.RemoteServer
import Hasura.GraphQL.Schema.CustomTypes
import Hasura.GraphQL.Schema.Merge
import Hasura.GraphQL.Utils (showNames)
import Hasura.RQL.DDL.Action
import Hasura.RQL.DDL.ComputedField
import Hasura.RQL.DDL.CustomTypes
import Hasura.RQL.DDL.Deps
import Hasura.RQL.DDL.EventTrigger
import Hasura.RQL.DDL.RemoteSchema
import Hasura.RQL.DDL.ScheduledTrigger
import Hasura.RQL.DDL.Schema.Cache.Common
import Hasura.RQL.DDL.Schema.Cache.Dependencies
import Hasura.RQL.DDL.Schema.Cache.Fields
import Hasura.RQL.DDL.Schema.Cache.Permission
import Hasura.RQL.DDL.Schema.Catalog
import Hasura.RQL.DDL.Schema.Diff
import Hasura.RQL.DDL.Schema.Function
import Hasura.RQL.DDL.Schema.Table
import Hasura.RQL.DDL.Utils (clearHdbViews)
import Hasura.RQL.Types
import Hasura.RQL.Types.Catalog
import Hasura.Server.Version (HasVersion)
import Hasura.Session
import Hasura.SQL.Types
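
{- A quick orientation, since this module leans heavily on arrow notation
(informal; see GHC’s arrow-notation documentation and "Control.Arrow.Extended"
for the authoritative story):

  * @proc x -> do { y <- f -< input; ... }@ is GHC arrow notation, enabled by
    the @Arrows@ pragma above.
  * @(| cmd (...) |) arg@ is “banana bracket” syntax for applying higher-order
    arrow commands such as 'withRecordInconsistency' and 'modifyErrA'.
  * @(>->)@ and @(>-)@ are pipeline-style command combinators imported from
    "Control.Arrow.Extended". -}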

mergeCustomTypes
  :: MonadError QErr f
  => GS.GCtxMap -> GS.GCtx -> (NonObjectTypeMap, AnnotatedObjects)
  -> f (GS.GCtxMap, GS.GCtx)
mergeCustomTypes gCtxMap remoteSchemaCtx customTypesState = do
  let adminCustomTypes = uncurry buildCustomTypesSchema customTypesState adminRoleName
  let commonTypes = M.intersectionWith (,) existingTypes adminCustomTypes
      conflictingCustomTypes =
        map (G.unNamedType . fst) $ M.toList $
        flip M.filter commonTypes $ \case
          -- only scalars can be common
          (VT.TIScalar _, VT.TIScalar _) -> False
          (_, _) -> True
  unless (null conflictingCustomTypes) $
    throw400 InvalidCustomTypes $
      "the following custom types conflict with the autogenerated " <>
      "Hasura types or with types from remote schemas: "
      <> showNames conflictingCustomTypes

  let gCtxMapWithCustomTypes = flip M.mapWithKey gCtxMap $ \roleName schemaCtx ->
        flip fmap schemaCtx $ \gCtx ->
          let customTypes = uncurry buildCustomTypesSchema customTypesState roleName
          in addCustomTypes gCtx customTypes

  -- populate the gctx of each role with the custom types
  return ( gCtxMapWithCustomTypes
         , addCustomTypes remoteSchemaCtx adminCustomTypes
         )
  where
    addCustomTypes gCtx customTypes =
      gCtx { GS._gTypes = GS._gTypes gCtx <> customTypes }
    existingTypes =
      case M.lookup adminRoleName gCtxMap of
        Just schemaCtx -> GS._gTypes $ GC._rctxDefault schemaCtx
        Nothing        -> GS._gTypes remoteSchemaCtx
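
-- To illustrate the conflict rule above (hypothetical types): a scalar @Date@
-- defined both by the custom types and by the existing admin GCtx is
-- tolerated, but an object type @User@ defined in both places lands in
-- 'conflictingCustomTypes' and fails the merge with 'InvalidCustomTypes'.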

buildRebuildableSchemaCache
  :: (HasVersion, MonadIO m, MonadUnique m, MonadTx m, HasHttpManager m, HasSQLGenCtx m)
  => Env.Environment
  -> m (RebuildableSchemaCache m)
buildRebuildableSchemaCache env = do
  catalogMetadata <- liftTx fetchCatalogData
  result <- flip runReaderT CatalogSync $
    Inc.build (buildSchemaCacheRule env) (catalogMetadata, initialInvalidationKeys)
  pure $ RebuildableSchemaCache (Inc.result result) initialInvalidationKeys (Inc.rebuildRule result)

newtype CacheRWT m a
  -- The CacheInvalidations component of the state could actually be collected using WriterT, but
  -- WriterT implementations prior to transformers-0.5.6.0 (which added
  -- Control.Monad.Trans.Writer.CPS) are leaky, and we don’t have that yet.
  = CacheRWT (StateT (RebuildableSchemaCache m, CacheInvalidations) m a)
  deriving
    ( Functor, Applicative, Monad, MonadIO, MonadReader r, MonadError e, MonadTx
    , UserInfoM, HasHttpManager, HasSQLGenCtx, HasSystemDefined )

runCacheRWT
  :: Functor m
  => RebuildableSchemaCache m -> CacheRWT m a -> m (a, RebuildableSchemaCache m, CacheInvalidations)
runCacheRWT cache (CacheRWT m) =
  runStateT m (cache, mempty) <&> \(v, (newCache, invalidations)) -> (v, newCache, invalidations)
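
-- A minimal sketch of how these pieces compose (illustrative only; running
-- the underlying monad @m@ down to IO is elided):
--
-- > do rebuildableCache <- buildRebuildableSchemaCache env
-- >    (_, newCache, invalidations) <-
-- >      runCacheRWT rebuildableCache (buildSchemaCacheWithOptions CatalogUpdate mempty)
-- >    -- 'invalidations' can then be broadcast so other instances resync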

instance MonadTrans CacheRWT where
  lift = CacheRWT . lift

instance (Monad m) => TableCoreInfoRM (CacheRWT m)
instance (Monad m) => CacheRM (CacheRWT m) where
  askSchemaCache = CacheRWT $ gets (lastBuiltSchemaCache . fst)

instance (MonadIO m, MonadTx m) => CacheRWM (CacheRWT m) where
  buildSchemaCacheWithOptions buildReason invalidations = CacheRWT do
    (RebuildableSchemaCache _ invalidationKeys rule, oldInvalidations) <- get
    let newInvalidationKeys = invalidateKeys invalidations invalidationKeys
    catalogMetadata <- liftTx fetchCatalogData
    result <- lift $ flip runReaderT buildReason $
      Inc.build rule (catalogMetadata, newInvalidationKeys)
    let schemaCache = Inc.result result
        prunedInvalidationKeys = pruneInvalidationKeys schemaCache newInvalidationKeys
        !newCache = RebuildableSchemaCache schemaCache prunedInvalidationKeys (Inc.rebuildRule result)
        !newInvalidations = oldInvalidations <> invalidations
    put (newCache, newInvalidations)
    where
      -- Prunes invalidation keys that no longer exist in the schema to avoid leaking memory by
      -- hanging onto unnecessary keys.
      pruneInvalidationKeys schemaCache = over ikRemoteSchemas $ M.filterWithKey \name _ ->
        -- see Note [Keep invalidation keys for inconsistent objects]
        name `elem` getAllRemoteSchemas schemaCache
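
-- A sketch of the incremental-build contract this module relies on (see
-- "Hasura.Incremental" for the authoritative API): 'Inc.build' runs a rule and
-- yields a result pairing the output ('Inc.result') with a resumable rule
-- ('Inc.rebuildRule'); feeding the latter back into 'Inc.build' reuses cached
-- sub-computations whose inputs did not change.
--
-- > result  <- Inc.build rule (metadata, keys)
-- > result' <- Inc.build (Inc.rebuildRule result) (metadata', keys')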

buildSchemaCacheRule
  -- Note: by supplying BuildReason via MonadReader, it does not participate in caching, which is
  -- what we want!
  :: ( HasVersion, ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
     , MonadIO m, MonadTx m, MonadReader BuildReason m, HasHttpManager m, HasSQLGenCtx m )
  => Env.Environment
  -> (CatalogMetadata, InvalidationKeys) `arr` SchemaCache
buildSchemaCacheRule env = proc (catalogMetadata, invalidationKeys) -> do
  invalidationKeysDep <- Inc.newDependency -< invalidationKeys

  -- Step 1: Process metadata and collect dependency information.
  (outputs, collectedInfo) <-
    runWriterA buildAndCollectInfo -< (catalogMetadata, invalidationKeysDep)
  let (inconsistentObjects, unresolvedDependencies) = partitionCollectedInfo collectedInfo

  -- Step 2: Resolve dependency information and drop dangling dependents.
  (resolvedOutputs, dependencyInconsistentObjects, resolvedDependencies) <-
    resolveDependencies -< (outputs, unresolvedDependencies)

  -- Step 3: Build the GraphQL schema.
  ((remoteSchemaMap, gqlSchema, remoteGQLSchema), gqlSchemaInconsistentObjects) <-
    runWriterA buildGQLSchema -< ( _boTables resolvedOutputs
                                 , _boFunctions resolvedOutputs
                                 , _boRemoteSchemas resolvedOutputs
                                 , _boCustomTypes resolvedOutputs
                                 , _boActions resolvedOutputs
                                 , _boRemoteRelationshipTypes resolvedOutputs
                                 )

  -- Step 4: Build the Relay GraphQL schema.
  relayGQLSchema <- bindA -< Relay.mkRelayGCtxMap (_boTables resolvedOutputs) (_boFunctions resolvedOutputs)

  returnA -< SchemaCache
    { scTables = _boTables resolvedOutputs
    , scActions = _boActions resolvedOutputs
    , scFunctions = _boFunctions resolvedOutputs
    , scRemoteSchemas = remoteSchemaMap
    , scAllowlist = _boAllowlist resolvedOutputs
    , scCustomTypes = _boCustomTypes resolvedOutputs
    , scGCtxMap = gqlSchema
    , scDefaultRemoteGCtx = remoteGQLSchema
    , scRelayGCtxMap = relayGQLSchema
    , scDepMap = resolvedDependencies
    , scInconsistentObjs =
        inconsistentObjects <> dependencyInconsistentObjects <> toList gqlSchemaInconsistentObjects
    , scCronTriggers = _boCronTriggers resolvedOutputs
    }
  where
    buildAndCollectInfo
      :: ( ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
         , ArrowWriter (Seq CollectedInfo) arr, MonadIO m, MonadTx m, MonadReader BuildReason m
         , HasHttpManager m, HasSQLGenCtx m )
      => (CatalogMetadata, Inc.Dependency InvalidationKeys) `arr` BuildOutputs
    buildAndCollectInfo = proc (catalogMetadata, invalidationKeys) -> do
      let CatalogMetadata tables relationships permissions
            eventTriggers remoteSchemas functions allowlistDefs
            computedFields catalogCustomTypes actions remoteRelationships
            cronTriggers = catalogMetadata

      -- tables
      tableRawInfos <- buildTableCache -< (tables, Inc.selectD #_ikMetadata invalidationKeys)

      -- remote schemas
      let remoteSchemaInvalidationKeys = Inc.selectD #_ikRemoteSchemas invalidationKeys
      remoteSchemaMap <- buildRemoteSchemas -< (remoteSchemaInvalidationKeys, remoteSchemas)

      -- relationships and computed fields
      let relationshipsByTable = M.groupOn _crTable relationships
          computedFieldsByTable = M.groupOn (_afcTable . _cccComputedField) computedFields
          remoteRelationshipsByTable = M.groupOn rtrTable remoteRelationships
      rawTableCoreInfos <- (tableRawInfos >- returnA)
        >-> (\info -> (info, relationshipsByTable) >- alignExtraTableInfo mkRelationshipMetadataObject)
        >-> (\info -> (info, computedFieldsByTable) >- alignExtraTableInfo mkComputedFieldMetadataObject)
        >-> (\info -> (info, remoteRelationshipsByTable) >- alignExtraTableInfo mkRemoteRelationshipMetadataObject)
        >-> (| Inc.keyed (\_ (((tableRawInfo, tableRelationships), tableComputedFields), tableRemoteRelationships) -> do
                 let columns = _tciFieldInfoMap tableRawInfo
                 (allFields, typeMap) <- addNonColumnFields -<
                   (tableRawInfos, columns, M.map fst remoteSchemaMap, tableRelationships, tableComputedFields, tableRemoteRelationships)
                 returnA -< (tableRawInfo { _tciFieldInfoMap = allFields }, typeMap)) |)

      let tableCoreInfos = M.map fst rawTableCoreInfos
          remoteRelationshipTypes = mconcat $ map snd $ M.elems rawTableCoreInfos

      -- permissions and event triggers
      tableCoreInfosDep <- Inc.newDependency -< tableCoreInfos
      tableCache <- (tableCoreInfos >- returnA)
        >-> (\info -> (info, M.groupOn _cpTable permissions) >- alignExtraTableInfo mkPermissionMetadataObject)
        >-> (\info -> (info, M.groupOn _cetTable eventTriggers) >- alignExtraTableInfo mkEventTriggerMetadataObject)
        >-> (| Inc.keyed (\_ ((tableCoreInfo, tablePermissions), tableEventTriggers) -> do
                 let tableName = _tciName tableCoreInfo
                     tableFields = _tciFieldInfoMap tableCoreInfo
                 permissionInfos <- buildTablePermissions -<
                   (tableCoreInfosDep, tableName, tableFields, HS.fromList tablePermissions)
                 eventTriggerInfos <- buildTableEventTriggers -< (tableCoreInfo, tableEventTriggers)
                 returnA -< TableInfo
                   { _tiCoreInfo = tableCoreInfo
                   , _tiRolePermInfoMap = permissionInfos
                   , _tiEventTriggerInfoMap = eventTriggerInfos
                   }) |)

      -- sql functions
      functionCache <- (mapFromL _cfFunction functions >- returnA)
        >-> (| Inc.keyed (\_ (CatalogFunction qf systemDefined config funcDefs) -> do
                 let definition = toJSON $ TrackFunction qf
                     metadataObject = MetadataObject (MOFunction qf) definition
                     schemaObject = SOFunction qf
                     addFunctionContext e = "in function " <> qf <<> ": " <> e
                 (| withRecordInconsistency (
                    (| modifyErrA (do
                         rawfi <- bindErrorA -< handleMultipleFunctions qf funcDefs
                         (fi, dep) <- bindErrorA -< mkFunctionInfo qf systemDefined config rawfi
                         recordDependencies -< (metadataObject, schemaObject, [dep])
                         returnA -< fi)
                     |) addFunctionContext)
                  |) metadataObject) |)
        >-> (\infos -> M.catMaybes infos >- returnA)

      -- allow list
      let allowList = allowlistDefs
            & concatMap _cdQueries
            & map (queryWithoutTypeNames . getGQLQuery . _lqQuery)
            & HS.fromList

      -- custom types
      let CatalogCustomTypes customTypes pgScalars = catalogCustomTypes
      maybeResolvedCustomTypes <-
        (| withRecordInconsistency
             (bindErrorA -< resolveCustomTypes tableCache customTypes pgScalars)
         |) (MetadataObject MOCustomTypes $ toJSON customTypes)

      -- actions
      actionCache <- case maybeResolvedCustomTypes of
        Just resolvedCustomTypes -> buildActions -< ((resolvedCustomTypes, pgScalars), actions)

        -- If the custom types themselves are inconsistent, we can’t really do
        -- anything with actions, so just mark them all inconsistent.
        Nothing -> do
          recordInconsistencies -< ( map mkActionMetadataObject actions
                                   , "custom types are inconsistent" )
          returnA -< M.empty

      cronTriggersMap <- buildCronTriggers -< ((), cronTriggers)

      returnA -< BuildOutputs
        { _boTables = tableCache
        , _boActions = actionCache
        , _boFunctions = functionCache
        , _boRemoteSchemas = remoteSchemaMap
        , _boAllowlist = allowList
        -- If 'maybeResolvedCustomTypes' is 'Nothing', the custom types are inconsistent.
        -- In that case, fall back to an empty resolved value for the custom types.
        , _boCustomTypes = fromMaybe (NonObjectTypeMap mempty, mempty) maybeResolvedCustomTypes
        , _boRemoteRelationshipTypes = remoteRelationshipTypes
        , _boCronTriggers = cronTriggersMap
        }

    mkEventTriggerMetadataObject (CatalogEventTrigger qt trn configuration) =
      let objectId = MOTableObj qt $ MTOTrigger trn
          definition = object ["table" .= qt, "configuration" .= configuration]
      in MetadataObject objectId definition

    mkCronTriggerMetadataObject catalogCronTrigger =
      let definition = toJSON catalogCronTrigger
      in MetadataObject (MOCronTrigger (_cctName catalogCronTrigger)) definition

    mkActionMetadataObject (ActionMetadata name comment defn _) =
      MetadataObject (MOAction name) (toJSON $ CreateAction name defn comment)

    mkRemoteSchemaMetadataObject remoteSchema =
      MetadataObject (MORemoteSchema (_arsqName remoteSchema)) (toJSON remoteSchema)

    -- Given a map of table info, “folds in” another map of information, accumulating inconsistent
    -- metadata objects for any entries in the second map that don’t appear in the first map. This
    -- is used to “line up” the metadata for relationships, computed fields, permissions, etc. with
    -- the tracked table info.
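    --
    -- As a worked example (hypothetical values): given
    --
    -- >   baseInfo  = { t1 -> info1 }
    -- >   extraInfo = { t1 -> [r1], t2 -> [r2] }
    --
    -- the result is @{ t1 -> (info1, [r1]) }@; @r2@ is recorded as an
    -- inconsistent metadata object because @t2@ is not a tracked table.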
    alignExtraTableInfo
      :: forall a b arr
       . (ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr)
      => (b -> MetadataObject)
      -> ( M.HashMap QualifiedTable a
         , M.HashMap QualifiedTable [b]
         ) `arr` M.HashMap QualifiedTable (a, [b])
    alignExtraTableInfo mkMetadataObject = proc (baseInfo, extraInfo) -> do
      combinedInfo <-
        (| Inc.keyed (\tableName infos -> combine -< (tableName, infos))
         |) (align baseInfo extraInfo)
      returnA -< M.catMaybes combinedInfo
      where
        combine :: (QualifiedTable, These a [b]) `arr` Maybe (a, [b])
        combine = proc (tableName, infos) -> case infos of
          This base -> returnA -< Just (base, [])
          These base extras -> returnA -< Just (base, extras)
          That extras -> do
            let errorMessage = "table " <> tableName <<> " does not exist"
            recordInconsistencies -< (map mkMetadataObject extras, errorMessage)
            returnA -< Nothing

    buildTableEventTriggers
      :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
         , Inc.ArrowCache m arr, MonadTx m, MonadReader BuildReason m, HasSQLGenCtx m )
      => (TableCoreInfo, [CatalogEventTrigger]) `arr` EventTriggerInfoMap
    buildTableEventTriggers = buildInfoMap _cetName mkEventTriggerMetadataObject buildEventTrigger
      where
        buildEventTrigger = proc (tableInfo, eventTrigger) -> do
          let CatalogEventTrigger qt trn configuration = eventTrigger
              metadataObject = mkEventTriggerMetadataObject eventTrigger
              schemaObjectId = SOTableObj qt $ TOTrigger trn
              addTriggerContext e = "in event trigger " <> trn <<> ": " <> e
          (| withRecordInconsistency (
             (| modifyErrA (do
                  etc <- bindErrorA -< decodeValue configuration
                  (info, dependencies) <- bindErrorA -< subTableP2Setup env qt etc
                  let tableColumns = M.mapMaybe (^? _FIColumn) (_tciFieldInfoMap tableInfo)
                  recreateViewIfNeeded -< (qt, tableColumns, trn, etcDefinition etc)
                  recordDependencies -< (metadataObject, schemaObjectId, dependencies)
                  returnA -< info)
              |) (addTableContext qt . addTriggerContext))
           |) metadataObject
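
        -- Drops and recreates the trigger SQL in hdb_views. A design note
        -- (inferred from the 'BuildReason' check below): on a 'CatalogSync'
        -- rebuild the views are assumed to have already been recreated by the
        -- server instance that performed the metadata change, so work is only
        -- done for 'CatalogUpdate'.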
        recreateViewIfNeeded = Inc.cache $
          arrM \(tableName, tableColumns, triggerName, triggerDefinition) -> do
            buildReason <- ask
            when (buildReason == CatalogUpdate) $ do
              liftTx $ delTriggerQ triggerName -- executes DROP IF EXISTS .. sql
              mkAllTriggersQ triggerName tableName (M.elems tableColumns) triggerDefinition

    buildCronTriggers
      :: ( ArrowChoice arr
         , Inc.ArrowDistribute arr
         , ArrowWriter (Seq CollectedInfo) arr
         , Inc.ArrowCache m arr
         , MonadTx m )
      => ((), [CatalogCronTrigger]) `arr` HashMap TriggerName CronTriggerInfo
    buildCronTriggers = buildInfoMap _cctName mkCronTriggerMetadataObject buildCronTrigger
      where
        buildCronTrigger = proc (_, cronTrigger) -> do
          let triggerName = triggerNameToTxt $ _cctName cronTrigger
              addCronTriggerContext e = "in cron trigger " <> triggerName <> ": " <> e
          (| withRecordInconsistency (
             (| modifyErrA (bindErrorA -< resolveCronTrigger env cronTrigger)
              |) addCronTriggerContext)
           |) (mkCronTriggerMetadataObject cronTrigger)

    buildActions
      :: ( ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
         , ArrowWriter (Seq CollectedInfo) arr )
      => ( ((NonObjectTypeMap, AnnotatedObjects), HashSet PGScalarType)
         , [ActionMetadata]
         ) `arr` HashMap ActionName ActionInfo
    buildActions = buildInfoMap _amName mkActionMetadataObject buildAction
      where
        buildAction = proc ((resolvedCustomTypes, pgScalars), action) -> do
          let ActionMetadata name comment def actionPermissions = action
              addActionContext e = "in action " <> name <<> "; " <> e
          (| withRecordInconsistency (
             (| modifyErrA (do
                  (resolvedDef, outObject, reusedPgScalars) <- liftEitherA <<< bindA -<
                    runExceptT $ resolveAction env resolvedCustomTypes pgScalars def
                  let permissionInfos = map (ActionPermissionInfo . _apmRole) actionPermissions
                      permissionMap = mapFromL _apiRole permissionInfos
                  returnA -< ActionInfo name outObject resolvedDef permissionMap reusedPgScalars comment)
              |) addActionContext)
           |) (mkActionMetadataObject action)

    buildRemoteSchemas
      :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
         , Inc.ArrowCache m arr, MonadIO m, HasHttpManager m )
      => ( Inc.Dependency (HashMap RemoteSchemaName Inc.InvalidationKey)
         , [AddRemoteSchemaQuery]
         ) `arr` HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject)
    buildRemoteSchemas =
      buildInfoMapPreservingMetadata _arsqName mkRemoteSchemaMetadataObject buildRemoteSchema
      where
        -- We want to cache this call because it fetches the remote schema over HTTP, and we don’t
        -- want to re-run that if the remote schema definition hasn’t changed.
        buildRemoteSchema = Inc.cache proc (invalidationKeys, remoteSchema) -> do
          Inc.dependOn -< Inc.selectKeyD (_arsqName remoteSchema) invalidationKeys
          (| withRecordInconsistency (liftEitherA <<< bindA -<
               runExceptT $ addRemoteSchemaP2Setup env remoteSchema)
           |) (mkRemoteSchemaMetadataObject remoteSchema)
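
        -- Because of the 'Inc.dependOn' registration above, bumping a remote
        -- schema’s 'Inc.InvalidationKey' (which is what reloading that remote
        -- schema does) changes this arrow’s input and forces a re-fetch even
        -- though the 'AddRemoteSchemaQuery' itself is unchanged.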

    -- Builds the GraphQL schema and merges in remote schemas. This function is kind of gross, as
    -- it’s possible for the remote schema merging to fail, at which point we have to mark them
    -- inconsistent. This means we have to accumulate the consistent remote schemas as we go, in
    -- addition to the built GraphQL context.
    buildGQLSchema
      :: ( ArrowChoice arr, ArrowWriter (Seq InconsistentMetadata) arr, ArrowKleisli m arr
         , MonadError QErr m )
      => ( TableCache
         , FunctionCache
         , HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject)
         , (NonObjectTypeMap, AnnotatedObjects)
         , ActionCache
         , VT.TypeMap
         ) `arr` (RemoteSchemaMap, GS.GCtxMap, GS.GCtx)
    buildGQLSchema = proc (tableCache, functionCache, remoteSchemas, customTypes, actionCache, remoteRelationshipTypes) -> do
      baseGQLSchema <- bindA -< GS.mkGCtxMap tableCache functionCache actionCache
      (| foldlA' (\(remoteSchemaMap, gqlSchemas, remoteGQLSchemas)
                   (remoteSchemaName, (remoteSchema, metadataObject)) ->
           (| withRecordInconsistency (do
                let gqlSchema = rscGCtx remoteSchema
                mergedGQLSchemas <- bindErrorA -< mergeRemoteSchema gqlSchemas gqlSchema
                mergedRemoteGQLSchemas <- bindErrorA -< mergeGCtx remoteGQLSchemas gqlSchema
                let mergedRemoteSchemaMap = M.insert remoteSchemaName remoteSchema remoteSchemaMap
                returnA -< (mergedRemoteSchemaMap, mergedGQLSchemas, mergedRemoteGQLSchemas))
            |) metadataObject
           >-> (| onNothingA ((remoteSchemaMap, gqlSchemas, remoteGQLSchemas) >- returnA) |))
       |) (M.empty, baseGQLSchema, GC.emptyGCtx) (M.toList remoteSchemas)
      -- merge the custom types into the schema
      >-> (\(remoteSchemaMap, gqlSchema', defGqlCtx') -> do
             (gqlSchema, defGqlCtx) <- bindA -< mergeCustomTypes gqlSchema' defGqlCtx' customTypes
             returnA -< ( remoteSchemaMap
                        , M.map (mergeRemoteTypesWithGCtx remoteRelationshipTypes <$>) gqlSchema
                        , mergeRemoteTypesWithGCtx remoteRelationshipTypes defGqlCtx
                        ))

-- | @'withMetadataCheck' cascade action@ runs @action@ and checks if the schema changed as a
-- result. If it did, it checks to ensure the changes do not violate any integrity constraints, and
-- if not, incorporates them into the schema cache.
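--
-- A usage sketch (illustrative only; @someSchemaAlteringTx@ stands in for any
-- transaction that may change the Postgres schema):
--
-- > withMetadataCheck cascade $ liftTx someSchemaAlteringTx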
withMetadataCheck :: (MonadTx m, CacheRWM m, HasSQLGenCtx m) => Bool -> m a -> m a
withMetadataCheck cascade action = do
  -- Drop hdb_views so the sql query cannot interfere with them
  liftTx $ Q.catchE defaultTxErrorHandler clearHdbViews

  -- Get the metadata before the sql query (everything; we filter it below)
  oldMetaU <- liftTx $ Q.catchE defaultTxErrorHandler fetchTableMeta
  oldFuncMetaU <- liftTx $ Q.catchE defaultTxErrorHandler fetchFunctionMeta

  -- Run the action
  res <- action

  -- Get the metadata after the sql query
  newMeta <- liftTx $ Q.catchE defaultTxErrorHandler fetchTableMeta
  newFuncMeta <- liftTx $ Q.catchE defaultTxErrorHandler fetchFunctionMeta
  sc <- askSchemaCache
  let existingInconsistentObjs = scInconsistentObjs sc
      existingTables = M.keys $ scTables sc
      oldMeta = flip filter oldMetaU $ \tm -> tmTable tm `elem` existingTables
      schemaDiff = getSchemaDiff oldMeta newMeta
      existingFuncs = M.keys $ scFunctions sc
      oldFuncMeta = flip filter oldFuncMetaU $ \fm -> fmFunction fm `elem` existingFuncs
      FunctionDiff droppedFuncs alteredFuncs = getFuncDiff oldFuncMeta newFuncMeta
      overloadedFuncs = getOverloadedFuncs existingFuncs newFuncMeta

  -- Do not allow overloading functions
  unless (null overloadedFuncs) $
    throw400 NotSupported $ "the following tracked function(s) cannot be overloaded: "
    <> reportFuncs overloadedFuncs

  indirectDeps <- getSchemaChangeDeps schemaDiff

  -- Report back with an error if cascade is not set
  when (indirectDeps /= [] && not cascade) $ reportDepsExt indirectDeps []

  -- Purge all the indirect dependents from state
  mapM_ purgeDependentObject indirectDeps

  -- Purge all dropped functions
  let purgedFuncs = flip mapMaybe indirectDeps $ \case
        SOFunction qf -> Just qf
        _             -> Nothing
  forM_ (droppedFuncs \\ purgedFuncs) $ \qf ->
    liftTx $ delFunctionFromCatalog qf

  -- Process altered functions
  forM_ alteredFuncs $ \(qf, newTy) ->
    when (newTy == FTVOLATILE) $
      throw400 NotSupported $
      "type of function " <> qf <<> " is altered to \"VOLATILE\" which is not supported now"

  -- update the schema cache and hdb_catalog with the changes
  processSchemaChanges schemaDiff

  buildSchemaCache
  postSc <- askSchemaCache

  -- Recreate event triggers in hdb_views
  forM_ (M.elems $ scTables postSc) $ \(TableInfo coreInfo _ eventTriggers) -> do
    let table = _tciName coreInfo
        columns = getCols $ _tciFieldInfoMap coreInfo
    forM_ (M.toList eventTriggers) $ \(triggerName, eti) -> do
      let opsDefinition = etiOpsDef eti
      mkAllTriggersQ triggerName table columns opsDefinition

  let currentInconsistentObjs = scInconsistentObjs postSc
  checkNewInconsistentMeta existingInconsistentObjs currentInconsistentObjs

  return res
  where
    reportFuncs = T.intercalate ", " . map dquoteTxt

    processSchemaChanges :: (MonadTx m, CacheRM m) => SchemaDiff -> m ()
    processSchemaChanges schemaDiff = do
      -- Purge the dropped tables
      mapM_ delTableAndDirectDeps droppedTables

      sc <- askSchemaCache
      for_ alteredTables $ \(oldQtn, tableDiff) -> do
        ti <- case M.lookup oldQtn $ scTables sc of
          Just ti -> return ti
          Nothing -> throw500 $ "old table metadata not found in cache : " <>> oldQtn
        processTableChanges (_tiCoreInfo ti) tableDiff
      where
        SchemaDiff droppedTables alteredTables = schemaDiff

checkNewInconsistentMeta
  :: (QErrM m)
  => [InconsistentMetadata] -> [InconsistentMetadata] -> m ()
checkNewInconsistentMeta originalInconsMeta currentInconsMeta =
  unless (null newInconsistentObjects) $
    throwError (err500 Unexpected "cannot continue due to newly found inconsistent metadata")
      { qeInternal = Just $ toJSON newInconsistentObjects }
  where
    diffInconsistentObjects = M.difference `on` groupInconsistentMetadataById
    newInconsistentObjects = nub $ concatMap toList $
      M.elems (currentInconsMeta `diffInconsistentObjects` originalInconsMeta)

{- Note [Keep invalidation keys for inconsistent objects]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
After building the schema cache, we prune InvalidationKeys for objects
that no longer exist in the schema to avoid leaking memory for objects
that have been dropped. However, note that we *don’t* want to drop
keys for objects that are simply inconsistent!

Why? The object is still in the metadata, so next time we reload it,
we’ll reprocess that object. We want to reuse the cache if its
definition hasn’t changed, but if we dropped the invalidation key, it
would incorrectly be reprocessed (since the invalidation key changed
from present to absent). -}