Refactor non-table functions out of Hasura.RQL.DDL.Schema.Table

Alexis King 2019-08-13 18:34:37 -05:00
parent d4dcd28baa
commit c46ecc72dc
15 changed files with 606 additions and 487 deletions

View File

@ -190,8 +190,11 @@ library
, Hasura.RQL.DDL.Relationship
, Hasura.RQL.DDL.Relationship.Rename
, Hasura.RQL.DDL.Relationship.Types
, Hasura.RQL.DDL.Schema.Enum
, Hasura.RQL.DDL.Schema
, Hasura.RQL.DDL.Schema.Cache
, Hasura.RQL.DDL.Schema.Catalog
, Hasura.RQL.DDL.Schema.Diff
, Hasura.RQL.DDL.Schema.Enum
, Hasura.RQL.DDL.Schema.Function
, Hasura.RQL.DDL.Schema.Rename
, Hasura.RQL.DDL.Schema.Table
@ -285,6 +288,7 @@ library
default-extensions: ApplicativeDo
BangPatterns
ConstraintKinds
DeriveDataTypeable
DeriveFoldable
DeriveFunctor
@ -305,6 +309,7 @@ library
ScopedTypeVariables
TemplateHaskell
TupleSections
TypeApplications
TypeFamilies
@ -325,6 +330,7 @@ library
executable graphql-engine
default-extensions: ApplicativeDo
BangPatterns
ConstraintKinds
DeriveDataTypeable
DeriveFoldable
DeriveFunctor
@ -345,6 +351,7 @@ executable graphql-engine
ScopedTypeVariables
TemplateHaskell
TupleSections
TypeApplications
TypeFamilies
main-is: Main.hs

View File

@ -4,19 +4,19 @@ module Migrate
)
where
import Data.Time.Clock (UTCTime)
import Language.Haskell.TH.Syntax (Q, TExp, unTypeQ)
import Hasura.Prelude
import Hasura.RQL.DDL.Schema.Table
import Hasura.RQL.DDL.Schema
import Hasura.RQL.Types
import Hasura.Server.Query
import qualified Data.Aeson as A
import qualified Data.Text as T
import qualified Data.Yaml.TH as Y
import qualified Database.PG.Query as Q
curCatalogVer :: T.Text
curCatalogVer = "20"

View File

@ -10,7 +10,7 @@ import Migrate (curCatalogVer)
import Hasura.EncJSON
import Hasura.Prelude
import Hasura.RQL.DDL.Schema.Table
import Hasura.RQL.DDL.Schema
import Hasura.RQL.Types
import Hasura.Server.Query
import Hasura.SQL.Types

View File

@ -47,8 +47,7 @@ import qualified Hasura.RQL.DDL.Permission.Internal as DP
import qualified Hasura.RQL.DDL.QueryCollection as DQC
import qualified Hasura.RQL.DDL.Relationship as DR
import qualified Hasura.RQL.DDL.RemoteSchema as DRS
import qualified Hasura.RQL.DDL.Schema.Function as DF
import qualified Hasura.RQL.DDL.Schema.Table as DT
import qualified Hasura.RQL.DDL.Schema as DS
import qualified Hasura.RQL.Types.EventTrigger as DTS
import qualified Hasura.RQL.Types.RemoteSchema as TRS
@ -139,7 +138,7 @@ runClearMetadata
runClearMetadata _ = do
adminOnly
liftTx clearMetadata
DT.buildSchemaCacheStrict
DS.buildSchemaCacheStrict
return successMsg
data ReplaceMetadata
@ -223,16 +222,16 @@ applyQP2
applyQP2 (ReplaceMetadata tables mFunctions mSchemas mCollections mAllowlist) = do
liftTx clearMetadata
DT.buildSchemaCacheStrict
DS.buildSchemaCacheStrict
withPathK "tables" $ do
-- tables and views
indexedForM_ tables $ \tableMeta -> do
let trackQuery = DT.TrackTable
{ DT.tName = tableMeta ^. tmTable
, DT.tIsEnum = tableMeta ^. tmIsEnum }
void $ DT.trackExistingTableOrViewP2 trackQuery
let trackQuery = DS.TrackTable
{ DS.tName = tableMeta ^. tmTable
, DS.tIsEnum = tableMeta ^. tmIsEnum }
void $ DS.trackExistingTableOrViewP2 trackQuery
-- Relationships
indexedForM_ tables $ \table -> do
@ -263,7 +262,7 @@ applyQP2 (ReplaceMetadata tables mFunctions mSchemas mCollections mAllowlist) =
-- sql functions
withPathK "functions" $
indexedMapM_ (void . DF.trackFunctionP2) functions
indexedMapM_ (void . DS.trackFunctionP2) functions
-- query collections
withPathK "query_collections" $
@ -444,7 +443,7 @@ runReloadMetadata
=> ReloadMetadata -> m EncJSON
runReloadMetadata _ = do
adminOnly
DT.buildSchemaCache
DS.buildSchemaCache
return successMsg
data DumpInternalState
@ -506,9 +505,8 @@ runDropInconsistentMetadata _ = do
purgeMetadataObj :: MonadTx m => MetadataObjId -> m ()
purgeMetadataObj = liftTx . \case
(MOTable qt) ->
Q.catchE defaultTxErrorHandler $ DT.delTableFromCatalog qt
(MOFunction qf) -> DF.delFunctionFromCatalog qf
(MOTable qt) -> DS.deleteTableFromCatalog qt
(MOFunction qf) -> DS.delFunctionFromCatalog qf
(MORemoteSchema rsn) -> DRS.removeRemoteSchemaFromCatalog rsn
(MOTableObj qt (MTORel rn _)) -> DR.delRelFromCatalog qt rn
(MOTableObj qt (MTOPerm rn pt)) -> DP.dropPermFromCatalog qt rn pt

View File

@ -6,9 +6,9 @@ import Hasura.EncJSON
import Hasura.Prelude
import Hasura.RQL.DDL.Relationship (validateRelP1)
import Hasura.RQL.DDL.Relationship.Types
import Hasura.RQL.DDL.Schema.Rename (renameRelInCatalog)
import Hasura.RQL.DDL.Schema.Table (buildSchemaCache,
checkNewInconsistentMeta)
import Hasura.RQL.DDL.Schema (buildSchemaCache,
renameRelInCatalog,
withNewInconsistentObjsCheck)
import Hasura.RQL.Types
import Hasura.SQL.Types
@ -23,8 +23,7 @@ renameRelP2
, HasSQLGenCtx m
)
=> QualifiedTable -> RelName -> RelInfo -> m ()
renameRelP2 qt newRN relInfo = do
oldSC <- askSchemaCache
renameRelP2 qt newRN relInfo = withNewInconsistentObjsCheck $ do
tabInfo <- askTabInfo qt
-- check for conflicts in fieldInfoMap
case HM.lookup (fromRel newRN) $ _tiFieldInfoMap tabInfo of
@ -37,9 +36,6 @@ renameRelP2 qt newRN relInfo = do
renameRelInCatalog qt oldRN newRN
-- update schema cache
buildSchemaCache
newSC <- askSchemaCache
-- check for new inconsistency
checkNewInconsistentMeta oldSC newSC
where
oldRN = riName relInfo

View File

@ -0,0 +1,117 @@
{-| This module (along with the various @Hasura.RQL.DDL.Schema.*@ modules) provides operations to
load and modify the Hasura catalog and schema cache.
* The /catalog/ refers to the set of PostgreSQL tables and views that store all schema information
known by Hasura. This includes any tracked Postgres tables, views, and functions, all remote
schemas, and any additional Hasura-specific information such as permissions and relationships.
Primitive functions for loading and modifying the catalog are defined in
"Hasura.RQL.DDL.Schema.Catalog", but most uses are wrapped by other functions to synchronize
catalog information with the information in the schema cache.
* The /schema cache/ is a process-global value of type 'SchemaCache' that stores an in-memory
representation of the data stored in the catalog. The in-memory representation is not identical
to the data in the catalog, since it has some post-processing applied to it in order to make it
easier to consume for other parts of the system, such as GraphQL schema generation. For example,
although column information is represented by 'PGRawColumnInfo', the schema cache contains
processed 'PGColumnInfo' values, instead.
Ultimately, the catalog is the source of truth for all information contained in the schema
cache, but to avoid rebuilding the entire schema cache on every change to the catalog, various
functions incrementally update the cache when they modify the catalog.
-}
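To make the division of labor concrete, here is a minimal sketch, not part of this commit, of the pattern a typical DDL handler follows (compare trackExistingTableOrViewP2 later in this diff): write to the catalog first, then rebuild the cache from it, so the catalog remains the source of truth.
-- Hypothetical handler, condensed from trackExistingTableOrViewP2 below.
trackTableSketch :: CacheBuildM m => QualifiedTable -> Bool -> m ()
trackTableSketch tableName isEnum = do
  saveTableToCatalog tableName isEnum      -- catalog write: the source of truth
  buildSchemaCacheFor (MOTable tableName)  -- cache rebuild, with a consistency check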
module Hasura.RQL.DDL.Schema
( module Hasura.RQL.DDL.Schema.Cache
, module Hasura.RQL.DDL.Schema.Catalog
, module Hasura.RQL.DDL.Schema.Function
, module Hasura.RQL.DDL.Schema.Rename
, module Hasura.RQL.DDL.Schema.Table
, RunSQL(..)
, runRunSQL
) where
import Hasura.Prelude
import qualified Data.Text as T
import qualified Data.Text.Encoding as TE
import qualified Database.PG.Query as Q
import qualified Database.PostgreSQL.LibPQ as PQ
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
import Language.Haskell.TH.Syntax (Lift)
import Hasura.EncJSON
import Hasura.RQL.DDL.Schema.Cache
import Hasura.RQL.DDL.Schema.Catalog
import Hasura.RQL.DDL.Schema.Function
import Hasura.RQL.DDL.Schema.Rename
import Hasura.RQL.DDL.Schema.Table
import Hasura.RQL.Instances ()
import Hasura.RQL.Types
import Hasura.Server.Utils (matchRegex)
data RunSQL
= RunSQL
{ rSql :: Text
, rCascade :: !(Maybe Bool)
, rCheckMetadataConsistency :: !(Maybe Bool)
} deriving (Show, Eq, Lift)
$(deriveJSON (aesonDrop 1 snakeCase){omitNothingFields=True} ''RunSQL)
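As a point of reference (an illustration, not part of the commit): aesonDrop 1 snakeCase drops the leading 'r' of each field name and snake_cases the rest, and omitNothingFields suppresses optional fields that are Nothing, so the derived instances behave like this:
-- Illustrative only:
runSQLExample :: Value
runSQLExample = toJSON (RunSQL (T.pack "select 1") (Just True) Nothing)
-- == object ["sql" .= T.pack "select 1", "cascade" .= True]
-- "check_metadata_consistency" is absent because the field is Nothing.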
runRunSQL
:: (QErrM m, UserInfoM m, CacheRWM m, MonadTx m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
=> RunSQL -> m EncJSON
runRunSQL (RunSQL t cascade mChkMDCnstcy) = do
adminOnly
isMDChkNeeded <- maybe (isAltrDropReplace t) return mChkMDCnstcy
bool (execRawSQL t) (withMetadataCheck (or cascade) $ execRawSQL t) isMDChkNeeded
where
execRawSQL :: (MonadTx m) => Text -> m EncJSON
execRawSQL =
fmap (encJFromJValue @RunSQLRes) .
liftTx . Q.multiQE rawSqlErrHandler . Q.fromText
where
rawSqlErrHandler txe =
let e = err400 PostgresError "query execution failed"
in e {qeInternal = Just $ toJSON txe}
isAltrDropReplace :: QErrM m => T.Text -> m Bool
isAltrDropReplace = either throwErr return . matchRegex regex False
where
throwErr s = throw500 $ "compiling regex failed: " <> T.pack s
regex = "alter|drop|replace|create function"
data RunSQLRes
= RunSQLRes
{ rrResultType :: !Text
, rrResult :: !Value
} deriving (Show, Eq)
$(deriveJSON (aesonDrop 2 snakeCase) ''RunSQLRes)
instance Q.FromRes RunSQLRes where
fromRes (Q.ResultOkEmpty _) =
return $ RunSQLRes "CommandOk" Null
fromRes (Q.ResultOkData res) = do
csvRows <- resToCSV res
return $ RunSQLRes "TuplesOk" $ toJSON csvRows
where
resToCSV :: PQ.Result -> ExceptT T.Text IO [[Text]]
resToCSV r = do
nr <- liftIO $ PQ.ntuples r
nc <- liftIO $ PQ.nfields r
hdr <- forM [0..pred nc] $ \ic -> do
colNameBS <- liftIO $ PQ.fname r ic
maybe (return "unknown") decodeBS colNameBS
rows <- forM [0..pred nr] $ \ir ->
forM [0..pred nc] $ \ic -> do
cellValBS <- liftIO $ PQ.getvalue r ir ic
maybe (return "NULL") decodeBS cellValBS
return $ hdr:rows
decodeBS = either (throwError . T.pack . show) return . TE.decodeUtf8'
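For reference (an illustration, not part of the commit), combining this FromRes instance with the derived JSON instance above gives the wire format of a run_sql response: every cell is rendered as text, and the header row produced by resToCSV comes first.
-- Illustrative response shapes:
--   a statement with no rows ~> {"result_type":"CommandOk","result":null}
--   a SELECT                 ~> {"result_type":"TuplesOk",
--                                "result":[["id","name"],["1","alice"]]}
-- NULL cells are rendered as the string "NULL".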

View File

@ -0,0 +1,353 @@
{-| Top-level functions concerned specifically with operations on the schema cache, such as
rebuilding it from the catalog and incorporating schema changes. See the module documentation for
"Hasura.RQL.DDL.Schema" for more details.
__Note__: this module is __mutually recursive__ with other @Hasura.RQL.DDL.Schema.*@ modules, which
both define pieces of the implementation of building the schema cache and define handlers that
trigger schema cache rebuilds. -}
module Hasura.RQL.DDL.Schema.Cache
( CacheBuildM
, buildSchemaCache
, buildSchemaCacheFor
, buildSchemaCacheStrict
, buildSchemaCacheWithoutSetup
, withNewInconsistentObjsCheck
, withMetadataCheck
, purgeDependentObject
, withSchemaObject
, withSchemaObject_
) where
import Hasura.Prelude
import qualified Data.HashMap.Strict as M
import qualified Data.HashSet as HS
import qualified Data.Text as T
import qualified Database.PG.Query as Q
import Data.Aeson
import qualified Hasura.GraphQL.Schema as GS
import Hasura.Db
import Hasura.GraphQL.RemoteServer
import Hasura.RQL.DDL.Deps
import Hasura.RQL.DDL.EventTrigger
import Hasura.RQL.DDL.Permission
import Hasura.RQL.DDL.Permission.Internal
import Hasura.RQL.DDL.Relationship
import Hasura.RQL.DDL.RemoteSchema
import Hasura.RQL.DDL.Schema.Catalog
import Hasura.RQL.DDL.Schema.Diff
import Hasura.RQL.DDL.Schema.Function
import Hasura.RQL.DDL.Schema.Table
import Hasura.RQL.DDL.Utils
import Hasura.RQL.Types
import Hasura.RQL.Types.Catalog
import Hasura.RQL.Types.QueryCollection
import Hasura.SQL.Types
type CacheBuildM m = (CacheRWM m, MonadTx m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
buildSchemaCache :: (CacheBuildM m) => m ()
buildSchemaCache = buildSchemaCacheWithOptions True
buildSchemaCacheWithoutSetup :: (CacheBuildM m) => m ()
buildSchemaCacheWithoutSetup = buildSchemaCacheWithOptions False
buildSchemaCacheWithOptions :: (CacheBuildM m) => Bool -> m ()
buildSchemaCacheWithOptions withSetup = do
-- clean hdb_views
when withSetup $ liftTx $ Q.catchE defaultTxErrorHandler clearHdbViews
-- reset the current schema cache
writeSchemaCache emptySchemaCache
sqlGenCtx <- askSQLGenCtx
-- fetch all catalog metadata
CatalogMetadata tables relationships permissions
eventTriggers remoteSchemas functions fkeys' allowlistDefs
<- liftTx fetchCatalogData
let fkeys = HS.fromList fkeys'
-- tables
modTableCache =<< buildTableCache tables
-- relationships
forM_ relationships $ \(CatalogRelation qt rn rt rDef cmnt) -> do
let objId = MOTableObj qt $ MTORel rn rt
def = toJSON $ WithTable qt $ RelDef rn rDef cmnt
mkInconsObj = InconsistentMetadataObj objId (MOTRel rt) def
modifyErr (\e -> "table " <> qt <<> "; rel " <> rn <<> "; " <> e) $
withSchemaObject_ mkInconsObj $
case rt of
ObjRel -> do
using <- decodeValue rDef
let relDef = RelDef rn using Nothing
validateObjRel qt relDef
objRelP2Setup qt fkeys relDef
ArrRel -> do
using <- decodeValue rDef
let relDef = RelDef rn using Nothing
validateArrRel qt relDef
arrRelP2Setup qt fkeys relDef
-- permissions
forM_ permissions $ \(CatalogPermission qt rn pt pDef cmnt) -> do
let objId = MOTableObj qt $ MTOPerm rn pt
def = toJSON $ WithTable qt $ PermDef rn pDef cmnt
mkInconsObj = InconsistentMetadataObj objId (MOTPerm pt) def
modifyErr (\e -> "table " <> qt <<> "; role " <> rn <<> "; " <> e) $
withSchemaObject_ mkInconsObj $
case pt of
PTInsert -> permHelper withSetup sqlGenCtx qt rn pDef PAInsert
PTSelect -> permHelper withSetup sqlGenCtx qt rn pDef PASelect
PTUpdate -> permHelper withSetup sqlGenCtx qt rn pDef PAUpdate
PTDelete -> permHelper withSetup sqlGenCtx qt rn pDef PADelete
-- event triggers
forM_ eventTriggers $ \(CatalogEventTrigger qt trn configuration) -> do
let objId = MOTableObj qt $ MTOTrigger trn
def = object ["table" .= qt, "configuration" .= configuration]
mkInconsObj = InconsistentMetadataObj objId MOTEventTrigger def
withSchemaObject_ mkInconsObj $ do
etc <- decodeValue configuration
subTableP2Setup qt etc
allCols <- getCols . _tiFieldInfoMap <$> askTabInfo qt
when withSetup $ liftTx $
mkAllTriggersQ trn qt allCols (stringifyNum sqlGenCtx) (etcDefinition etc)
-- sql functions
forM_ functions $ \(CatalogFunction qf rawfiM) -> do
let def = toJSON $ TrackFunction qf
mkInconsObj =
InconsistentMetadataObj (MOFunction qf) MOTFunction def
modifyErr (\e -> "function " <> qf <<> "; " <> e) $
withSchemaObject_ mkInconsObj $ do
rawfi <- onNothing rawfiM $
throw400 NotExists $ "no such function exists in postgres : " <>> qf
trackFunctionP2Setup qf rawfi
-- allow list
replaceAllowlist $ concatMap _cdQueries allowlistDefs
-- build GraphQL context with tables and functions
GS.buildGCtxMapPG
-- remote schemas
forM_ remoteSchemas resolveSingleRemoteSchema
where
permHelper setup sqlGenCtx qt rn pDef pa = do
qCtx <- mkAdminQCtx sqlGenCtx <$> askSchemaCache
perm <- decodeValue pDef
let permDef = PermDef rn perm Nothing
createPerm = WithTable qt permDef
(permInfo, deps) <- liftP1WithQCtx qCtx $ createPermP1 createPerm
when setup $ addPermP2Setup qt permDef permInfo
addPermToCache qt rn pa permInfo deps
-- p2F qt rn p1Res
resolveSingleRemoteSchema rs = do
let AddRemoteSchemaQuery name _ _ = rs
mkInconsObj = InconsistentMetadataObj (MORemoteSchema name)
MOTRemoteSchema (toJSON rs)
withSchemaObject_ mkInconsObj $ do
rsCtx <- addRemoteSchemaP2Setup rs
sc <- askSchemaCache
let gCtxMap = scGCtxMap sc
defGCtx = scDefaultRemoteGCtx sc
rGCtx = convRemoteGCtx $ rscGCtx rsCtx
mergedGCtxMap <- mergeRemoteSchema gCtxMap rGCtx
mergedDefGCtx <- mergeGCtx defGCtx rGCtx
writeSchemaCache sc { scGCtxMap = mergedGCtxMap
, scDefaultRemoteGCtx = mergedDefGCtx
}
-- | Rebuilds the schema cache. If an object with the given object id became newly inconsistent,
-- raises an error about it specifically. Otherwise, raises a generic metadata inconsistency error.
buildSchemaCacheFor :: (CacheBuildM m) => MetadataObjId -> m ()
buildSchemaCacheFor objectId = do
oldSchemaCache <- askSchemaCache
buildSchemaCache
newSchemaCache <- askSchemaCache
let diffInconsistentObjects = getDifference _moId `on` scInconsistentObjs
newInconsistentObjects = newSchemaCache `diffInconsistentObjects` oldSchemaCache
for_ (find ((== objectId) . _moId) newInconsistentObjects) $ \matchingObject ->
throw400 ConstraintViolation (_moReason matchingObject)
unless (null newInconsistentObjects) $
throwError (err400 Unexpected "cannot continue due to new inconsistent metadata")
{ qeInternal = Just $ toJSON newInconsistentObjects }
-- | Like 'buildSchemaCache', but fails if there is any inconsistent metadata.
buildSchemaCacheStrict :: (CacheBuildM m) => m ()
buildSchemaCacheStrict = do
buildSchemaCache
sc <- askSchemaCache
let inconsObjs = scInconsistentObjs sc
unless (null inconsObjs) $ do
let err = err400 Unexpected "cannot continue due to inconsistent metadata"
throwError err{qeInternal = Just $ toJSON inconsObjs}
-- | Executes the given action, and if any new 'InconsistentMetadataObj's are added to the schema
-- cache as a result of its execution, raises an error.
withNewInconsistentObjsCheck :: (QErrM m, CacheRM m) => m a -> m a
withNewInconsistentObjsCheck action = do
originalObjects <- scInconsistentObjs <$> askSchemaCache
result <- action
currentObjects <- scInconsistentObjs <$> askSchemaCache
checkNewInconsistentMeta originalObjects currentObjects
pure result
-- | @'withMetadataCheck' cascade action@ runs @action@ and checks if the schema changed as a
-- result. If it did, it checks to ensure the changes do not violate any integrity constraints, and
-- if not, incorporates them into the schema cache.
withMetadataCheck :: (CacheBuildM m) => Bool -> m a -> m a
withMetadataCheck cascade action = do
-- Drop hdb_views so they do not interfere with the SQL query
liftTx $ Q.catchE defaultTxErrorHandler clearHdbViews
-- Get the table metadata before the SQL query; this fetches everything, so it
-- is filtered down to tracked objects below
oldMetaU <- liftTx $ Q.catchE defaultTxErrorHandler fetchTableMeta
oldFuncMetaU <- liftTx $ Q.catchE defaultTxErrorHandler fetchFunctionMeta
-- Run the action
res <- action
-- Get the metadata after the sql query
newMeta <- liftTx $ Q.catchE defaultTxErrorHandler fetchTableMeta
newFuncMeta <- liftTx $ Q.catchE defaultTxErrorHandler fetchFunctionMeta
sc <- askSchemaCache
let existingInconsistentObjs = scInconsistentObjs sc
existingTables = M.keys $ scTables sc
oldMeta = flip filter oldMetaU $ \tm -> tmTable tm `elem` existingTables
schemaDiff = getSchemaDiff oldMeta newMeta
existingFuncs = M.keys $ scFunctions sc
oldFuncMeta = flip filter oldFuncMetaU $ \fm -> funcFromMeta fm `elem` existingFuncs
FunctionDiff droppedFuncs alteredFuncs = getFuncDiff oldFuncMeta newFuncMeta
overloadedFuncs = getOverloadedFuncs existingFuncs newFuncMeta
-- Do not allow overloading functions
unless (null overloadedFuncs) $
throw400 NotSupported $ "the following tracked function(s) cannot be overloaded: "
<> reportFuncs overloadedFuncs
indirectDeps <- getSchemaChangeDeps schemaDiff
-- Report back with an error if cascade is not set
when (indirectDeps /= [] && not cascade) $ reportDepsExt indirectDeps []
-- Purge all the indirect dependents from state
mapM_ purgeDependentObject indirectDeps
-- Purge all dropped functions
let purgedFuncs = flip mapMaybe indirectDeps $ \dep ->
case dep of
SOFunction qf -> Just qf
_ -> Nothing
forM_ (droppedFuncs \\ purgedFuncs) $ \qf -> do
liftTx $ delFunctionFromCatalog qf
delFunctionFromCache qf
-- Process altered functions
forM_ alteredFuncs $ \(qf, newTy) ->
when (newTy == FTVOLATILE) $
throw400 NotSupported $
"type of function " <> qf <<> " is altered to \"VOLATILE\" which is not supported now"
-- update the schema cache and hdb_catalog with the changes
reloadRequired <- processSchemaChanges schemaDiff
let withReload = do -- in case of any rename
buildSchemaCache
currentInconsistentObjs <- scInconsistentObjs <$> askSchemaCache
checkNewInconsistentMeta existingInconsistentObjs currentInconsistentObjs
withoutReload = do
postSc <- askSchemaCache
-- recreate the insert permission infra
forM_ (M.elems $ scTables postSc) $ \ti -> do
let tn = _tiName ti
forM_ (M.elems $ _tiRolePermInfoMap ti) $ \rpi ->
maybe (return ()) (liftTx . buildInsInfra tn) $ _permIns rpi
strfyNum <- stringifyNum <$> askSQLGenCtx
--recreate triggers
forM_ (M.elems $ scTables postSc) $ \ti -> do
let tn = _tiName ti
cols = getCols $ _tiFieldInfoMap ti
forM_ (M.toList $ _tiEventTriggerInfoMap ti) $ \(trn, eti) -> do
let fullspec = etiOpsDef eti
liftTx $ mkAllTriggersQ trn tn cols strfyNum fullspec
bool withoutReload withReload reloadRequired
return res
where
reportFuncs = T.intercalate ", " . map dquoteTxt
processSchemaChanges :: (MonadTx m, CacheRWM m) => SchemaDiff -> m Bool
processSchemaChanges schemaDiff = do
-- Purge the dropped tables
mapM_ delTableAndDirectDeps droppedTables
sc <- askSchemaCache
fmap or $ forM alteredTables $ \(oldQtn, tableDiff) -> do
ti <- case M.lookup oldQtn $ scTables sc of
Just ti -> return ti
Nothing -> throw500 $ "old table metadata not found in cache : " <>> oldQtn
processTableChanges ti tableDiff
where
SchemaDiff droppedTables alteredTables = schemaDiff
checkNewInconsistentMeta
:: (QErrM m)
=> [InconsistentMetadataObj] -> [InconsistentMetadataObj] -> m ()
checkNewInconsistentMeta originalInconsMeta currentInconsMeta =
unless (null newInconsMetaObjects) $
throwError (err500 Unexpected "cannot continue due to newly found inconsistent metadata")
{ qeInternal = Just $ toJSON newInconsMetaObjects }
where
newInconsMetaObjects = getDifference _moId currentInconsMeta originalInconsMeta
purgeDependentObject :: (CacheRWM m, MonadTx m) => SchemaObjId -> m ()
purgeDependentObject schemaObjId = case schemaObjId of
(SOTableObj tn (TOPerm rn pt)) -> do
liftTx $ dropPermFromCatalog tn rn pt
withPermType pt delPermFromCache rn tn
(SOTableObj qt (TORel rn)) -> do
liftTx $ delRelFromCatalog qt rn
delRelFromCache rn qt
(SOFunction qf) -> do
liftTx $ delFunctionFromCatalog qf
delFunctionFromCache qf
(SOTableObj qt (TOTrigger trn)) -> do
liftTx $ delEventTriggerFromCatalog trn
delEventTriggerFromCache qt trn
_ -> throw500 $
"unexpected dependent object : " <> reportSchemaObj schemaObjId
-- | @'withSchemaObject' f action@ runs @action@, and if it raises any errors, applies @f@ to the
-- error message to produce an 'InconsistentMetadataObj', then adds the object to the schema cache
-- and returns 'Nothing' instead of aborting.
withSchemaObject :: (QErrM m, CacheRWM m) => (Text -> InconsistentMetadataObj) -> m a -> m (Maybe a)
withSchemaObject f action =
(Just <$> action) `catchError` \err -> do
sc <- askSchemaCache
let inconsObj = f $ qeError err
allInconsObjs = inconsObj:scInconsistentObjs sc
writeSchemaCache sc { scInconsistentObjs = allInconsObjs }
pure Nothing
withSchemaObject_ :: (QErrM m, CacheRWM m) => (Text -> InconsistentMetadataObj) -> m () -> m ()
withSchemaObject_ f = void . withSchemaObject f

View File

@ -0,0 +1,20 @@
module Hasura.RQL.DDL.Schema.Cache where
import Hasura.Prelude
import Hasura.Db
import Hasura.RQL.Types
type CacheBuildM m = (CacheRWM m, MonadTx m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
buildSchemaCacheStrict :: (CacheBuildM m) => m ()
buildSchemaCacheFor :: (CacheBuildM m) => MetadataObjId -> m ()
buildSchemaCache :: (CacheBuildM m) => m ()
buildSchemaCacheWithoutSetup :: (CacheBuildM m) => m ()
withNewInconsistentObjsCheck :: (QErrM m, CacheRM m) => m a -> m a
withMetadataCheck :: (CacheBuildM m) => Bool -> m a -> m a
purgeDependentObject :: (CacheRWM m, MonadTx m) => SchemaObjId -> m ()
withSchemaObject :: (QErrM m, CacheRWM m) => (Text -> InconsistentMetadataObj) -> m a -> m (Maybe a)
withSchemaObject_ :: (QErrM m, CacheRWM m) => (Text -> InconsistentMetadataObj) -> m () -> m ()
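(An aside, not part of the commit: the listing above is the .hs-boot interface that breaks the module cycle described in the Cache module's note. GHC compiles the boot file's signatures first, and modules on the other side of the cycle import it with a {-# SOURCE #-} pragma, as Hasura.RQL.DDL.Schema.Table does later in this diff. A minimal, self-contained illustration with hypothetical modules A and B:)
-- A.hs-boot: type signatures only, no definitions.
module A where
f :: Int -> Int
-- B.hs: imports A's boot interface instead of the full module.
module B where
import {-# SOURCE #-} A
g :: Int -> Int
g n = f n + 1
-- A.hs: may now import B; GHC checks it against A.hs-boot.
module A where
import B
f :: Int -> Int
f 0 = 0
f n = g (n - 2)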

View File

@ -0,0 +1,39 @@
-- | Functions for loading and modifying the catalog. See the module documentation for
-- "Hasura.RQL.DDL.Schema" for more details.
module Hasura.RQL.DDL.Schema.Catalog
( fetchCatalogData
, saveTableToCatalog
, updateTableIsEnumInCatalog
, deleteTableFromCatalog
) where
import Hasura.Prelude
import qualified Database.PG.Query as Q
import Hasura.Db
import Hasura.RQL.Types.Catalog
import Hasura.SQL.Types
fetchCatalogData :: (MonadTx m) => m CatalogMetadata
fetchCatalogData = liftTx $ Q.getAltJ . runIdentity . Q.getRow <$> Q.withQE defaultTxErrorHandler
$(Q.sqlFromFile "src-rsr/catalog_metadata.sql") () True
saveTableToCatalog :: (MonadTx m) => QualifiedTable -> Bool -> m ()
saveTableToCatalog (QualifiedObject sn tn) isEnum = liftTx $ Q.unitQE defaultTxErrorHandler [Q.sql|
INSERT INTO "hdb_catalog"."hdb_table" (table_schema, table_name, is_enum)
VALUES ($1, $2, $3)
|] (sn, tn, isEnum) False
updateTableIsEnumInCatalog :: (MonadTx m) => QualifiedTable -> Bool -> m ()
updateTableIsEnumInCatalog (QualifiedObject sn tn) isEnum =
liftTx $ Q.unitQE defaultTxErrorHandler [Q.sql|
UPDATE "hdb_catalog"."hdb_table" SET is_enum = $3
WHERE table_schema = $1 AND table_name = $2
|] (sn, tn, isEnum) False
deleteTableFromCatalog :: (MonadTx m) => QualifiedTable -> m ()
deleteTableFromCatalog (QualifiedObject sn tn) = liftTx $ Q.unitQE defaultTxErrorHandler [Q.sql|
DELETE FROM "hdb_catalog"."hdb_table"
WHERE table_schema = $1 AND table_name = $2
|] (sn, tn) False

View File

@ -1,61 +1,46 @@
{-# LANGUAGE TypeApplications #-}
{- |
Description: Create/delete SQL tables to/from Hasura metadata.
-}
module Hasura.RQL.DDL.Schema.Table where
-- | Description: Create/delete SQL tables to/from Hasura metadata.
module Hasura.RQL.DDL.Schema.Table
( TrackTable(..)
, runTrackTableQ
, trackExistingTableOrViewP2
, UntrackTable(..)
, runUntrackTableQ
, SetTableIsEnum(..)
, runSetExistingTableIsEnumQ
, buildTableCache
, delTableAndDirectDeps
, processTableChanges
) where
import Hasura.EncJSON
import Hasura.GraphQL.RemoteServer
import Hasura.Prelude
import Hasura.RQL.DDL.Deps
import Hasura.RQL.DDL.EventTrigger
import Hasura.RQL.DDL.Permission
import Hasura.RQL.DDL.Permission.Internal
import Hasura.RQL.DDL.Relationship
import Hasura.RQL.DDL.RemoteSchema
import {-# SOURCE #-} Hasura.RQL.DDL.Schema.Cache
import Hasura.RQL.DDL.Schema.Catalog
import Hasura.RQL.DDL.Schema.Diff
import Hasura.RQL.DDL.Schema.Enum
import Hasura.RQL.DDL.Schema.Function
import Hasura.RQL.DDL.Schema.Rename
import Hasura.RQL.DDL.Utils
import Hasura.RQL.Types
import Hasura.RQL.Types.Catalog
import Hasura.RQL.Types.QueryCollection
import Hasura.Server.Utils (matchRegex)
import Hasura.SQL.Types
import qualified Database.PG.Query as Q
import qualified Hasura.GraphQL.Schema as GS
import Control.Lens.Extended hiding ((.=))
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
import Instances.TH.Lift ()
import Language.Haskell.TH.Syntax (Lift)
import Network.URI.Extended ()
import qualified Data.HashMap.Strict as M
import qualified Data.HashSet as HS
import qualified Data.Text as T
import qualified Data.Text.Encoding as TE
import qualified Database.PostgreSQL.LibPQ as PQ
delTableFromCatalog :: QualifiedTable -> Q.Tx ()
delTableFromCatalog (QualifiedObject sn tn) =
Q.unitQ [Q.sql|
DELETE FROM "hdb_catalog"."hdb_table"
WHERE table_schema = $1 AND table_name = $2
|] (sn, tn) False
saveTableToCatalog :: TrackTable -> Q.Tx ()
saveTableToCatalog (TrackTable (QualifiedObject sn tn) isEnum) =
Q.unitQ [Q.sql|
INSERT INTO "hdb_catalog"."hdb_table" (table_schema, table_name, is_enum)
VALUES ($1, $2, $3)
|] (sn, tn, isEnum) False
data TrackTable
= TrackTable
@ -107,14 +92,12 @@ trackExistingTableOrViewP1 TrackTable { tName = vn } = do
trackExistingTableOrViewP2
:: (QErrM m, CacheRWM m, MonadTx m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
=> TrackTable -> m EncJSON
trackExistingTableOrViewP2 query@TrackTable { tName = tableName } = do
trackExistingTableOrViewP2 (TrackTable tableName isEnum) = do
sc <- askSchemaCache
let defGCtx = scDefaultRemoteGCtx sc
GS.checkConflictingNode defGCtx $ GS.qualObjectToName tableName
liftTx $ Q.catchE defaultTxErrorHandler $ saveTableToCatalog query
saveTableToCatalog tableName isEnum
buildSchemaCacheFor (MOTable tableName)
return successMsg
runTrackTableQ
@ -130,37 +113,10 @@ runSetExistingTableIsEnumQ
runSetExistingTableIsEnumQ (SetTableIsEnum tableName isEnum) = do
adminOnly
void $ askTabInfo tableName -- assert that table is tracked
liftTx $ Q.unitQE defaultTxErrorHandler [Q.sql|
UPDATE "hdb_catalog"."hdb_table" SET is_enum = $3
WHERE table_schema = $1 AND table_name = $2
|] (qSchema tableName, qName tableName, isEnum) False
updateTableIsEnumInCatalog tableName isEnum
buildSchemaCacheFor (MOTable tableName)
return successMsg
purgeDep :: (CacheRWM m, MonadTx m)
=> SchemaObjId -> m ()
purgeDep schemaObjId = case schemaObjId of
(SOTableObj tn (TOPerm rn pt)) -> do
liftTx $ dropPermFromCatalog tn rn pt
withPermType pt delPermFromCache rn tn
(SOTableObj qt (TORel rn)) -> do
liftTx $ delRelFromCatalog qt rn
delRelFromCache rn qt
(SOFunction qf) -> do
liftTx $ delFunctionFromCatalog qf
delFunctionFromCache qf
(SOTableObj qt (TOTrigger trn)) -> do
liftTx $ delEventTriggerFromCatalog trn
delEventTriggerFromCache qt trn
_ -> throw500 $
"unexpected dependent object : " <> reportSchemaObj schemaObjId
unTrackExistingTableOrViewP1
:: (CacheRM m, UserInfoM m, QErrM m) => UntrackTable -> m ()
unTrackExistingTableOrViewP1 (UntrackTable vn _) = do
@ -187,8 +143,8 @@ unTrackExistingTableOrViewP2 (UntrackTable qtn cascade) = do
-- Report back with an error if cascade is not set
when (indirectDeps /= [] && not (or cascade)) $ reportDepsExt indirectDeps []
-- Purge all the dependants from state
mapM_ purgeDep indirectDeps
-- Purge all the dependents from state
mapM_ purgeDependentObject indirectDeps
-- delete the table and its direct dependencies
delTableAndDirectDeps qtn
@ -299,23 +255,9 @@ delTableAndDirectDeps qtn@(QualifiedObject sn tn) = do
DELETE FROM "hdb_catalog"."event_triggers"
WHERE schema_name = $1 AND table_name = $2
|] (sn, tn) False
delTableFromCatalog qtn
deleteTableFromCatalog qtn
delTableFromCache qtn
processSchemaChanges :: (MonadTx m, CacheRWM m) => SchemaDiff -> m Bool
processSchemaChanges schemaDiff = do
-- Purge the dropped tables
mapM_ delTableAndDirectDeps droppedTables
sc <- askSchemaCache
fmap or $ forM alteredTables $ \(oldQtn, tableDiff) -> do
ti <- case M.lookup oldQtn $ scTables sc of
Just ti -> return ti
Nothing -> throw500 $ "old table metadata not found in cache : " <>> oldQtn
processTableChanges ti tableDiff
where
SchemaDiff droppedTables alteredTables = schemaDiff
-- | Builds an initial @'TableCache' 'PGColumnInfo'@ from catalog information. Does not fill in
-- '_tiRolePermInfoMap' or '_tiEventTriggerInfoMap' at all, and '_tiFieldInfoMap' only contains
-- columns, not relationships; those pieces of information are filled in by later stages.
@ -404,340 +346,3 @@ processColumnInfoUsingCache :: (CacheRM m, QErrM m) => QualifiedTable -> PGRawCo
processColumnInfoUsingCache tableName rawInfo = do
tables <- scTables <$> askSchemaCache
processColumnInfo (M.mapMaybe _tiEnumValues tables) tableName rawInfo
withSchemaObject
:: (QErrM m, CacheRWM m)
=> (T.Text -> InconsistentMetadataObj)
-> m a
-> m (Maybe a)
withSchemaObject f action =
(Just <$> action) `catchError` \err -> do
sc <- askSchemaCache
let inconsObj = f $ qeError err
allInconsObjs = inconsObj:scInconsistentObjs sc
writeSchemaCache sc { scInconsistentObjs = allInconsObjs }
pure Nothing
withSchemaObject_ :: (QErrM m, CacheRWM m) => (T.Text -> InconsistentMetadataObj) -> m () -> m ()
withSchemaObject_ f = void . withSchemaObject f
checkNewInconsistentMeta
:: (QErrM m)
=> SchemaCache -- old schema cache
-> SchemaCache -- new schema cache
-> m ()
checkNewInconsistentMeta oldSC newSC =
unless (null newInconsMetaObjects) $ do
let err = err500 Unexpected
"cannot continue due to newly found inconsistent metadata"
throwError err{qeInternal = Just $ toJSON newInconsMetaObjects}
where
oldInconsMeta = scInconsistentObjs oldSC
newInconsMeta = scInconsistentObjs newSC
newInconsMetaObjects = getDifference _moId newInconsMeta oldInconsMeta
buildSchemaCacheStrict
:: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
=> m ()
buildSchemaCacheStrict = do
buildSchemaCache
sc <- askSchemaCache
let inconsObjs = scInconsistentObjs sc
unless (null inconsObjs) $ do
let err = err400 Unexpected "cannot continue due to inconsistent metadata"
throwError err{qeInternal = Just $ toJSON inconsObjs}
-- | Rebuilds the schema cache. If an object with the given object id became newly inconsistent,
-- raises an error about it specifically. Otherwise, raises a generic metadata inconsistency error.
buildSchemaCacheFor
:: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
=> MetadataObjId -> m ()
buildSchemaCacheFor objectId = do
oldSchemaCache <- askSchemaCache
buildSchemaCache
newSchemaCache <- askSchemaCache
let diffInconsistentObjects = getDifference _moId `on` scInconsistentObjs
newInconsistentObjects = newSchemaCache `diffInconsistentObjects` oldSchemaCache
for_ (find ((== objectId) . _moId) newInconsistentObjects) $ \matchingObject ->
throw400 ConstraintViolation (_moReason matchingObject)
unless (null newInconsistentObjects) $
throwError (err400 Unexpected "cannot continue due to new inconsistent metadata")
{ qeInternal = Just $ toJSON newInconsistentObjects }
buildSchemaCache
:: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
=> m ()
buildSchemaCache = buildSchemaCacheG True
buildSCWithoutSetup
:: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
=> m ()
buildSCWithoutSetup = buildSchemaCacheG False
buildSchemaCacheG
:: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
=> Bool -> m ()
buildSchemaCacheG withSetup = do
-- clean hdb_views
when withSetup $ liftTx $ Q.catchE defaultTxErrorHandler clearHdbViews
-- reset the current schemacache
writeSchemaCache emptySchemaCache
sqlGenCtx <- askSQLGenCtx
-- fetch all catalog metadata
CatalogMetadata tables relationships permissions
eventTriggers remoteSchemas functions fkeys' allowlistDefs
<- liftTx fetchCatalogData
let fkeys = HS.fromList fkeys'
-- tables
modTableCache =<< buildTableCache tables
-- relationships
forM_ relationships $ \(CatalogRelation qt rn rt rDef cmnt) -> do
let objId = MOTableObj qt $ MTORel rn rt
def = toJSON $ WithTable qt $ RelDef rn rDef cmnt
mkInconsObj = InconsistentMetadataObj objId (MOTRel rt) def
modifyErr (\e -> "table " <> qt <<> "; rel " <> rn <<> "; " <> e) $
withSchemaObject_ mkInconsObj $
case rt of
ObjRel -> do
using <- decodeValue rDef
let relDef = RelDef rn using Nothing
validateObjRel qt relDef
objRelP2Setup qt fkeys relDef
ArrRel -> do
using <- decodeValue rDef
let relDef = RelDef rn using Nothing
validateArrRel qt relDef
arrRelP2Setup qt fkeys relDef
-- permissions
forM_ permissions $ \(CatalogPermission qt rn pt pDef cmnt) -> do
let objId = MOTableObj qt $ MTOPerm rn pt
def = toJSON $ WithTable qt $ PermDef rn pDef cmnt
mkInconsObj = InconsistentMetadataObj objId (MOTPerm pt) def
modifyErr (\e -> "table " <> qt <<> "; role " <> rn <<> "; " <> e) $
withSchemaObject_ mkInconsObj $
case pt of
PTInsert -> permHelper withSetup sqlGenCtx qt rn pDef PAInsert
PTSelect -> permHelper withSetup sqlGenCtx qt rn pDef PASelect
PTUpdate -> permHelper withSetup sqlGenCtx qt rn pDef PAUpdate
PTDelete -> permHelper withSetup sqlGenCtx qt rn pDef PADelete
-- event triggers
forM_ eventTriggers $ \(CatalogEventTrigger qt trn configuration) -> do
let objId = MOTableObj qt $ MTOTrigger trn
def = object ["table" .= qt, "configuration" .= configuration]
mkInconsObj = InconsistentMetadataObj objId MOTEventTrigger def
withSchemaObject_ mkInconsObj $ do
etc <- decodeValue configuration
subTableP2Setup qt etc
allCols <- getCols . _tiFieldInfoMap <$> askTabInfo qt
when withSetup $ liftTx $
mkAllTriggersQ trn qt allCols (stringifyNum sqlGenCtx) (etcDefinition etc)
-- sql functions
forM_ functions $ \(CatalogFunction qf rawfiM) -> do
let def = toJSON $ TrackFunction qf
mkInconsObj =
InconsistentMetadataObj (MOFunction qf) MOTFunction def
modifyErr (\e -> "function " <> qf <<> "; " <> e) $
withSchemaObject_ mkInconsObj $ do
rawfi <- onNothing rawfiM $
throw400 NotExists $ "no such function exists in postgres : " <>> qf
trackFunctionP2Setup qf rawfi
-- allow list
replaceAllowlist $ concatMap _cdQueries allowlistDefs
-- build GraphQL context with tables and functions
GS.buildGCtxMapPG
-- remote schemas
forM_ remoteSchemas resolveSingleRemoteSchema
where
permHelper setup sqlGenCtx qt rn pDef pa = do
qCtx <- mkAdminQCtx sqlGenCtx <$> askSchemaCache
perm <- decodeValue pDef
let permDef = PermDef rn perm Nothing
createPerm = WithTable qt permDef
(permInfo, deps) <- liftP1WithQCtx qCtx $ createPermP1 createPerm
when setup $ addPermP2Setup qt permDef permInfo
addPermToCache qt rn pa permInfo deps
-- p2F qt rn p1Res
resolveSingleRemoteSchema rs = do
let AddRemoteSchemaQuery name _ _ = rs
mkInconsObj = InconsistentMetadataObj (MORemoteSchema name)
MOTRemoteSchema (toJSON rs)
withSchemaObject_ mkInconsObj $ do
rsCtx <- addRemoteSchemaP2Setup rs
sc <- askSchemaCache
let gCtxMap = scGCtxMap sc
defGCtx = scDefaultRemoteGCtx sc
rGCtx = convRemoteGCtx $ rscGCtx rsCtx
mergedGCtxMap <- mergeRemoteSchema gCtxMap rGCtx
mergedDefGCtx <- mergeGCtx defGCtx rGCtx
writeSchemaCache sc { scGCtxMap = mergedGCtxMap
, scDefaultRemoteGCtx = mergedDefGCtx
}
data RunSQL
= RunSQL
{ rSql :: T.Text
, rCascade :: !(Maybe Bool)
, rCheckMetadataConsistency :: !(Maybe Bool)
} deriving (Show, Eq, Lift)
$(deriveJSON (aesonDrop 1 snakeCase){omitNothingFields=True} ''RunSQL)
data RunSQLRes
= RunSQLRes
{ rrResultType :: !T.Text
, rrResult :: !Value
} deriving (Show, Eq)
$(deriveJSON (aesonDrop 2 snakeCase){omitNothingFields=True} ''RunSQLRes)
instance Q.FromRes RunSQLRes where
fromRes (Q.ResultOkEmpty _) =
return $ RunSQLRes "CommandOk" Null
fromRes (Q.ResultOkData res) = do
csvRows <- resToCSV res
return $ RunSQLRes "TuplesOk" $ toJSON csvRows
execRawSQL :: (MonadTx m) => T.Text -> m EncJSON
execRawSQL =
fmap (encJFromJValue @RunSQLRes) .
liftTx . Q.multiQE rawSqlErrHandler . Q.fromText
where
rawSqlErrHandler txe =
let e = err400 PostgresError "query execution failed"
in e {qeInternal = Just $ toJSON txe}
execWithMDCheck
:: (QErrM m, CacheRWM m, MonadTx m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
=> RunSQL -> m EncJSON
execWithMDCheck (RunSQL t cascade _) = do
-- Drop hdb_views so no interference is caused to the sql query
liftTx $ Q.catchE defaultTxErrorHandler clearHdbViews
-- Get the metadata before the sql query, everything, need to filter this
oldMetaU <- liftTx $ Q.catchE defaultTxErrorHandler fetchTableMeta
oldFuncMetaU <-
liftTx $ Q.catchE defaultTxErrorHandler fetchFunctionMeta
-- Run the SQL
res <- execRawSQL t
-- Get the metadata after the sql query
newMeta <- liftTx $ Q.catchE defaultTxErrorHandler fetchTableMeta
newFuncMeta <- liftTx $ Q.catchE defaultTxErrorHandler fetchFunctionMeta
sc <- askSchemaCache
let existingTables = M.keys $ scTables sc
oldMeta = flip filter oldMetaU $ \tm -> tmTable tm `elem` existingTables
schemaDiff = getSchemaDiff oldMeta newMeta
existingFuncs = M.keys $ scFunctions sc
oldFuncMeta = flip filter oldFuncMetaU $ \fm -> funcFromMeta fm `elem` existingFuncs
FunctionDiff droppedFuncs alteredFuncs = getFuncDiff oldFuncMeta newFuncMeta
overloadedFuncs = getOverloadedFuncs existingFuncs newFuncMeta
-- Do not allow overloading functions
unless (null overloadedFuncs) $
throw400 NotSupported $ "the following tracked function(s) cannot be overloaded: "
<> reportFuncs overloadedFuncs
indirectDeps <- getSchemaChangeDeps schemaDiff
-- Report back with an error if cascade is not set
when (indirectDeps /= [] && not (or cascade)) $ reportDepsExt indirectDeps []
-- Purge all the indirect dependents from state
mapM_ purgeDep indirectDeps
-- Purge all dropped functions
let purgedFuncs = flip mapMaybe indirectDeps $ \dep ->
case dep of
SOFunction qf -> Just qf
_ -> Nothing
forM_ (droppedFuncs \\ purgedFuncs) $ \qf -> do
liftTx $ delFunctionFromCatalog qf
delFunctionFromCache qf
-- Process altered functions
forM_ alteredFuncs $ \(qf, newTy) ->
when (newTy == FTVOLATILE) $
throw400 NotSupported $
"type of function " <> qf <<> " is altered to \"VOLATILE\" which is not supported now"
-- update the schema cache and hdb_catalog with the changes
reloadRequired <- processSchemaChanges schemaDiff
let withReload = do -- in case of any rename
buildSchemaCache
newSC <- askSchemaCache
checkNewInconsistentMeta sc newSC
withoutReload = do
postSc <- askSchemaCache
-- recreate the insert permission infra
forM_ (M.elems $ scTables postSc) $ \ti -> do
let tn = _tiName ti
forM_ (M.elems $ _tiRolePermInfoMap ti) $ \rpi ->
maybe (return ()) (liftTx . buildInsInfra tn) $ _permIns rpi
strfyNum <- stringifyNum <$> askSQLGenCtx
--recreate triggers
forM_ (M.elems $ scTables postSc) $ \ti -> do
let tn = _tiName ti
cols = getCols $ _tiFieldInfoMap ti
forM_ (M.toList $ _tiEventTriggerInfoMap ti) $ \(trn, eti) -> do
let fullspec = etiOpsDef eti
liftTx $ mkAllTriggersQ trn tn cols strfyNum fullspec
bool withoutReload withReload reloadRequired
return res
where
reportFuncs = T.intercalate ", " . map dquoteTxt
isAltrDropReplace :: QErrM m => T.Text -> m Bool
isAltrDropReplace = either throwErr return . matchRegex regex False
where
throwErr s = throw500 $ "compiling regex failed: " <> T.pack s
regex = "alter|drop|replace|create function"
runRunSQL
:: (QErrM m, UserInfoM m, CacheRWM m, MonadTx m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
=> RunSQL -> m EncJSON
runRunSQL q@(RunSQL t _ mChkMDCnstcy) = do
adminOnly
isMDChkNeeded <- maybe (isAltrDropReplace t) return mChkMDCnstcy
bool (execRawSQL t) (execWithMDCheck q) isMDChkNeeded
-- Should be used only after checking the status
resToCSV :: PQ.Result -> ExceptT T.Text IO [[T.Text]]
resToCSV r = do
nr <- liftIO $ PQ.ntuples r
nc <- liftIO $ PQ.nfields r
hdr <- forM [0..pred nc] $ \ic -> do
colNameBS <- liftIO $ PQ.fname r ic
maybe (return "unknown") decodeBS colNameBS
rows <- forM [0..pred nr] $ \ir ->
forM [0..pred nc] $ \ic -> do
cellValBS <- liftIO $ PQ.getvalue r ir ic
maybe (return "NULL") decodeBS cellValBS
return $ hdr:rows
where
decodeBS = either (throwError . T.pack . show) return . TE.decodeUtf8'

View File

@ -1,9 +1,7 @@
-- | This module provides 'fetchCatalogData', which loads the entire catalog in one go from the
-- database, consulting tables such as @hdb_catalog.hdb_table@. It is used by
-- 'Hasura.RQL.Schema.Table.buildSchemaCache' to seed or reload the schema cache.
-- | Types that represent the raw data stored in the catalog. See also: the module documentation for
-- "Hasura.RQL.DDL.Schema".
module Hasura.RQL.Types.Catalog
( fetchCatalogData
, CatalogMetadata(..)
( CatalogMetadata(..)
, CatalogTable(..)
, CatalogTableInfo(..)
@ -16,13 +14,10 @@ module Hasura.RQL.Types.Catalog
import Hasura.Prelude
import qualified Database.PG.Query as Q
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
import Hasura.Db
import Hasura.RQL.DDL.Schema.Function
import Hasura.RQL.Types.Column
import Hasura.RQL.Types.Common
@ -98,9 +93,3 @@ data CatalogMetadata
, _cmAllowlistCollections :: ![CollectionDef]
} deriving (Show, Eq)
$(deriveJSON (aesonDrop 3 snakeCase) ''CatalogMetadata)
-- | See "Hasura.RQL.Types.Catalog".
fetchCatalogData :: (MonadTx m) => m CatalogMetadata
fetchCatalogData =
liftTx $ Q.getAltJ . runIdentity . Q.getRow <$> Q.withQE defaultTxErrorHandler
$(Q.sqlFromFile "src-rsr/catalog_metadata.sql") () True

View File

@ -447,16 +447,12 @@ modDepMapInCache f = do
writeSchemaCache $ sc { scDepMap = f (scDepMap sc)}
class (Monad m) => CacheRM m where
-- Get the schema cache
askSchemaCache :: m SchemaCache
instance (Monad m) => CacheRM (StateT SchemaCache m) where
askSchemaCache = get
class (CacheRM m) => CacheRWM m where
-- Get the schema cache
writeSchemaCache :: SchemaCache -> m ()
instance (Monad m) => CacheRWM (StateT SchemaCache m) where

View File

@ -45,7 +45,7 @@ import qualified Hasura.Server.PGDump as PGD
import Hasura.EncJSON
import Hasura.Prelude hiding (get, put)
import Hasura.RQL.DDL.Schema.Table
import Hasura.RQL.DDL.Schema
import Hasura.RQL.Types
import Hasura.Server.Auth (AuthMode (..),
getUserInfo)

View File

@ -16,8 +16,7 @@ import Hasura.RQL.DDL.QueryCollection
import Hasura.RQL.DDL.Relationship
import Hasura.RQL.DDL.Relationship.Rename
import Hasura.RQL.DDL.RemoteSchema
import Hasura.RQL.DDL.Schema.Function
import Hasura.RQL.DDL.Schema.Table
import Hasura.RQL.DDL.Schema
import Hasura.RQL.DML.Count
import Hasura.RQL.DML.Delete
import Hasura.RQL.DML.Insert

View File

@ -5,10 +5,10 @@ where
import Hasura.Prelude
import Hasura.Logging
import Hasura.RQL.DDL.Schema.Table (buildSCWithoutSetup)
import Hasura.RQL.DDL.Schema (buildSchemaCacheWithoutSetup)
import Hasura.RQL.Types
import Hasura.Server.App (SchemaCacheRef (..), withSCUpdate)
import Hasura.Server.Init (InstanceId (..))
import Hasura.Server.Logging
import Hasura.Server.Query
@ -16,13 +16,13 @@ import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
import qualified Control.Concurrent as C
import qualified Control.Concurrent.STM as STM
import qualified Data.Text as T
import qualified Data.Time as UTC
import qualified Database.PG.Query as PG
import qualified Database.PostgreSQL.LibPQ as PQ
import qualified Network.HTTP.Client as HTTP
pgChannel :: PG.PGChannel
pgChannel = "hasura_schema_update"
@ -204,7 +204,7 @@ refreshSchemaCache sqlGenCtx pool logger httpManager cacheRef threadType msg = d
-- Reload schema cache from catalog
resE <- liftIO $ runExceptT $ withSCUpdate cacheRef logger $
peelRun emptySchemaCache adminUserInfo
httpManager sqlGenCtx (PGExecCtx pool PG.Serializable) buildSCWithoutSetup
httpManager sqlGenCtx (PGExecCtx pool PG.Serializable) buildSchemaCacheWithoutSetup
case resE of
Left e -> logError logger threadType $ TEQueryError e
Right _ ->