server: mssql: apply schema changes by mssql_run_sql DDL on metadata (fix #779)

Co-authored-by: Antoine Leblanc <1618949+nicuveo@users.noreply.github.com>
GitOrigin-RevId: 6905d5914c8a698445c0ef03d6a8303747701e1c
Rakesh Emmadi 2021-05-27 20:36:13 +05:30 committed by hasura-bot
parent 8ebf5eef2b
commit e43d0273e0
25 changed files with 908 additions and 600 deletions


@@ -19,6 +19,7 @@
### Bug fixes and improvements
- server: detect and apply metadata changes by `mssql_run_sql` API if required
- server: fix bug with creation of new cron events when cron trigger is imported via metadata
- server: log warning for deprecated environment variables.
- server: initialise `hdb_catalog` tables only when required, and only run the event loop for sources where it is required


@@ -490,9 +490,9 @@ library
, Hasura.RQL.DDL.Schema.Cache.Fields
, Hasura.RQL.DDL.Schema.Cache.Permission
, Hasura.RQL.DDL.Schema.Catalog
, Hasura.RQL.DDL.Schema.Diff
, Hasura.RQL.DDL.Schema.LegacyCatalog
, Hasura.RQL.DDL.Schema.Common
, Hasura.RQL.DDL.Schema.Diff
, Hasura.RQL.DDL.Schema.Enum
, Hasura.RQL.DDL.Schema.Function
, Hasura.RQL.DDL.Schema.Rename


@@ -2,14 +2,16 @@ module Data.List.Extended
( duplicates
, uniques
, getDifference
, getDifferenceOn
, getOverlapWith
, module L
) where
import Data.Hashable (Hashable)
import Data.Function (on)
import Data.Hashable (Hashable)
import Prelude
import qualified Data.HashMap.Strict as Map
import qualified Data.HashMap.Strict.Extended as Map
import qualified Data.HashSet as Set
import qualified Data.List as L
import qualified Data.List.NonEmpty as NE
@@ -23,3 +25,12 @@ uniques = map NE.head . NE.group
getDifference :: (Eq a, Hashable a) => [a] -> [a] -> Set.HashSet a
getDifference = Set.difference `on` Set.fromList
getDifferenceOn :: (Eq k, Hashable k) => (v -> k) -> [v] -> [v] -> [v]
getDifferenceOn f l = Map.elems . Map.differenceOn f l
getOverlapWith :: (Eq k, Hashable k) => (v -> k) -> [v] -> [v] -> [(v, v)]
getOverlapWith getKey left right =
Map.elems $ Map.intersectionWith (,) (mkMap left) (mkMap right)
where
mkMap = Map.fromList . map (\v -> (getKey v, v))
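For intuition, a hedged sketch of the two new helpers (assuming `Map.differenceOn f l r` keys both lists by `f` and keeps the entries of `l` whose keys are absent from `r`):
-- Illustrative only, not part of the diff:
-- getDifferenceOn fst [(1,"a"), (2,"b")] [(2,"x")]  ~>  [(1,"a")]
-- getOverlapWith  fst [(1,"a"), (2,"b")] [(2,"x")]  ~>  [((2,"b"), (2,"x"))]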


@@ -150,7 +150,7 @@ odbcExceptionToJSONValue =
runJSONPathQuery
:: (MonadError QErr m, MonadIO m)
=> MSSQLPool -> ODBC.Query -> m Text
runJSONPathQuery pool query = do
runJSONPathQuery pool query =
mconcat <$> withMSSQLPool pool (`ODBC.query` query)
withMSSQLPool


@@ -1,23 +1,32 @@
{-# LANGUAGE ViewPatterns #-}
module Hasura.Backends.MSSQL.DDL.RunSQL
( runSQL
, MSSQLRunSQL
, MSSQLRunSQL(..)
, sqlContainsDDLKeyword
)
where
import Hasura.Prelude
import qualified Data.Aeson as J
import qualified Data.HashMap.Strict as M
import qualified Data.HashSet as HS
import qualified Data.Text as T
import qualified Database.ODBC.Internal as ODBC
import qualified Text.Regex.TDFA as TDFA
import Data.Aeson.TH
import Data.String (fromString)
import Hasura.Backends.MSSQL.Connection
import Hasura.Backends.MSSQL.Meta
import Hasura.Base.Error
import Hasura.EncJSON
import Hasura.RQL.DDL.Schema (RunSQLRes (..))
import Hasura.RQL.Types
import Hasura.RQL.DDL.Schema
import Hasura.RQL.DDL.Schema.Diff
import Hasura.RQL.Types hiding (TableName, tmTable)
import Hasura.Server.Utils (quoteRegex)
odbcValueToJValue :: ODBC.Value -> J.Value
@@ -44,11 +53,42 @@ $(deriveJSON hasuraJSON ''MSSQLRunSQL)
runSQL
:: (MonadIO m, CacheRWM m, MonadError QErr m, MetadataM m)
=> MSSQLRunSQL -> m EncJSON
=> MSSQLRunSQL
-> m EncJSON
runSQL (MSSQLRunSQL sqlText source) = do
pool <- _mscConnectionPool <$> askSourceConfig @'MSSQL source
results <- withMSSQLPool pool $ \conn -> ODBC.query conn $ fromString $ T.unpack sqlText
SourceInfo _ tableCache _ sourceConfig <- askSourceInfo @'MSSQL source
let pool = _mscConnectionPool sourceConfig
results <- if sqlContainsDDLKeyword sqlText then withMetadataCheck tableCache pool else runSQLQuery pool
pure $ encJFromJValue $ toResult results
where
runSQLQuery pool = withMSSQLPool pool $ \conn ->
ODBC.query conn $ fromString $ T.unpack sqlText
toTableMeta dbTablesMeta = M.toList dbTablesMeta <&> \(table, dbTableMeta) ->
TableMeta table dbTableMeta [] -- No computed fields
withMetadataCheck tableCache pool = do
-- If the SQL modifies the schema of the database then check for any metadata changes
preActionTablesMeta <- toTableMeta <$> loadDBMetadata pool
results <- runSQLQuery pool
postActionTablesMeta <- toTableMeta <$> loadDBMetadata pool
let trackedTablesMeta = filter (flip M.member tableCache . tmTable) preActionTablesMeta
schemaDiff = getSchemaDiff trackedTablesMeta postActionTablesMeta
metadataUpdater <- execWriterT $ processSchemaDiff source tableCache schemaDiff
-- Build schema cache with updated metadata
withNewInconsistentObjsCheck $
buildSchemaCacheWithInvalidations mempty{ciSources = HS.singleton source} metadataUpdater
pure results
sqlContainsDDLKeyword :: Text -> Bool
sqlContainsDDLKeyword = TDFA.match $$(quoteRegex
TDFA.defaultCompOpt
{ TDFA.caseSensitive = False
, TDFA.multiline = True
, TDFA.lastStarGreedy = True }
TDFA.defaultExecOpt
{ TDFA.captureGroups = False }
"\\balter\\b|\\bdrop\\b|\\bsp_rename\\b")
toResult :: [[(ODBC.Column, ODBC.Value)]] -> RunSQLRes
toResult result = case result of


@@ -25,7 +25,6 @@ import Hasura.RQL.Types.Common (OID (..))
import Hasura.RQL.Types.Table
import Hasura.SQL.Backend
--------------------------------------------------------------------------------
-- Loader
@@ -39,7 +38,6 @@ loadDBMetadata pool = do
Left e -> throw500 $ T.pack $ "error loading sql server database schema: " <> e
Right sysTables -> pure $ HM.fromList $ map transformTable sysTables
--------------------------------------------------------------------------------
-- Local types


@@ -1,62 +1,110 @@
module Hasura.Backends.Postgres.DDL.RunSQL
(withMetadataCheck) where
( runRunSQL
, RunSQL(..)
, isSchemaCacheBuildRequiredRunSQL
) where
import Hasura.Prelude
import qualified Data.HashMap.Strict as M
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.HashSet as HS
import qualified Data.List.NonEmpty as NE
import qualified Database.PG.Query as Q
import qualified Text.Regex.TDFA as TDFA
import Control.Lens ((.~))
import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson.TH
import Data.List.Extended (duplicates)
import Data.Aeson
import Data.Text.Extended
import qualified Hasura.SQL.AnyBackend as AB
import Hasura.Backends.Postgres.DDL.Source (ToMetadataFetchQuery, fetchTableMetadata)
import Hasura.Backends.Postgres.DDL.Source (ToMetadataFetchQuery, fetchFunctionMetadata,
fetchTableMetadata)
import Hasura.Backends.Postgres.DDL.Table
import Hasura.Backends.Postgres.SQL.Types hiding (TableName)
import Hasura.Base.Error
import Hasura.EncJSON
import Hasura.RQL.DDL.Deps (reportDepsExt)
import Hasura.RQL.DDL.Schema
import Hasura.RQL.DDL.Schema.Common
import Hasura.RQL.DDL.Schema.Function
import Hasura.RQL.DDL.Schema.Rename
import Hasura.RQL.DDL.Schema.Table
import Hasura.RQL.DDL.Schema.Diff
import Hasura.RQL.Types hiding (ConstraintName, fmFunction,
tmComputedFields, tmTable)
import Hasura.Server.Utils (quoteRegex)
data FunctionMeta
= FunctionMeta
{ fmOid :: !OID
, fmFunction :: !QualifiedFunction
, fmType :: !FunctionVolatility
data RunSQL
= RunSQL
{ rSql :: Text
, rSource :: !SourceName
, rCascade :: !Bool
, rCheckMetadataConsistency :: !(Maybe Bool)
, rTxAccessMode :: !Q.TxAccess
} deriving (Show, Eq)
$(deriveJSON hasuraJSON ''FunctionMeta)
data ComputedFieldMeta
= ComputedFieldMeta
{ ccmName :: !ComputedFieldName
, ccmFunctionMeta :: !FunctionMeta
} deriving (Show, Eq)
$(deriveJSON hasuraJSON{omitNothingFields=True} ''ComputedFieldMeta)
instance FromJSON RunSQL where
parseJSON = withObject "RunSQL" $ \o -> do
rSql <- o .: "sql"
rSource <- o .:? "source" .!= defaultSource
rCascade <- o .:? "cascade" .!= False
rCheckMetadataConsistency <- o .:? "check_metadata_consistency"
isReadOnly <- o .:? "read_only" .!= False
let rTxAccessMode = if isReadOnly then Q.ReadOnly else Q.ReadWrite
pure RunSQL{..}
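For reference, a hedged sketch of the request body this parser accepts, annotated with the defaults it applies:
-- { "sql": "ALTER TABLE t ADD COLUMN c int"   -- required
-- , "source": "default"                       -- optional, defaults to the default source
-- , "cascade": false                          -- optional, defaults to false
-- , "check_metadata_consistency": true        -- optional, overrides the DDL-keyword heuristic
-- , "read_only": false                        -- optional, true selects Q.ReadOnly
-- }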
data TableMeta (b :: BackendType)
= TableMeta
{ tmTable :: !QualifiedTable
, tmInfo :: !(DBTableMetadata b)
, tmComputedFields :: ![ComputedFieldMeta]
} deriving (Show, Eq)
instance ToJSON RunSQL where
toJSON RunSQL {..} =
object
[ "sql" .= rSql
, "source" .= rSource
, "cascade" .= rCascade
, "check_metadata_consistency" .= rCheckMetadataConsistency
, "read_only" .=
case rTxAccessMode of
Q.ReadOnly -> True
Q.ReadWrite -> False
]
-- | see Note [Checking metadata consistency in run_sql]
isSchemaCacheBuildRequiredRunSQL :: RunSQL -> Bool
isSchemaCacheBuildRequiredRunSQL RunSQL {..} =
case rTxAccessMode of
Q.ReadOnly -> False
Q.ReadWrite -> fromMaybe (containsDDLKeyword rSql) rCheckMetadataConsistency
where
containsDDLKeyword = TDFA.match $$(quoteRegex
TDFA.defaultCompOpt
{ TDFA.caseSensitive = False
, TDFA.multiline = True
, TDFA.lastStarGreedy = True }
TDFA.defaultExecOpt
{ TDFA.captureGroups = False }
"\\balter\\b|\\bdrop\\b|\\breplace\\b|\\bcreate function\\b|\\bcomment on\\b")
{- Note [Checking metadata consistency in run_sql]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SQL queries executed by run_sql may change the Postgres schema in arbitrary
ways. We attempt to automatically update the metadata to reflect those changes
as much as possible---for example, if a table is renamed, we want to update the
metadata to track the table under its new name instead of its old one. This
schema diffing (plus some integrity checking) is handled by withMetadataCheck.
But this process has overhead---it involves reloading the metadata, diffing it,
and rebuilding the schema cache---so we don't want to do it if it isn't
necessary. The user can explicitly disable the check via the
check_metadata_consistency option, and we also skip it if the current
transaction is in READ ONLY mode, since the schema can't be modified in that
case, anyway.
However, even if neither read_only nor check_metadata_consistency is passed, lots
of queries may not modify the schema at all. As a (fairly stupid) heuristic, we
check if the query contains any keywords for DDL operations, and if not, we skip
the metadata check as well. -}
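Putting the three cases together, a hedged sketch of expected results (src stands in for a source name):
-- isSchemaCacheBuildRequiredRunSQL (RunSQL "DROP TABLE t" src False Nothing Q.ReadOnly)
--   ~> False  -- read-only transaction: the schema cannot change
-- isSchemaCacheBuildRequiredRunSQL (RunSQL "SELECT 1" src False (Just True) Q.ReadWrite)
--   ~> True   -- explicit override beats the keyword scan
-- isSchemaCacheBuildRequiredRunSQL (RunSQL "DROP TABLE t" src False Nothing Q.ReadWrite)
--   ~> True   -- keyword heuristic fires on "drop"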
fetchMeta
:: (ToMetadataFetchQuery pgKind, BackendMetadata ('Postgres pgKind), MonadTx m)
=> TableCache ('Postgres pgKind)
-> FunctionCache ('Postgres pgKind)
-> m ([TableMeta ('Postgres pgKind)], [FunctionMeta])
-> m ([TableMeta ('Postgres pgKind)], [FunctionMeta ('Postgres pgKind)])
fetchMeta tables functions = do
tableMetaInfos <- fetchTableMetadata
functionMetaInfos <- fetchFunctionMetadata
@@ -81,206 +129,41 @@ fetchMeta tables functions = do
pure (tableMetas, functionMetas)
getOverlap :: (Eq k, Hashable k) => (v -> k) -> [v] -> [v] -> [(v, v)]
getOverlap getKey left right =
M.elems $ M.intersectionWith (,) (mkMap left) (mkMap right)
runRunSQL
:: forall (pgKind :: PostgresKind) m
. ( BackendMetadata ('Postgres pgKind)
, ToMetadataFetchQuery pgKind
, CacheRWM m
, HasServerConfigCtx m
, MetadataM m
, MonadBaseControl IO m
, MonadError QErr m
, MonadIO m
)
=> RunSQL
-> m EncJSON
runRunSQL q@RunSQL {..}
-- see Note [Checking metadata consistency in run_sql]
| isSchemaCacheBuildRequiredRunSQL q
= withMetadataCheck @pgKind rSource rCascade rTxAccessMode $ execRawSQL rSql
| otherwise
= askSourceConfig @('Postgres pgKind) rSource >>= \sourceConfig ->
liftEitherM $ runExceptT $
runLazyTx (_pscExecCtx sourceConfig) rTxAccessMode $ execRawSQL rSql
where
mkMap = M.fromList . map (\v -> (getKey v, v))
getDifference :: (Eq k, Hashable k) => (v -> k) -> [v] -> [v] -> [v]
getDifference getKey left right =
M.elems $ M.difference (mkMap left) (mkMap right)
execRawSQL :: (MonadTx n) => Text -> n EncJSON
execRawSQL =
fmap (encJFromJValue @RunSQLRes) . liftTx . Q.multiQE rawSqlErrHandler . Q.fromText
where
mkMap = M.fromList . map (\v -> (getKey v, v))
data ComputedFieldDiff
= ComputedFieldDiff
{ _cfdDropped :: [ComputedFieldName]
, _cfdAltered :: [(ComputedFieldMeta, ComputedFieldMeta)]
, _cfdOverloaded :: [(ComputedFieldName, QualifiedFunction)]
} deriving (Show, Eq)
data TableDiff (b :: BackendType)
= TableDiff
{ _tdNewName :: !(Maybe QualifiedTable)
, _tdDroppedCols :: ![Column b]
, _tdAddedCols :: ![RawColumnInfo b]
, _tdAlteredCols :: ![(RawColumnInfo b, RawColumnInfo b)]
, _tdDroppedFKeyCons :: ![ConstraintName]
, _tdComputedFields :: !ComputedFieldDiff
-- The final list of uniq/primary constraint names
-- used for generating types on_conflict clauses
-- TODO: this ideally shouldn't be part of TableDiff
, _tdUniqOrPriCons :: ![ConstraintName]
, _tdNewDescription :: !(Maybe PGDescription)
}
getTableDiff
:: (Backend ('Postgres pgKind), BackendMetadata ('Postgres pgKind))
=> TableMeta ('Postgres pgKind)
-> TableMeta ('Postgres pgKind)
-> TableDiff ('Postgres pgKind)
getTableDiff oldtm newtm =
TableDiff mNewName droppedCols addedCols alteredCols
droppedFKeyConstraints computedFieldDiff uniqueOrPrimaryCons mNewDesc
where
mNewName = bool (Just $ tmTable newtm) Nothing $ tmTable oldtm == tmTable newtm
oldCols = _ptmiColumns $ tmInfo oldtm
newCols = _ptmiColumns $ tmInfo newtm
uniqueOrPrimaryCons = map _cName $
maybeToList (_pkConstraint <$> _ptmiPrimaryKey (tmInfo newtm))
<> toList (_ptmiUniqueConstraints $ tmInfo newtm)
mNewDesc = _ptmiDescription $ tmInfo newtm
droppedCols = map prciName $ getDifference prciPosition oldCols newCols
addedCols = getDifference prciPosition newCols oldCols
existingCols = getOverlap prciPosition oldCols newCols
alteredCols = filter (uncurry (/=)) existingCols
-- foreign keys are considered dropped only if their oid
-- and (ref-table, column mapping) are changed
droppedFKeyConstraints = map (_cName . _fkConstraint) $ HS.toList $
droppedFKeysWithOid `HS.intersection` droppedFKeysWithUniq
tmForeignKeys = fmap unForeignKeyMetadata . toList . _ptmiForeignKeys . tmInfo
droppedFKeysWithOid = HS.fromList $
(getDifference (_cOid . _fkConstraint) `on` tmForeignKeys) oldtm newtm
droppedFKeysWithUniq = HS.fromList $
(getDifference mkFKeyUniqId `on` tmForeignKeys) oldtm newtm
mkFKeyUniqId (ForeignKey _ reftn colMap) = (reftn, colMap)
-- calculate computed field diff
oldComputedFieldMeta = tmComputedFields oldtm
newComputedFieldMeta = tmComputedFields newtm
droppedComputedFields = map ccmName $
getDifference (fmOid . ccmFunctionMeta) oldComputedFieldMeta newComputedFieldMeta
alteredComputedFields =
getOverlap (fmOid . ccmFunctionMeta) oldComputedFieldMeta newComputedFieldMeta
overloadedComputedFieldFunctions =
let getFunction = fmFunction . ccmFunctionMeta
getSecondElement (_ NE.:| list) = listToMaybe list
in mapMaybe (fmap ((&&&) ccmName getFunction) . getSecondElement) $
flip NE.groupBy newComputedFieldMeta $ \l r ->
ccmName l == ccmName r && getFunction l == getFunction r
computedFieldDiff = ComputedFieldDiff droppedComputedFields alteredComputedFields
overloadedComputedFieldFunctions
getTableChangeDeps
:: forall pgKind m. (Backend ('Postgres pgKind), QErrM m, CacheRM m)
=> SourceName -> QualifiedTable -> TableDiff ('Postgres pgKind) -> m [SchemaObjId]
getTableChangeDeps source tn tableDiff = do
sc <- askSchemaCache
-- for all the dropped columns
droppedColDeps <- fmap concat $ forM droppedCols $ \droppedCol -> do
let objId = SOSourceObj source
$ AB.mkAnyBackend
$ SOITableObj @('Postgres pgKind) tn
$ TOCol @('Postgres pgKind) droppedCol
return $ getDependentObjs sc objId
-- for all dropped constraints
droppedConsDeps <- fmap concat $ forM droppedFKeyConstraints $ \droppedCons -> do
let objId = SOSourceObj source
$ AB.mkAnyBackend
$ SOITableObj @('Postgres pgKind) tn
$ TOForeignKey @('Postgres pgKind) droppedCons
return $ getDependentObjs sc objId
return $ droppedConsDeps <> droppedColDeps <> droppedComputedFieldDeps
where
TableDiff _ droppedCols _ _ droppedFKeyConstraints computedFieldDiff _ _ = tableDiff
droppedComputedFieldDeps =
map
(SOSourceObj source
. AB.mkAnyBackend
. SOITableObj @('Postgres pgKind) tn
. TOComputedField)
$ _cfdDropped computedFieldDiff
data SchemaDiff (b :: BackendType)
= SchemaDiff
{ _sdDroppedTables :: ![QualifiedTable]
, _sdAlteredTables :: ![(QualifiedTable, TableDiff b)]
}
getSchemaDiff
:: BackendMetadata ('Postgres pgKind)
=> [TableMeta ('Postgres pgKind)]
-> [TableMeta ('Postgres pgKind)]
-> SchemaDiff ('Postgres pgKind)
getSchemaDiff oldMeta newMeta =
SchemaDiff droppedTables survivingTables
where
droppedTables = map tmTable $ getDifference (_ptmiOid . tmInfo) oldMeta newMeta
survivingTables =
flip map (getOverlap (_ptmiOid . tmInfo) oldMeta newMeta) $ \(oldtm, newtm) ->
(tmTable oldtm, getTableDiff oldtm newtm)
getSchemaChangeDeps
:: forall pgKind m. (Backend ('Postgres pgKind), QErrM m, CacheRM m)
=> SourceName -> SchemaDiff ('Postgres pgKind) -> m [SourceObjId ('Postgres pgKind)]
getSchemaChangeDeps source schemaDiff = do
-- Get schema cache
sc <- askSchemaCache
let tableIds =
map
(SOSourceObj source . AB.mkAnyBackend . SOITable @('Postgres pgKind))
droppedTables
-- Get the dependent of the dropped tables
let tableDropDeps = concatMap (getDependentObjs sc) tableIds
tableModDeps <- concat <$> traverse (uncurry (getTableChangeDeps source)) alteredTables
-- return $ filter (not . isDirectDep) $
return $ mapMaybe getIndirectDep $
HS.toList $ HS.fromList $ tableDropDeps <> tableModDeps
where
SchemaDiff droppedTables alteredTables = schemaDiff
getIndirectDep :: SchemaObjId -> Maybe (SourceObjId ('Postgres pgKind))
getIndirectDep (SOSourceObj s exists) =
AB.unpackAnyBackend exists >>= \case
srcObjId@(SOITableObj tn _) ->
-- A dependency is considered indirect unless it belongs to the same source and references a dropped table
if not (s == source && tn `HS.member` HS.fromList droppedTables)
then Just srcObjId
else Nothing
srcObjId -> Just srcObjId
getIndirectDep _ = Nothing
data FunctionDiff
= FunctionDiff
{ fdDropped :: ![QualifiedFunction]
, fdAltered :: ![(QualifiedFunction, FunctionVolatility)]
} deriving (Show, Eq)
getFuncDiff :: [FunctionMeta] -> [FunctionMeta] -> FunctionDiff
getFuncDiff oldMeta newMeta =
FunctionDiff droppedFuncs alteredFuncs
where
droppedFuncs = map fmFunction $ getDifference fmOid oldMeta newMeta
alteredFuncs = mapMaybe mkAltered $ getOverlap fmOid oldMeta newMeta
mkAltered (oldfm, newfm) =
let isTypeAltered = fmType oldfm /= fmType newfm
alteredFunc = (fmFunction oldfm, fmType newfm)
in bool Nothing (Just alteredFunc) $ isTypeAltered
getOverloadedFuncs
:: [QualifiedFunction] -> [FunctionMeta] -> [QualifiedFunction]
getOverloadedFuncs trackedFuncs newFuncMeta =
toList $ duplicates $ map fmFunction trackedMeta
where
trackedMeta = flip filter newFuncMeta $ \fm ->
fmFunction fm `elem` trackedFuncs
rawSqlErrHandler txe =
(err400 PostgresError "query execution failed") { qeInternal = Just $ toJSON txe }
-- | @'withMetadataCheck' cascade action@ runs @action@ and checks if the schema changed as a
-- result. If it did, it checks to ensure the changes do not violate any integrity constraints, and
-- if not, incorporates them into the schema cache.
withMetadataCheck
:: forall (pgKind :: PostgresKind) a m
. ( Backend ('Postgres pgKind)
, BackendMetadata ('Postgres pgKind)
. ( BackendMetadata ('Postgres pgKind)
, ToMetadataFetchQuery pgKind
, CacheRWM m
, HasServerConfigCtx m
@@ -345,7 +228,7 @@ withMetadataCheck source cascade txAccess action = do
"type of function " <> qf <<> " is altered to \"VOLATILE\" which is not supported now"
-- update the metadata with the changes
processSchemaChanges preActionTables schemaDiff
processSchemaDiff source preActionTables schemaDiff
pure (actionResult, metadataUpdater)
@@ -367,97 +250,3 @@ withMetadataCheck source cascade txAccess action = do
flip runReaderT serverConfigCtx $ mkAllTriggersQ triggerName table columns opsDefinition
pure actionResult
where
processSchemaChanges
:: ( MonadError QErr m'
, CacheRM m'
, MonadWriter MetadataModifier m'
)
=> TableCache ('Postgres pgKind) -> SchemaDiff ('Postgres pgKind) -> m' ()
processSchemaChanges preActionTables schemaDiff = do
-- Purge the dropped tables
forM_ droppedTables $
\tn -> tell $ MetadataModifier $ metaSources.ix source.(toSourceMetadata @('Postgres pgKind)).smTables %~ OMap.delete tn
for_ alteredTables $ \(oldQtn, tableDiff) -> do
ti <- onNothing
(M.lookup oldQtn preActionTables)
(throw500 $ "old table metadata not found in cache : " <>> oldQtn)
processTableChanges source (_tiCoreInfo ti) tableDiff
where
SchemaDiff droppedTables alteredTables = schemaDiff
processTableChanges
:: forall pgKind m
. ( Backend ('Postgres pgKind)
, BackendMetadata ('Postgres pgKind)
, MonadError QErr m
, CacheRM m
, MonadWriter MetadataModifier m
)
=> SourceName -> TableCoreInfo ('Postgres pgKind) -> TableDiff ('Postgres pgKind) -> m ()
processTableChanges source ti tableDiff = do
-- If the table was renamed, don't replace constraints or
-- process dropped/added columns, because a schema reload happens eventually
sc <- askSchemaCache
let tn = _tciName ti
withOldTabName = do
procAlteredCols sc tn
withNewTabName newTN = do
let tnGQL = snakeCaseQualifiedObject newTN
-- check for GraphQL schema conflicts on new name
checkConflictingNode sc tnGQL
procAlteredCols sc tn
-- update new table in metadata
renameTableInMetadata @('Postgres pgKind) source newTN tn
-- Process computed field diff
processComputedFieldDiff tn
-- Drop custom column names for dropped columns
possiblyDropCustomColumnNames tn
maybe withOldTabName withNewTabName mNewName
where
TableDiff mNewName droppedCols _ alteredCols _ computedFieldDiff _ _ = tableDiff
possiblyDropCustomColumnNames tn = do
let TableConfig customFields customColumnNames customName = _tciCustomConfig ti
modifiedCustomColumnNames = foldl' (flip M.delete) customColumnNames droppedCols
when (modifiedCustomColumnNames /= customColumnNames) $
tell $ MetadataModifier $
tableMetadataSetter @('Postgres pgKind) source tn.tmConfiguration .~ TableConfig customFields modifiedCustomColumnNames customName
procAlteredCols sc tn = for_ alteredCols $
\( RawColumnInfo oldName _ oldType _ _
, RawColumnInfo newName _ newType _ _ ) -> do
if | oldName /= newName ->
renameColumnInMetadata oldName newName source tn (_tciFieldInfoMap ti)
| oldType /= newType -> do
let colId =
SOSourceObj source
$ AB.mkAnyBackend
$ SOITableObj @('Postgres pgKind) tn
$ TOCol @('Postgres pgKind) oldName
typeDepObjs = getDependentObjsWith (== DROnType) sc colId
unless (null typeDepObjs) $ throw400 DependencyError $
"cannot change type of column " <> oldName <<> " in table "
<> tn <<> " because of the following dependencies : " <>
reportSchemaObjs typeDepObjs
| otherwise -> pure ()
processComputedFieldDiff table = do
let ComputedFieldDiff _ altered overloaded = computedFieldDiff
getFunction = fmFunction . ccmFunctionMeta
forM_ overloaded $ \(columnName, function) ->
throw400 NotSupported $ "The function " <> function
<<> " associated with computed field" <> columnName
<<> " of table " <> table <<> " is being overloaded"
forM_ altered $ \(old, new) ->
if | (fmType . ccmFunctionMeta) new == FTVOLATILE ->
throw400 NotSupported $ "The type of function " <> getFunction old
<<> " associated with computed field " <> ccmName old
<<> " of table " <> table <<> " is being altered to \"VOLATILE\""
| otherwise -> pure ()


@@ -1,8 +1,8 @@
module Hasura.Backends.Postgres.DDL.Source
( ToMetadataFetchQuery
, fetchFunctionMetadata
, fetchPgScalars
, fetchTableMetadata
, fetchFunctionMetadata
, initCatalogForSource
, postDropSourceHook
, resolveDatabaseMetadata


@@ -22,16 +22,7 @@ load and modify the Hasura catalog and schema cache.
-}
module Hasura.RQL.DDL.Schema
( module Hasura.RQL.DDL.Schema.Cache
, module Hasura.RQL.DDL.Schema.Catalog
, module Hasura.RQL.DDL.Schema.Function
, module Hasura.RQL.DDL.Schema.Rename
, module Hasura.RQL.DDL.Schema.Table
, RunSQL(..)
, runRunSQL
, isSchemaCacheBuildRequiredRunSQL
( module M
, RunSQLRes(..)
) where
@@ -40,124 +31,18 @@ import Hasura.Prelude
import qualified Data.Text.Encoding as TE
import qualified Database.PG.Query as Q
import qualified Database.PostgreSQL.LibPQ as PQ
import qualified Text.Regex.TDFA as TDFA
import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson
import Data.Aeson.TH
import Hasura.Backends.Postgres.DDL.RunSQL
import Hasura.Backends.Postgres.DDL.Source (ToMetadataFetchQuery)
import Hasura.Base.Error
import Hasura.Base.Instances ()
import Hasura.EncJSON
import Hasura.RQL.DDL.Schema.Cache
import Hasura.RQL.DDL.Schema.Catalog
import Hasura.RQL.DDL.Schema.Function
import Hasura.RQL.DDL.Schema.Rename
import Hasura.RQL.DDL.Schema.Table
import Hasura.RQL.Types
import Hasura.Server.Utils (quoteRegex)
import Hasura.RQL.DDL.Schema.Cache as M
import Hasura.RQL.DDL.Schema.Catalog as M
import Hasura.RQL.DDL.Schema.Function as M
import Hasura.RQL.DDL.Schema.Rename as M
import Hasura.RQL.DDL.Schema.Table as M
data RunSQL
= RunSQL
{ rSql :: Text
, rSource :: !SourceName
, rCascade :: !Bool
, rCheckMetadataConsistency :: !(Maybe Bool)
, rTxAccessMode :: !Q.TxAccess
} deriving (Show, Eq)
instance FromJSON RunSQL where
parseJSON = withObject "RunSQL" $ \o -> do
rSql <- o .: "sql"
rSource <- o .:? "source" .!= defaultSource
rCascade <- o .:? "cascade" .!= False
rCheckMetadataConsistency <- o .:? "check_metadata_consistency"
isReadOnly <- o .:? "read_only" .!= False
let rTxAccessMode = if isReadOnly then Q.ReadOnly else Q.ReadWrite
pure RunSQL{..}
instance ToJSON RunSQL where
toJSON RunSQL {..} =
object
[ "sql" .= rSql
, "source" .= rSource
, "cascade" .= rCascade
, "check_metadata_consistency" .= rCheckMetadataConsistency
, "read_only" .=
case rTxAccessMode of
Q.ReadOnly -> True
Q.ReadWrite -> False
]
-- | see Note [Checking metadata consistency in run_sql]
isSchemaCacheBuildRequiredRunSQL :: RunSQL -> Bool
isSchemaCacheBuildRequiredRunSQL RunSQL {..} =
case rTxAccessMode of
Q.ReadOnly -> False
Q.ReadWrite -> fromMaybe (containsDDLKeyword rSql) rCheckMetadataConsistency
where
containsDDLKeyword :: Text -> Bool
containsDDLKeyword = TDFA.match $$(quoteRegex
TDFA.defaultCompOpt
{ TDFA.caseSensitive = False
, TDFA.multiline = True
, TDFA.lastStarGreedy = True }
TDFA.defaultExecOpt
{ TDFA.captureGroups = False }
"\\balter\\b|\\bdrop\\b|\\breplace\\b|\\bcreate function\\b|\\bcomment on\\b")
runRunSQL
:: forall (pgKind :: PostgresKind) m
. ( BackendMetadata ('Postgres pgKind)
, ToMetadataFetchQuery pgKind
, CacheRWM m
, HasServerConfigCtx m
, MetadataM m
, MonadBaseControl IO m
, MonadError QErr m
, MonadIO m
)
=> RunSQL
-> m EncJSON
runRunSQL q@RunSQL {..}
-- see Note [Checking metadata consistency in run_sql]
| isSchemaCacheBuildRequiredRunSQL q
= withMetadataCheck @pgKind rSource rCascade rTxAccessMode $ execRawSQL rSql
| otherwise
= askSourceConfig @('Postgres pgKind) rSource >>= \sourceConfig ->
liftEitherM $ runExceptT $
runLazyTx (_pscExecCtx sourceConfig) rTxAccessMode $ execRawSQL rSql
where
execRawSQL :: (MonadTx n) => Text -> n EncJSON
execRawSQL =
fmap (encJFromJValue @RunSQLRes) . liftTx . Q.multiQE rawSqlErrHandler . Q.fromText
where
rawSqlErrHandler txe =
(err400 PostgresError "query execution failed") { qeInternal = Just $ toJSON txe }
{- Note [Checking metadata consistency in run_sql]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SQL queries executed by run_sql may change the Postgres schema in arbitrary
ways. We attempt to automatically update the metadata to reflect those changes
as much as possible---for example, if a table is renamed, we want to update the
metadata to track the table under its new name instead of its old one. This
schema diffing (plus some integrity checking) is handled by withMetadataCheck.
But this process has overhead---it involves reloading the metadata, diffing it,
and rebuilding the schema cache---so we dont want to do it if it isnt
necessary. The user can explicitly disable the check via the
check_metadata_consistency option, and we also skip it if the current
transaction is in READ ONLY mode, since the schema can't be modified in that
case, anyway.
However, even if neither read_only nor check_metadata_consistency is passed, lots
of queries may not modify the schema at all. As a (fairly stupid) heuristic, we
check if the query contains any keywords for DDL operations, and if not, we skip
the metadata check as well. -}
data RunSQLRes
= RunSQLRes
{ rrResultType :: !Text


@@ -2,14 +2,8 @@ module Hasura.RQL.DDL.Schema.Common where
import Hasura.Prelude
import qualified Data.HashMap.Strict as HM
import qualified Database.PG.Query as Q
import Data.FileEmbed (makeRelativeToProject)
import qualified Hasura.SQL.AnyBackend as AB
import Hasura.Backends.Postgres.SQL.Types
import Hasura.Base.Error
import Hasura.RQL.DDL.ComputedField
import Hasura.RQL.DDL.EventTrigger
@@ -38,21 +32,3 @@ purgeDependentObject source sourceObjId = case sourceObjId of
throw500
$ "unexpected dependent object: "
<> reportSchemaObj (SOSourceObj source $ AB.mkAnyBackend sourceObjId)
-- | Fetch Postgres metadata for all user functions
fetchFunctionMetadata :: (MonadTx m) => m (DBFunctionsMetadata ('Postgres 'Vanilla))
fetchFunctionMetadata = do
results <- liftTx $ Q.withQE defaultTxErrorHandler
$(makeRelativeToProject "src-rsr/pg_function_metadata.sql" >>= Q.sqlFromFile) () True
pure $ HM.fromList $ flip map results $
\(schema, table, Q.AltJ infos) -> (QualifiedObject schema table, infos)
-- | Fetch all scalar types from Postgres
fetchPgScalars :: MonadTx m => m (HashSet PGScalarType)
fetchPgScalars =
liftTx $ Q.getAltJ . runIdentity . Q.getRow
<$> Q.withQE defaultTxErrorHandler
[Q.sql|
SELECT coalesce(json_agg(typname), '[]')
FROM pg_catalog.pg_type where typtype = 'b'
|] () True


@@ -1,100 +1,89 @@
module Hasura.RQL.DDL.Schema.Diff
( TableMeta(..)
, ComputedFieldMeta(..)
, getDifference
, TableDiff(..)
, getTableDiff
, getTableChangeDeps
, ComputedFieldDiff(..)
, SchemaDiff(..)
, getSchemaDiff
, getSchemaChangeDeps
, FunctionMeta(..)
, FunctionDiff(..)
, getFuncDiff
, getOverloadedFuncs
) where
module Hasura.RQL.DDL.Schema.Diff where
import Hasura.Prelude
import qualified Data.HashMap.Strict as M
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.HashSet as HS
import qualified Data.List.NonEmpty as NE
import qualified Language.GraphQL.Draft.Syntax as G
import Data.Aeson.TH
import Data.List.Extended (duplicates)
import Control.Lens ((.~))
import Data.Aeson
import Data.List.Extended
import Data.Text.Extended
import qualified Hasura.SQL.AnyBackend as AB
import Hasura.Backends.Postgres.SQL.Types hiding (TableName)
import Hasura.Backends.Postgres.SQL.Types hiding (ConstraintName, FunctionName, TableName)
import Hasura.Base.Error
import Hasura.RQL.Types hiding (ConstraintName, fmFunction,
tmComputedFields, tmTable)
import Hasura.RQL.DDL.Schema.Rename
import Hasura.RQL.DDL.Schema.Table
import Hasura.RQL.Types hiding (fmFunction, tmComputedFields, tmTable)
data FunctionMeta
data FunctionMeta b
= FunctionMeta
{ fmOid :: !OID
, fmFunction :: !QualifiedFunction
, fmFunction :: !(FunctionName b)
, fmType :: !FunctionVolatility
} deriving (Show, Eq)
$(deriveJSON hasuraJSON ''FunctionMeta)
} deriving (Generic)
deriving instance (Backend b) => Show (FunctionMeta b)
deriving instance (Backend b) => Eq (FunctionMeta b)
data ComputedFieldMeta
instance (Backend b) => FromJSON (FunctionMeta b) where
parseJSON = genericParseJSON hasuraJSON
instance (Backend b) => ToJSON (FunctionMeta b) where
toJSON = genericToJSON hasuraJSON
data ComputedFieldMeta b
= ComputedFieldMeta
{ ccmName :: !ComputedFieldName
, ccmFunctionMeta :: !FunctionMeta
} deriving (Show, Eq)
$(deriveJSON hasuraJSON{omitNothingFields=True} ''ComputedFieldMeta)
, ccmFunctionMeta :: !(FunctionMeta b)
} deriving (Generic, Show, Eq)
instance (Backend b) => FromJSON (ComputedFieldMeta b) where
parseJSON = genericParseJSON hasuraJSON{omitNothingFields=True}
instance (Backend b) => ToJSON (ComputedFieldMeta b) where
toJSON = genericToJSON hasuraJSON{omitNothingFields=True}
data TableMeta (b :: BackendType)
= TableMeta
{ tmTable :: !QualifiedTable
{ tmTable :: !(TableName b)
, tmInfo :: !(DBTableMetadata b)
, tmComputedFields :: ![ComputedFieldMeta]
, tmComputedFields :: ![ComputedFieldMeta b]
} deriving (Show, Eq)
getOverlap :: (Eq k, Hashable k) => (v -> k) -> [v] -> [v] -> [(v, v)]
getOverlap getKey left right =
M.elems $ M.intersectionWith (,) (mkMap left) (mkMap right)
where
mkMap = M.fromList . map (\v -> (getKey v, v))
getDifference :: (Eq k, Hashable k) => (v -> k) -> [v] -> [v] -> [v]
getDifference getKey left right =
M.elems $ M.difference (mkMap left) (mkMap right)
where
mkMap = M.fromList . map (\v -> (getKey v, v))
data ComputedFieldDiff
data ComputedFieldDiff (b :: BackendType)
= ComputedFieldDiff
{ _cfdDropped :: [ComputedFieldName]
, _cfdAltered :: [(ComputedFieldMeta, ComputedFieldMeta)]
, _cfdOverloaded :: [(ComputedFieldName, QualifiedFunction)]
} deriving (Show, Eq)
, _cfdAltered :: [(ComputedFieldMeta b, ComputedFieldMeta b)]
, _cfdOverloaded :: [(ComputedFieldName, FunctionName b)]
}
deriving instance (Backend b) => Show (ComputedFieldDiff b)
deriving instance (Backend b) => Eq (ComputedFieldDiff b)
data TableDiff (b :: BackendType)
= TableDiff
{ _tdNewName :: !(Maybe QualifiedTable)
{ _tdNewName :: !(Maybe (TableName b))
, _tdDroppedCols :: ![Column b]
, _tdAddedCols :: ![RawColumnInfo b]
, _tdAlteredCols :: ![(RawColumnInfo b, RawColumnInfo b)]
, _tdDroppedFKeyCons :: ![ConstraintName]
, _tdComputedFields :: !ComputedFieldDiff
, _tdDroppedFKeyCons :: ![ConstraintName b]
, _tdComputedFields :: !(ComputedFieldDiff b)
-- The final list of uniq/primary constraint names
-- used for generating types on_conflict clauses
-- TODO: this ideally shouldn't be part of TableDiff
, _tdUniqOrPriCons :: ![ConstraintName]
, _tdUniqOrPriCons :: ![ConstraintName b]
, _tdNewDescription :: !(Maybe PGDescription)
}
getTableDiff :: TableMeta ('Postgres 'Vanilla) -> TableMeta ('Postgres 'Vanilla) -> TableDiff ('Postgres 'Vanilla)
getTableDiff
:: Backend b
=> TableMeta b
-> TableMeta b
-> TableDiff b
getTableDiff oldtm newtm =
TableDiff mNewName droppedCols addedCols alteredCols
TableDiff mNewName droppedCols alteredCols
droppedFKeyConstraints computedFieldDiff uniqueOrPrimaryCons mNewDesc
where
mNewName = bool (Just $ tmTable newtm) Nothing $ tmTable oldtm == tmTable newtm
@@ -107,9 +96,8 @@ getTableDiff oldtm newtm =
mNewDesc = _ptmiDescription $ tmInfo newtm
droppedCols = map prciName $ getDifference prciPosition oldCols newCols
addedCols = getDifference prciPosition newCols oldCols
existingCols = getOverlap prciPosition oldCols newCols
droppedCols = map prciName $ getDifferenceOn prciPosition oldCols newCols
existingCols = getOverlapWith prciPosition oldCols newCols
alteredCols = filter (uncurry (/=)) existingCols
-- foreign keys are considered dropped only if their oid
@@ -118,9 +106,9 @@ getTableDiff oldtm newtm =
droppedFKeysWithOid `HS.intersection` droppedFKeysWithUniq
tmForeignKeys = fmap unForeignKeyMetadata . toList . _ptmiForeignKeys . tmInfo
droppedFKeysWithOid = HS.fromList $
(getDifference (_cOid . _fkConstraint) `on` tmForeignKeys) oldtm newtm
(getDifferenceOn (_cOid . _fkConstraint) `on` tmForeignKeys) oldtm newtm
droppedFKeysWithUniq = HS.fromList $
(getDifference mkFKeyUniqId `on` tmForeignKeys) oldtm newtm
(getDifferenceOn mkFKeyUniqId `on` tmForeignKeys) oldtm newtm
mkFKeyUniqId (ForeignKey _ reftn colMap) = (reftn, colMap)
-- calculate computed field diff
@@ -128,10 +116,10 @@ getTableDiff oldtm newtm =
newComputedFieldMeta = tmComputedFields newtm
droppedComputedFields = map ccmName $
getDifference (fmOid . ccmFunctionMeta) oldComputedFieldMeta newComputedFieldMeta
getDifferenceOn (fmOid . ccmFunctionMeta) oldComputedFieldMeta newComputedFieldMeta
alteredComputedFields =
getOverlap (fmOid . ccmFunctionMeta) oldComputedFieldMeta newComputedFieldMeta
getOverlapWith (fmOid . ccmFunctionMeta) oldComputedFieldMeta newComputedFieldMeta
overloadedComputedFieldFunctions =
let getFunction = fmFunction . ccmFunctionMeta
@@ -144,96 +132,241 @@ getTableDiff oldtm newtm =
overloadedComputedFieldFunctions
getTableChangeDeps
:: (QErrM m, CacheRM m)
=> SourceName -> QualifiedTable -> TableDiff ('Postgres 'Vanilla) -> m [SchemaObjId]
:: forall b m
. (QErrM m, CacheRM m, Backend b)
=> SourceName
-> TableName b
-> TableDiff b
-> m [SchemaObjId]
getTableChangeDeps source tn tableDiff = do
sc <- askSchemaCache
-- for all the dropped columns
droppedColDeps <- fmap concat $ forM droppedCols $ \droppedCol -> do
let objId = SOSourceObj source
$ AB.mkAnyBackend
$ SOITableObj @('Postgres 'Vanilla) tn
$ TOCol @('Postgres 'Vanilla) droppedCol
$ SOITableObj @b tn
$ TOCol @b droppedCol
return $ getDependentObjs sc objId
-- for all dropped constraints
droppedConsDeps <- fmap concat $ forM droppedFKeyConstraints $ \droppedCons -> do
let objId = SOSourceObj source
$ AB.mkAnyBackend
$ SOITableObj @('Postgres 'Vanilla) tn
$ TOForeignKey @('Postgres 'Vanilla) droppedCons
$ SOITableObj @b tn
$ TOForeignKey @b droppedCons
return $ getDependentObjs sc objId
return $ droppedConsDeps <> droppedColDeps <> droppedComputedFieldDeps
where
TableDiff _ droppedCols _ _ droppedFKeyConstraints computedFieldDiff _ _ = tableDiff
TableDiff _ droppedCols _ droppedFKeyConstraints computedFieldDiff _ _ = tableDiff
droppedComputedFieldDeps =
map
(SOSourceObj source
. AB.mkAnyBackend
. SOITableObj @('Postgres 'Vanilla) tn
. SOITableObj @b tn
. TOComputedField)
$ _cfdDropped computedFieldDiff
data SchemaDiff (b :: BackendType)
= SchemaDiff
{ _sdDroppedTables :: ![QualifiedTable]
, _sdAlteredTables :: ![(QualifiedTable, TableDiff b)]
{ _sdDroppedTables :: ![TableName b]
, _sdAlteredTables :: ![(TableName b, TableDiff b)]
}
getSchemaDiff :: [TableMeta ('Postgres 'Vanilla)] -> [TableMeta ('Postgres 'Vanilla)] -> SchemaDiff ('Postgres 'Vanilla)
getSchemaDiff
:: (Backend b) => [TableMeta b] -> [TableMeta b] -> SchemaDiff b
getSchemaDiff oldMeta newMeta =
SchemaDiff droppedTables survivingTables
where
droppedTables = map tmTable $ getDifference (_ptmiOid . tmInfo) oldMeta newMeta
droppedTables = map tmTable $ getDifferenceOn (_ptmiOid . tmInfo) oldMeta newMeta
survivingTables =
flip map (getOverlap (_ptmiOid . tmInfo) oldMeta newMeta) $ \(oldtm, newtm) ->
flip map (getOverlapWith (_ptmiOid . tmInfo) oldMeta newMeta) $ \(oldtm, newtm) ->
(tmTable oldtm, getTableDiff oldtm newtm)
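Because tables are matched on their OID rather than their name, a rename surfaces as an altered table instead of a drop plus a create; a hedged sketch:
-- getSchemaDiff [TableMeta t1 info []] []  ~>  SchemaDiff [t1] []  -- t1 was dropped
-- getSchemaDiff [TableMeta t1 info []] [TableMeta t2 info []]      -- same OID in info
--   ~>  SchemaDiff [] [(t1, diff)]  -- where _tdNewName diff == Just t2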
getSchemaChangeDeps
:: (QErrM m, CacheRM m)
=> SourceName -> SchemaDiff ('Postgres 'Vanilla) -> m [SchemaObjId]
:: forall b m. (QErrM m, CacheRM m, Backend b)
=> SourceName -> SchemaDiff b -> m [SourceObjId b]
getSchemaChangeDeps source schemaDiff = do
-- Get schema cache
sc <- askSchemaCache
let tableIds =
map
(SOSourceObj source . AB.mkAnyBackend . SOITable @('Postgres 'Vanilla))
(SOSourceObj source . AB.mkAnyBackend . SOITable @b)
droppedTables
-- Get the dependent of the dropped tables
let tableDropDeps = concatMap (getDependentObjs sc) tableIds
tableModDeps <- concat <$> traverse (uncurry (getTableChangeDeps source)) alteredTables
return $ filter (not . isDirectDep) $
-- return $ filter (not . isDirectDep) $
return $ mapMaybe getIndirectDep $
HS.toList $ HS.fromList $ tableDropDeps <> tableModDeps
where
SchemaDiff droppedTables alteredTables = schemaDiff
isDirectDep (SOSourceObj s exists) =
case AB.unpackAnyBackend @('Postgres 'Vanilla) exists of
Just (SOITableObj pgTable _) ->
s == source && pgTable `HS.member` HS.fromList droppedTables
_ -> False
isDirectDep _ = False
getIndirectDep :: SchemaObjId -> Maybe (SourceObjId b)
getIndirectDep (SOSourceObj s exists) =
AB.unpackAnyBackend exists >>= \case
srcObjId@(SOITableObj tn _) ->
-- A dependency is considered indirect unless it belongs to the same source and references a dropped table
if not (s == source && tn `HS.member` HS.fromList droppedTables)
then Just srcObjId
else Nothing
srcObjId -> Just srcObjId
getIndirectDep _ = Nothing
data FunctionDiff
data FunctionDiff b
= FunctionDiff
{ fdDropped :: ![QualifiedFunction]
, fdAltered :: ![(QualifiedFunction, FunctionVolatility)]
} deriving (Show, Eq)
{ fdDropped :: ![FunctionName b]
, fdAltered :: ![(FunctionName b, FunctionVolatility)]
}
deriving instance (Backend b) => Show (FunctionDiff b)
deriving instance (Backend b) => Eq (FunctionDiff b)
getFuncDiff :: [FunctionMeta] -> [FunctionMeta] -> FunctionDiff
getFuncDiff :: [FunctionMeta b] -> [FunctionMeta b] -> FunctionDiff b
getFuncDiff oldMeta newMeta =
FunctionDiff droppedFuncs alteredFuncs
where
droppedFuncs = map fmFunction $ getDifference fmOid oldMeta newMeta
alteredFuncs = mapMaybe mkAltered $ getOverlap fmOid oldMeta newMeta
droppedFuncs = map fmFunction $ getDifferenceOn fmOid oldMeta newMeta
alteredFuncs = mapMaybe mkAltered $ getOverlapWith fmOid oldMeta newMeta
mkAltered (oldfm, newfm) =
let isTypeAltered = fmType oldfm /= fmType newfm
alteredFunc = (fmFunction oldfm, fmType newfm)
in bool Nothing (Just alteredFunc) $ isTypeAltered
in bool Nothing (Just alteredFunc) isTypeAltered
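Functions are likewise matched on OID, so a dropped function is one whose OID vanished, and a surviving one is reported as altered only when its volatility changed; a hedged sketch:
-- getFuncDiff [FunctionMeta oid f FTSTABLE] [FunctionMeta oid f FTVOLATILE]
--   ~>  FunctionDiff [] [(f, FTVOLATILE)]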
getOverloadedFuncs
:: [QualifiedFunction] -> [FunctionMeta] -> [QualifiedFunction]
:: (Backend b) => [FunctionName b] -> [FunctionMeta b] -> [FunctionName b]
getOverloadedFuncs trackedFuncs newFuncMeta =
toList $ duplicates $ map fmFunction trackedMeta
where
trackedMeta = flip filter newFuncMeta $ \fm ->
fmFunction fm `elem` trackedFuncs
processSchemaDiff
:: forall b m
. ( MonadError QErr m
, CacheRM m
, MonadWriter MetadataModifier m
, BackendMetadata b
)
=> SourceName
-> TableCache b
-> SchemaDiff b
-> m ()
processSchemaDiff source preActionTables schemaDiff = do
-- Purge the dropped tables
dropTablesInMetadata @b source droppedTables
for_ alteredTables $ \(oldQtn, tableDiff) -> do
ti <- onNothing
(M.lookup oldQtn preActionTables)
(throw500 $ "old table metadata not found in cache : " <>> oldQtn)
alterTableInMetadata source (_tiCoreInfo ti) tableDiff
where
SchemaDiff droppedTables alteredTables = schemaDiff
alterTableInMetadata
:: forall m b
. ( MonadError QErr m
, CacheRM m
, MonadWriter MetadataModifier m
, BackendMetadata b
)
=> SourceName -> TableCoreInfo b -> TableDiff b -> m ()
alterTableInMetadata source ti tableDiff = do
-- If the table was renamed, don't replace constraints or
-- process dropped/added columns, because a schema reload happens eventually
sc <- askSchemaCache
let tn = _tciName ti
withOldTabName = do
alterColumnsInMetadata source alteredCols tableFields sc tn
withNewTabName :: TableName b -> m ()
withNewTabName newTN = do
-- check for GraphQL schema conflicts on new name
liftEither (tableGraphQLName @b newTN) >>= checkConflictingNode sc . G.unName
alterColumnsInMetadata source alteredCols tableFields sc tn
-- update new table in metadata
renameTableInMetadata @b source newTN tn
-- Process computed field diff
processComputedFieldDiff tn
-- Drop custom column names for dropped columns
alterCustomColumnNamesInMetadata source droppedCols ti
maybe withOldTabName withNewTabName mNewName
where
TableDiff mNewName droppedCols alteredCols _ computedFieldDiff _ _ = tableDiff
tableFields = _tciFieldInfoMap ti
processComputedFieldDiff :: TableName b -> m ()
processComputedFieldDiff table = do
let ComputedFieldDiff _ altered overloaded = computedFieldDiff
getFunction = fmFunction . ccmFunctionMeta
forM_ overloaded $ \(columnName, function) ->
throw400 NotSupported $ "The function " <> function
<<> " associated with computed field" <> columnName
<<> " of table " <> table <<> " is being overloaded"
forM_ altered $ \(old, new) ->
if | (fmType . ccmFunctionMeta) new == FTVOLATILE ->
throw400 NotSupported $ "The type of function " <> getFunction old
<<> " associated with computed field " <> ccmName old
<<> " of table " <> table <<> " is being altered to \"VOLATILE\""
| otherwise -> pure ()
dropTablesInMetadata
:: forall b m
. ( MonadWriter MetadataModifier m
, BackendMetadata b
)
=> SourceName
-> [TableName b]
-> m ()
dropTablesInMetadata source droppedTables =
forM_ droppedTables $
\tn -> tell $ MetadataModifier $ metaSources.ix source.toSourceMetadata.(smTables @b) %~ OMap.delete tn
alterColumnsInMetadata
:: forall b m
. ( MonadError QErr m
, CacheRM m
, MonadWriter MetadataModifier m
, BackendMetadata b
)
=> SourceName
-> [(RawColumnInfo b, RawColumnInfo b)]
-> FieldInfoMap (FieldInfo b)
-> SchemaCache
-> TableName b
-> m ()
alterColumnsInMetadata source alteredCols fields sc tn = for_ alteredCols $
\( RawColumnInfo oldName _ oldType _ _
, RawColumnInfo newName _ newType _ _ ) -> do
if | oldName /= newName ->
renameColumnInMetadata oldName newName source tn fields
| oldType /= newType -> do
let colId =
SOSourceObj source
$ AB.mkAnyBackend
$ SOITableObj @b tn
$ TOCol @b oldName
typeDepObjs = getDependentObjsWith (== DROnType) sc colId
unless (null typeDepObjs) $ throw400 DependencyError $
"cannot change type of column " <> oldName <<> " in table "
<> tn <<> " because of the following dependencies : " <>
reportSchemaObjs typeDepObjs
| otherwise -> pure ()
alterCustomColumnNamesInMetadata
:: forall b m
. (MonadWriter MetadataModifier m, BackendMetadata b)
=> SourceName
-> [Column b]
-> TableCoreInfo b
-> m ()
alterCustomColumnNamesInMetadata source droppedCols ti = do
let TableConfig customFields customColumnNames customName = _tciCustomConfig ti
tn = _tciName ti
modifiedCustomColumnNames = foldl' (flip M.delete) customColumnNames droppedCols
when (modifiedCustomColumnNames /= customColumnNames) $
tell $ MetadataModifier $
tableMetadataSetter @b source tn.tmConfiguration .~
TableConfig @b customFields modifiedCustomColumnNames customName


@@ -544,8 +544,7 @@ instance Backend b => FromJSON (ForeignKeyMetadata b) where
else fail "columns and foreign_columns differ in length"
-- | Metadata of a Postgres table which is being extracted from
-- database via 'src-rsr/pg_table_metadata.sql'
-- | Metadata of any backend table, as extracted from the source database
data DBTableMetadata (b :: BackendType)
= DBTableMetadata
{ _ptmiOid :: !OID


@@ -18,6 +18,7 @@ import Network.HTTP.Client.Extended
import qualified Hasura.Tracing as Tracing
import Hasura.Backends.Postgres.DDL.RunSQL
import Hasura.Base.Error
import Hasura.EncJSON
import Hasura.Metadata.Class


@@ -14,6 +14,7 @@ import Data.Aeson.TH
import qualified Hasura.Backends.BigQuery.DDL.RunSQL as BigQuery
import qualified Hasura.Backends.MSSQL.DDL.RunSQL as MSSQL
import qualified Hasura.Backends.Postgres.DDL.RunSQL as Postgres
import qualified Hasura.Tracing as Tracing
import Hasura.Base.Error
@@ -39,9 +40,9 @@ data RQLQuery
| RQUpdate !UpdateQuery
| RQDelete !DeleteQuery
| RQCount !CountQuery
| RQRunSql !RunSQL
| RQRunSql !Postgres.RunSQL
| RQMssqlRunSql !MSSQL.MSSQLRunSQL
| RQCitusRunSql !RunSQL
| RQCitusRunSql !Postgres.RunSQL
| RQBigqueryRunSql !BigQuery.BigQueryRunSQL
| RQBigqueryDatabaseInspection !BigQuery.BigQueryRunSQL
| RQBulk ![RQLQuery]
@@ -97,7 +98,8 @@ runQuery env instanceId userInfo schemaCache httpManager serverConfigCtx rqlQuer
queryModifiesSchema :: RQLQuery -> Bool
queryModifiesSchema = \case
RQRunSql q -> isSchemaCacheBuildRequiredRunSQL q
RQRunSql q -> Postgres.isSchemaCacheBuildRequiredRunSQL q
RQMssqlRunSql q -> MSSQL.sqlContainsDDLKeyword $ MSSQL._mrsSql q
RQBulk l -> any queryModifiesSchema l
_ -> False
@@ -119,9 +121,9 @@ runQueryM env = \case
RQUpdate q -> runUpdate env q
RQDelete q -> runDelete env q
RQCount q -> runCount q
RQRunSql q -> runRunSQL @'Vanilla q
RQRunSql q -> Postgres.runRunSQL @'Vanilla q
RQMssqlRunSql q -> MSSQL.runSQL q
RQCitusRunSql q -> runRunSQL @'Citus q
RQCitusRunSql q -> Postgres.runRunSQL @'Citus q
RQBigqueryRunSql q -> BigQuery.runSQL q
RQBigqueryDatabaseInspection q -> BigQuery.runDatabaseInspection q
RQBulk l -> encJFromList <$> indexedMapM (runQueryM env) l


@@ -0,0 +1,13 @@
type: bulk
args:
- type: mssql_run_sql
args:
source: mssql
sql: |
CREATE TABLE author (
id INT NOT NULL IDENTITY PRIMARY KEY,
name TEXT
);
INSERT INTO author (name) VALUES ('Bob'), ('Alice');


@@ -0,0 +1,8 @@
type: bulk
args:
- type: mssql_run_sql
args:
source: mssql
sql: |
DROP TABLE author;


@@ -0,0 +1,8 @@
type: bulk
args:
- type: mssql_track_table
args:
source: mssql
table:
name: author


@@ -0,0 +1,85 @@
# Create a table
- url: /v2/query
status: 200
query:
type: mssql_run_sql
args:
source: mssql
sql: |
CREATE TABLE test (
id INT NOT NULL IDENTITY PRIMARY KEY,
name TEXT
);
INSERT INTO test (name) VALUES ('Bob'), ('Alice');
# Track table
- url: /v1/metadata
status: 200
query:
type: mssql_track_table
args:
source: mssql
table:
name: test
# GraphQL Query to fetch data from 'test' table
- url: /v1/graphql
status: 200
response:
data:
test:
- id: 1
name: Bob
- id: 2
name: Alice
query:
query: |
query {
test{
id
name
}
}
# Add a column in SQL
- url: /v2/query
status: 200
query:
type: mssql_run_sql
args:
source: mssql
sql: |
ALTER TABLE test ADD age INT NOT NULL CONSTRAINT age_def DEFAULT 0;
# GraphQL Query to fetch data from 'test' table
- url: /v1/graphql
status: 200
response:
data:
test:
- id: 1
name: Bob
age: 0
- id: 2
name: Alice
age: 0
query:
query: |
query {
test{
id
name
age
}
}
# Now drop the 'test' table
- url: /v2/query
status: 200
query:
type: mssql_run_sql
args:
source: mssql
sql: |
DROP TABLE test;


@@ -0,0 +1,81 @@
# Create a table
- url: /v2/query
status: 200
query:
type: mssql_run_sql
args:
source: mssql
sql: |
CREATE TABLE test (
id INT NOT NULL IDENTITY PRIMARY KEY,
name TEXT
);
INSERT INTO test (name) VALUES ('Bob'), ('Alice');
# Track table
- url: /v1/metadata
status: 200
query:
type: mssql_track_table
args:
source: mssql
table:
name: test
# GraphQL Query to fetch data from 'test' table
- url: /v1/graphql
status: 200
response:
data:
test:
- id: 1
name: Bob
- id: 2
name: Alice
query:
query: |
query {
test{
id
name
}
}
# Drop a column in SQL
- url: /v2/query
status: 200
query:
type: mssql_run_sql
args:
source: mssql
sql: |
ALTER TABLE test DROP COLUMN name;
# Try to fetch the column data via GraphQL
- url: /v1/graphql
status: 200
response:
errors:
- extensions:
path: $.selectionSet.test.selectionSet.name
code: validation-failed
message: "field \"name\" not found in type: 'test'"
query:
query: |
query {
test{
id
name
}
}
# Now drop the 'test' table
- url: /v2/query
status: 200
query:
type: mssql_run_sql
args:
source: mssql
sql: |
DROP TABLE test;


@@ -0,0 +1,71 @@
# Create a table
- url: /v2/query
status: 200
query:
type: mssql_run_sql
args:
source: mssql
sql: |
CREATE TABLE test (
id INT NOT NULL IDENTITY PRIMARY KEY,
name TEXT
);
INSERT INTO test (name) VALUES ('Bob'), ('Alice');
# Track table
- url: /v1/metadata
status: 200
query:
type: mssql_track_table
args:
source: mssql
table:
name: test
# GraphQL Query to fetch data from 'test' table
- url: /v1/graphql
status: 200
response:
data:
test:
- id: 1
name: Bob
- id: 2
name: Alice
query:
query: |
query {
test{
id
name
}
}
# Drop the table in SQL
- url: /v2/query
status: 200
query:
type: mssql_run_sql
args:
source: mssql
sql: |
DROP TABLE test;
# Try to fetch data from dropped table
- url: /v1/graphql
status: 200
response:
errors:
- extensions:
path: $.selectionSet.test
code: validation-failed
message: "field \"test\" not found in type: 'query_root'"
query:
query: |
query {
test{
id
name
}
}


@@ -0,0 +1,82 @@
# Create a table
- url: /v2/query
status: 200
query:
type: mssql_run_sql
args:
source: mssql
sql: |
CREATE TABLE test (
id INT NOT NULL IDENTITY PRIMARY KEY,
name TEXT
);
INSERT INTO test (name) VALUES ('Bob'), ('Alice');
# Track table
- url: /v1/metadata
status: 200
query:
type: mssql_track_table
args:
source: mssql
table:
name: test
# GraphQL Query to fetch data from 'test' table
- url: /v1/graphql
status: 200
response:
data:
test:
- id: 1
name: Bob
- id: 2
name: Alice
query:
query: |
query {
test{
id
name
}
}
# Rename a column in SQL
- url: /v2/query
status: 200
query:
type: mssql_run_sql
args:
source: mssql
sql: |
EXEC sp_rename 'test.name', 'name_new', 'COLUMN';
# GraphQL Query to fetch data from 'test' table
- url: /v1/graphql
status: 200
response:
data:
test:
- id: 1
name_new: Bob
- id: 2
name_new: Alice
query:
query: |
query {
test{
id
name_new
}
}
# Now drop the 'test' table
- url: /v2/query
status: 200
query:
type: mssql_run_sql
args:
source: mssql
sql: |
DROP TABLE test;


@@ -0,0 +1,82 @@
# Create a table
- url: /v2/query
status: 200
query:
type: mssql_run_sql
args:
source: mssql
sql: |
CREATE TABLE test (
id INT NOT NULL IDENTITY PRIMARY KEY,
name TEXT
);
INSERT INTO test (name) VALUES ('Bob'), ('Alice');
# Track table
- url: /v1/metadata
status: 200
query:
type: mssql_track_table
args:
source: mssql
table:
name: test
# GraphQL Query to fetch data from 'test' table
- url: /v1/graphql
status: 200
response:
data:
test:
- id: 1
name: Bob
- id: 2
name: Alice
query:
query: |
query {
test{
id
name
}
}
# Rename the table in SQL
- url: /v2/query
status: 200
query:
type: mssql_run_sql
args:
source: mssql
sql: |
EXEC sp_rename 'dbo.test', 'test_new'
# GraphQL Query to fetch data from renamed 'test_new' table
- url: /v1/graphql
status: 200
response:
data:
test_new:
- id: 1
name: Bob
- id: 2
name: Alice
query:
query: |
query {
test_new{
id
name
}
}
# Now drop the 'test_new' table
- url: /v2/query
status: 200
query:
type: mssql_run_sql
args:
source: mssql
sql: |
DROP TABLE test_new;


@@ -0,0 +1,17 @@
url: /v2/query
status: 200
response:
result_type: TuplesOk
result:
- - id
- name
- - 1
- Bob
- - 2
- Alice
query:
type: mssql_run_sql
args:
source: mssql
sql: |
SELECT * FROM author;


@@ -0,0 +1,2 @@
type: bulk
args: [] # Nothing to tear down as schema_teardown_mssql.yaml drops the table in metadata


@@ -536,6 +536,30 @@ class TestRunSQL:
def dir(cls):
return "queries/v1/run_sql"
@pytest.mark.parametrize("backend", ['mssql'])
@usefixtures('per_class_tests_db_state')
class TestRunSQLMssql:
@classmethod
def dir(cls):
return "queries/v1/run_sql"
def test_select_query(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + '/sql_select_query_mssql.yaml')
def test_drop_table(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + '/sql_drop_table_mssql.yaml')
def test_rename_table(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + '/sql_rename_table_mssql.yaml')
def test_drop_column(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + '/sql_drop_column_mssql.yaml')
def test_add_column(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + '/sql_add_column_mssql.yaml')
def test_rename_column(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + '/sql_rename_column_mssql.yaml')
@usefixtures('per_method_tests_db_state')
class TestRelationships: