{- |
Description: Create/delete SQL tables to/from Hasura metadata.
-}

{-# LANGUAGE TypeApplications #-}

module Hasura.RQL.DDL.Schema.Table where

import Hasura.EncJSON
import Hasura.GraphQL.RemoteServer
import Hasura.Prelude
import Hasura.RQL.DDL.Deps
import Hasura.RQL.DDL.EventTrigger
import Hasura.RQL.DDL.Permission
import Hasura.RQL.DDL.Permission.Internal
import Hasura.RQL.DDL.Relationship
import Hasura.RQL.DDL.RemoteSchema
import Hasura.RQL.DDL.Schema.Diff
import Hasura.RQL.DDL.Schema.Enum
import Hasura.RQL.DDL.Schema.Function
import Hasura.RQL.DDL.Schema.Rename
import Hasura.RQL.DDL.Utils
import Hasura.RQL.Types
import Hasura.RQL.Types.Catalog
import Hasura.RQL.Types.QueryCollection
import Hasura.Server.Utils (matchRegex)
import Hasura.SQL.Types

import qualified Database.PG.Query as Q
import qualified Hasura.GraphQL.Schema as GS

import Control.Lens.Extended hiding ((.=))
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
import Instances.TH.Lift ()
import Language.Haskell.TH.Syntax (Lift)
import Network.URI.Extended ()

import qualified Data.HashMap.Strict as M
import qualified Data.HashSet as HS
import qualified Data.Text as T
import qualified Data.Text.Encoding as TE
import qualified Database.PostgreSQL.LibPQ as PQ

delTableFromCatalog :: QualifiedTable -> Q.Tx ()
delTableFromCatalog (QualifiedObject sn tn) =
  Q.unitQ [Q.sql|
    DELETE FROM "hdb_catalog"."hdb_table"
    WHERE table_schema = $1 AND table_name = $2
  |] (sn, tn) False

saveTableToCatalog :: TrackTable -> Q.Tx ()
saveTableToCatalog (TrackTable (QualifiedObject sn tn) isEnum) =
  Q.unitQ [Q.sql|
    INSERT INTO "hdb_catalog"."hdb_table" (table_schema, table_name, is_enum)
    VALUES ($1, $2, $3)
  |] (sn, tn, isEnum) False

data TrackTable
  = TrackTable
  { tName   :: !QualifiedTable
  , tIsEnum :: !Bool
  } deriving (Show, Eq, Lift)

instance FromJSON TrackTable where
  parseJSON v = withOptions <|> withoutOptions
    where
      withOptions = flip (withObject "TrackTable") v $ \o -> TrackTable
        <$> o .: "table"
        <*> o .:? "is_enum" .!= False
      withoutOptions = TrackTable <$> parseJSON v <*> pure False

instance ToJSON TrackTable where
  toJSON (TrackTable name isEnum)
    | isEnum    = object [ "table" .= name, "is_enum" .= isEnum ]
    | otherwise = toJSON name
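
-- For illustration, the instances above accept either a bare qualified table
-- or an object carrying options (values here are hypothetical, and the table
-- itself follows QualifiedTable's own JSON encoding):
--
--   {"schema": "public", "name": "colors"}
--   {"table": {"schema": "public", "name": "colors"}, "is_enum": true}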

data SetTableIsEnum
  = SetTableIsEnum
  { stieTable  :: !QualifiedTable
  , stieIsEnum :: !Bool
  } deriving (Show, Eq, Lift)
$(deriveJSON (aesonDrop 4 snakeCase) ''SetTableIsEnum)
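
-- With aesonDrop 4 snakeCase, the four-character "stie" prefix is dropped and
-- the remainder is snake-cased, so a payload would look like (hypothetical
-- values):
--
--   {"table": {"schema": "public", "name": "colors"}, "is_enum": true}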

data UntrackTable =
  UntrackTable
  { utTable   :: !QualifiedTable
  , utCascade :: !(Maybe Bool)
  } deriving (Show, Eq, Lift)
$(deriveJSON (aesonDrop 2 snakeCase){omitNothingFields=True} ''UntrackTable)
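
-- Here aesonDrop 2 snakeCase yields the keys "table" and "cascade", and
-- omitNothingFields lets "cascade" be left out, e.g. (hypothetical values):
--
--   {"table": {"schema": "public", "name": "colors"}, "cascade": true}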

-- | Track table/view, Phase 1:
-- Validate table tracking operation. Fails if table is already being tracked,
-- or if a function with the same name is being tracked.
trackExistingTableOrViewP1
  :: (CacheRM m, UserInfoM m, QErrM m) => TrackTable -> m ()
trackExistingTableOrViewP1 TrackTable { tName = vn } = do
  adminOnly
  rawSchemaCache <- askSchemaCache
  when (M.member vn $ scTables rawSchemaCache) $
    throw400 AlreadyTracked $ "view/table already tracked : " <>> vn
  let qf = fmap (FunctionName . getTableTxt) vn
  when (M.member qf $ scFunctions rawSchemaCache) $
    throw400 NotSupported $ "function with name " <> vn <<> " already exists"

trackExistingTableOrViewP2
  :: (QErrM m, CacheRWM m, MonadTx m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
  => TrackTable -> m EncJSON
trackExistingTableOrViewP2 query@TrackTable { tName = tableName } = do
  sc <- askSchemaCache
  let defGCtx = scDefaultRemoteGCtx sc
  GS.checkConflictingNode defGCtx $ GS.qualObjectToName tableName

  liftTx $ Q.catchE defaultTxErrorHandler $ saveTableToCatalog query
  buildSchemaCacheFor (MOTable tableName)

  return successMsg

runTrackTableQ
  :: (QErrM m, CacheRWM m, MonadTx m, UserInfoM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
  => TrackTable -> m EncJSON
runTrackTableQ q = do
  trackExistingTableOrViewP1 q
  trackExistingTableOrViewP2 q

runSetExistingTableIsEnumQ
  :: (QErrM m, CacheRWM m, MonadTx m, UserInfoM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
  => SetTableIsEnum -> m EncJSON
runSetExistingTableIsEnumQ (SetTableIsEnum tableName isEnum) = do
  adminOnly
  void $ askTabInfo tableName -- assert that table is tracked

  liftTx $ Q.unitQE defaultTxErrorHandler [Q.sql|
    UPDATE "hdb_catalog"."hdb_table" SET is_enum = $3
    WHERE table_schema = $1 AND table_name = $2
  |] (qSchema tableName, qName tableName, isEnum) False

  buildSchemaCacheFor (MOTable tableName)
  return successMsg

purgeDep :: (CacheRWM m, MonadTx m)
         => SchemaObjId -> m ()
purgeDep schemaObjId = case schemaObjId of
  (SOTableObj tn (TOPerm rn pt)) -> do
    liftTx $ dropPermFromCatalog tn rn pt
    withPermType pt delPermFromCache rn tn

  (SOTableObj qt (TORel rn)) -> do
    liftTx $ delRelFromCatalog qt rn
    delRelFromCache rn qt

  (SOFunction qf) -> do
    liftTx $ delFunctionFromCatalog qf
    delFunctionFromCache qf

  (SOTableObj qt (TOTrigger trn)) -> do
    liftTx $ delEventTriggerFromCatalog trn
    delEventTriggerFromCache qt trn

  _ -> throw500 $
    "unexpected dependent object : " <> reportSchemaObj schemaObjId

unTrackExistingTableOrViewP1
  :: (CacheRM m, UserInfoM m, QErrM m) => UntrackTable -> m ()
unTrackExistingTableOrViewP1 (UntrackTable vn _) = do
  adminOnly
  rawSchemaCache <- askSchemaCache
  case M.lookup vn (scTables rawSchemaCache) of
    Just ti ->
      -- Check if table/view is system defined
      when (_tiSystemDefined ti) $ throw400 NotSupported $
        vn <<> " is system defined, cannot untrack"
    Nothing -> throw400 AlreadyUntracked $
      "view/table already untracked : " <>> vn

unTrackExistingTableOrViewP2
  :: (QErrM m, CacheRWM m, MonadTx m)
  => UntrackTable -> m EncJSON
unTrackExistingTableOrViewP2 (UntrackTable qtn cascade) = do
  sc <- askSchemaCache

  -- Get relational, query template and function dependants
  let allDeps = getDependentObjs sc (SOTable qtn)
      indirectDeps = filter (not . isDirectDep) allDeps

  -- Report back with an error if cascade is not set
  when (indirectDeps /= [] && not (or cascade)) $ reportDepsExt indirectDeps []

  -- Purge all the dependants from state
  mapM_ purgeDep indirectDeps

  -- delete the table and its direct dependencies
  delTableAndDirectDeps qtn

  return successMsg
  where
    isDirectDep = \case
      (SOTableObj dtn _) -> qtn == dtn
      _                  -> False

runUntrackTableQ
  :: (QErrM m, CacheRWM m, MonadTx m, UserInfoM m)
  => UntrackTable -> m EncJSON
runUntrackTableQ q = do
  unTrackExistingTableOrViewP1 q
  unTrackExistingTableOrViewP2 q

processTableChanges :: (MonadTx m, CacheRWM m)
                    => TableInfo PGColInfo -> TableDiff -> m Bool
processTableChanges ti tableDiff = do
  -- If the table is renamed, don't replace constraints or process
  -- dropped/added columns, because a schema reload happens eventually
  sc <- askSchemaCache
  let tn = _tiName ti
      withOldTabName = do
        replaceConstraints tn
        procDroppedCols tn
        procAddedCols tn
        procAlteredCols sc tn

      withNewTabName newTN = do
        let tnGQL = GS.qualObjectToName newTN
            defGCtx = scDefaultRemoteGCtx sc
        -- check for GraphQL schema conflicts on new name
        GS.checkConflictingNode defGCtx tnGQL
        void $ procAlteredCols sc tn
        -- update new table in catalog
        renameTableInCatalog newTN tn
        return True

  maybe withOldTabName withNewTabName mNewName

  where
    TableDiff mNewName droppedCols addedCols alteredCols _ constraints = tableDiff
    replaceConstraints tn = flip modTableInCache tn $ \tInfo ->
      return $ tInfo {_tiUniqOrPrimConstraints = constraints}

    procDroppedCols tn =
      forM_ droppedCols $ \droppedCol ->
        -- Drop the column from the cache
        delColFromCache droppedCol tn

    procAddedCols tn =
      -- Check that the newly added columns don't conflict with existing relationships
      forM_ addedCols $ \rawInfo@(PGRawColInfo colName _ _ _) ->
        case M.lookup (fromPGCol colName) $ _tiFieldInfoMap ti of
          Just (FIRelationship _) ->
            throw400 AlreadyExists $ "cannot add column " <> colName
              <<> " in table " <> tn <<>
              " as a relationship with the name already exists"
          _ -> do
            info <- processColumnInfoUsingCache tn rawInfo
            addColToCache colName info tn

    procAlteredCols sc tn = fmap or $ forM alteredCols $
      \( PGRawColInfo oldName oldType _ _
       , newRawInfo@(PGRawColInfo newName newType _ _) ) -> do
        let performColumnUpdate = do
              newInfo <- processColumnInfoUsingCache tn newRawInfo
              updColInCache newName newInfo tn

        if | oldName /= newName -> renameColInCatalog oldName newName tn ti $> True

           | oldType /= newType -> do
               let colId = SOTableObj tn $ TOCol oldName
                   typeDepObjs = getDependentObjsWith (== DROnType) sc colId

               unless (null typeDepObjs) $ throw400 DependencyError $
                 "cannot change type of column " <> oldName <<> " in table "
                 <> tn <<> " because of the following dependencies : " <>
                 reportSchemaObjs typeDepObjs

               performColumnUpdate

               -- If any dependent permissions compare the column whose type is
               -- being altered against a session variable, rebuild their
               -- permission info and update the cache
               let sessVarDepObjs = getDependentObjsWith (== DRSessionVariable) sc colId
               forM_ sessVarDepObjs $ \case
                 SOTableObj qt (TOPerm rn pt) -> rebuildPermInfo qt rn pt
                 _ -> throw500 "unexpected schema dependency found for altering column type"
               pure False

           | otherwise -> performColumnUpdate $> False

delTableAndDirectDeps
  :: (QErrM m, CacheRWM m, MonadTx m) => QualifiedTable -> m ()
delTableAndDirectDeps qtn@(QualifiedObject sn tn) = do
  liftTx $ Q.catchE defaultTxErrorHandler $ do
    Q.unitQ [Q.sql|
      DELETE FROM "hdb_catalog"."hdb_relationship"
      WHERE table_schema = $1 AND table_name = $2
    |] (sn, tn) False
    Q.unitQ [Q.sql|
      DELETE FROM "hdb_catalog"."hdb_permission"
      WHERE table_schema = $1 AND table_name = $2
    |] (sn, tn) False
    Q.unitQ [Q.sql|
      DELETE FROM "hdb_catalog"."event_triggers"
      WHERE schema_name = $1 AND table_name = $2
    |] (sn, tn) False
    delTableFromCatalog qtn
  delTableFromCache qtn

processSchemaChanges :: (MonadTx m, CacheRWM m) => SchemaDiff -> m Bool
processSchemaChanges schemaDiff = do
  -- Purge the dropped tables
  mapM_ delTableAndDirectDeps droppedTables

  sc <- askSchemaCache
  fmap or $ forM alteredTables $ \(oldQtn, tableDiff) -> do
    ti <- case M.lookup oldQtn $ scTables sc of
      Just ti -> return ti
      Nothing -> throw500 $ "old table metadata not found in cache : " <>> oldQtn
    processTableChanges ti tableDiff
  where
    SchemaDiff droppedTables alteredTables = schemaDiff

-- | Builds an initial @'TableCache' 'PGColInfo'@ from catalog information. Does not fill in
-- '_tiRolePermInfoMap' or '_tiEventTriggerInfoMap' at all, and '_tiFieldInfoMap' only contains
-- columns, not relationships; those pieces of information are filled in by later stages.
buildTableCache
  :: forall m. (MonadTx m, CacheRWM m)
  => [CatalogTable] -> m (TableCache PGColInfo)
buildTableCache = processTableCache <=< buildRawTableCache
  where
    withTable name = withSchemaObject $
      InconsistentMetadataObj (MOTable name) MOTTable (toJSON name)

    -- Step 1: Build the raw table cache from metadata information.
    buildRawTableCache :: [CatalogTable] -> m (TableCache PGRawColInfo)
    buildRawTableCache catalogTables = fmap (M.fromList . catMaybes) . for catalogTables $
      \(CatalogTable name isSystemDefined isEnum maybeInfo) -> withTable name $ do
        catalogInfo <- onNothing maybeInfo $
          throw400 NotExists $ "no such table/view exists in postgres: " <>> name

        let CatalogTableInfo columns constraints primaryKeyColumnNames viewInfo = catalogInfo
            columnFields = M.fromList . flip map columns $ \column ->
              (fromPGCol $ prciName column, FIColumn column)

            primaryKeyColumns = flip filter columns $ \column ->
              prciName column `elem` primaryKeyColumnNames
            fetchEnumValues = fetchAndValidateEnumValues name primaryKeyColumns columns

        maybeEnumValues <- if isEnum then Just <$> fetchEnumValues else pure Nothing

        let info = TableInfo
              { _tiName = name
              , _tiSystemDefined = isSystemDefined
              , _tiFieldInfoMap = columnFields
              , _tiRolePermInfoMap = mempty
              , _tiUniqOrPrimConstraints = constraints
              , _tiPrimaryKeyCols = primaryKeyColumnNames
              , _tiViewInfo = viewInfo
              , _tiEventTriggerInfoMap = mempty
              , _tiEnumValues = maybeEnumValues }
        pure (name, info)

    -- Step 2: Process the raw table cache to replace Postgres column types with logical column
    -- types.
    processTableCache :: TableCache PGRawColInfo -> m (TableCache PGColInfo)
    processTableCache rawTables = fmap (M.mapMaybe id) . for rawTables $ \rawInfo -> do
      let tableName = _tiName rawInfo
      withTable tableName $ rawInfo
        & tiFieldInfoMap.traverse._FIColumn %%~ processColumnInfo enumTables tableName
      where
        enumTables = M.mapMaybe _tiEnumValues rawTables

-- | “Processes” a 'PGRawColInfo' into a 'PGColInfo' by resolving its type using a map of known
-- enum tables.
processColumnInfo
  :: (QErrM m)
  => M.HashMap QualifiedTable EnumValues -- ^ known enum tables
  -> QualifiedTable -- ^ the table this column belongs to
  -> PGRawColInfo -- ^ the column’s raw information
  -> m PGColInfo
processColumnInfo enumTables tableName rawInfo = do
  resolvedType <- resolveColumnType
  pure PGColInfo
    { pgiName = prciName rawInfo
    , pgiType = resolvedType
    , pgiIsNullable = prciIsNullable rawInfo }
  where
    resolveColumnType =
      case prciReferences rawInfo of
        -- no referenced tables? definitely not an enum
        [] -> pure $ PGColumnScalar (prciType rawInfo)

        -- one referenced table? might be an enum, so check if the referenced table is an enum
        [referencedTableName] -> pure $ M.lookup referencedTableName enumTables & maybe
          (PGColumnScalar $ prciType rawInfo)
          (PGColumnEnumReference . EnumReference referencedTableName)

        -- multiple referenced tables? we could check if any of them are enums, but the schema is
        -- strange, so let’s just reject it
        referencedTables -> throw400 ConstraintViolation
          $ "cannot handle exotic schema: column " <> prciName rawInfo <<> " in table "
          <> tableName <<> " references multiple foreign tables ("
          <> T.intercalate ", " (map dquote referencedTables) <> ")?"
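
-- A worked example of the resolution above: a column with exactly one foreign
-- key pointing at a table tracked as an enum resolves to a
-- 'PGColumnEnumReference' to that table; one pointing anywhere else (or with
-- no reference at all) stays a plain 'PGColumnScalar'; and multiple
-- references are rejected outright.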

-- | Like 'processColumnInfo', but uses the information in the current schema cache to resolve a
-- column’s type.
processColumnInfoUsingCache :: (CacheRM m, QErrM m) => QualifiedTable -> PGRawColInfo -> m PGColInfo
processColumnInfoUsingCache tableName rawInfo = do
  tables <- scTables <$> askSchemaCache
  processColumnInfo (M.mapMaybe _tiEnumValues tables) tableName rawInfo

withSchemaObject
  :: (QErrM m, CacheRWM m)
  => (T.Text -> InconsistentMetadataObj)
  -> m a
  -> m (Maybe a)
withSchemaObject f action =
  (Just <$> action) `catchError` \err -> do
    sc <- askSchemaCache
    let inconsObj = f $ qeError err
        allInconsObjs = inconsObj:scInconsistentObjs sc
    writeSchemaCache sc { scInconsistentObjs = allInconsObjs }
    pure Nothing

withSchemaObject_ :: (QErrM m, CacheRWM m) => (T.Text -> InconsistentMetadataObj) -> m () -> m ()
withSchemaObject_ f = void . withSchemaObject f
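
-- For illustration: rather than letting one failure abort the whole
-- operation, 'withSchemaObject' records the failing object as inconsistent
-- metadata and returns 'Nothing'; this is how @withTable@ inside
-- 'buildTableCache' skips a broken table while the rest of the cache is
-- still built.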

checkNewInconsistentMeta
  :: (QErrM m)
  => SchemaCache -- old schema cache
  -> SchemaCache -- new schema cache
  -> m ()
checkNewInconsistentMeta oldSC newSC =
  unless (null newInconsMetaObjects) $ do
    let err = err500 Unexpected
          "cannot continue due to newly found inconsistent metadata"
    throwError err{qeInternal = Just $ toJSON newInconsMetaObjects}
  where
    oldInconsMeta = scInconsistentObjs oldSC
    newInconsMeta = scInconsistentObjs newSC
    newInconsMetaObjects = getDifference _moId newInconsMeta oldInconsMeta

buildSchemaCacheStrict
  :: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
  => m ()
buildSchemaCacheStrict = do
  buildSchemaCache
  sc <- askSchemaCache
  let inconsObjs = scInconsistentObjs sc
  unless (null inconsObjs) $ do
    let err = err400 Unexpected "cannot continue due to inconsistent metadata"
    throwError err{qeInternal = Just $ toJSON inconsObjs}

-- | Rebuilds the schema cache. If an object with the given object id became newly inconsistent,
-- raises an error about it specifically. Otherwise, raises a generic metadata inconsistency error.
buildSchemaCacheFor
  :: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
  => MetadataObjId -> m ()
buildSchemaCacheFor objectId = do
  oldSchemaCache <- askSchemaCache
  buildSchemaCache
  newSchemaCache <- askSchemaCache

  let diffInconsistentObjects = getDifference _moId `on` scInconsistentObjs
      newInconsistentObjects = newSchemaCache `diffInconsistentObjects` oldSchemaCache

  for_ (find ((== objectId) . _moId) newInconsistentObjects) $ \matchingObject ->
    throw400 ConstraintViolation (_moReason matchingObject)

  unless (null newInconsistentObjects) $
    throwError (err400 Unexpected "cannot continue due to new inconsistent metadata")
      { qeInternal = Just $ toJSON newInconsistentObjects }

buildSchemaCache
  :: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
  => m ()
buildSchemaCache = buildSchemaCacheG True

buildSCWithoutSetup
  :: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
  => m ()
buildSCWithoutSetup = buildSchemaCacheG False

buildSchemaCacheG
  :: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
  => Bool -> m ()
buildSchemaCacheG withSetup = do
  -- clean hdb_views
  when withSetup $ liftTx $ Q.catchE defaultTxErrorHandler clearHdbViews
  -- reset the current schema cache
  writeSchemaCache emptySchemaCache
  sqlGenCtx <- askSQLGenCtx

  -- fetch all catalog metadata
  CatalogMetadata tables relationships permissions
    eventTriggers remoteSchemas functions fkeys' allowlistDefs
    <- liftTx fetchCatalogData

  let fkeys = HS.fromList fkeys'

  -- tables
  modTableCache =<< buildTableCache tables

  -- relationships
  forM_ relationships $ \(CatalogRelation qt rn rt rDef cmnt) -> do
    let objId = MOTableObj qt $ MTORel rn rt
        def = toJSON $ WithTable qt $ RelDef rn rDef cmnt
        mkInconsObj = InconsistentMetadataObj objId (MOTRel rt) def
    modifyErr (\e -> "table " <> qt <<> "; rel " <> rn <<> "; " <> e) $
      withSchemaObject_ mkInconsObj $
      case rt of
        ObjRel -> do
          using <- decodeValue rDef
          let relDef = RelDef rn using Nothing
          validateObjRel qt relDef
          objRelP2Setup qt fkeys relDef
        ArrRel -> do
          using <- decodeValue rDef
          let relDef = RelDef rn using Nothing
          validateArrRel qt relDef
          arrRelP2Setup qt fkeys relDef

  -- permissions
  forM_ permissions $ \(CatalogPermission qt rn pt pDef cmnt) -> do
    let objId = MOTableObj qt $ MTOPerm rn pt
        def = toJSON $ WithTable qt $ PermDef rn pDef cmnt
        mkInconsObj = InconsistentMetadataObj objId (MOTPerm pt) def
    modifyErr (\e -> "table " <> qt <<> "; role " <> rn <<> "; " <> e) $
      withSchemaObject_ mkInconsObj $
      case pt of
        PTInsert -> permHelper withSetup sqlGenCtx qt rn pDef PAInsert
        PTSelect -> permHelper withSetup sqlGenCtx qt rn pDef PASelect
        PTUpdate -> permHelper withSetup sqlGenCtx qt rn pDef PAUpdate
        PTDelete -> permHelper withSetup sqlGenCtx qt rn pDef PADelete

  -- event triggers
  forM_ eventTriggers $ \(CatalogEventTrigger qt trn configuration) -> do
    let objId = MOTableObj qt $ MTOTrigger trn
        def = object ["table" .= qt, "configuration" .= configuration]
        mkInconsObj = InconsistentMetadataObj objId MOTEventTrigger def
    withSchemaObject_ mkInconsObj $ do
      etc <- decodeValue configuration
      subTableP2Setup qt etc
      allCols <- getCols . _tiFieldInfoMap <$> askTabInfo qt
      when withSetup $ liftTx $
        mkAllTriggersQ trn qt allCols (stringifyNum sqlGenCtx) (etcDefinition etc)

  -- sql functions
  forM_ functions $ \(CatalogFunction qf rawfiM) -> do
    let def = toJSON $ TrackFunction qf
        mkInconsObj =
          InconsistentMetadataObj (MOFunction qf) MOTFunction def
    modifyErr (\e -> "function " <> qf <<> "; " <> e) $
      withSchemaObject_ mkInconsObj $ do
      rawfi <- onNothing rawfiM $
        throw400 NotExists $ "no such function exists in postgres : " <>> qf
      trackFunctionP2Setup qf rawfi

  -- allow list
  replaceAllowlist $ concatMap _cdQueries allowlistDefs

  -- build GraphQL context with tables and functions
  GS.buildGCtxMapPG

  -- remote schemas
  forM_ remoteSchemas resolveSingleRemoteSchema

  where
    permHelper setup sqlGenCtx qt rn pDef pa = do
      qCtx <- mkAdminQCtx sqlGenCtx <$> askSchemaCache
      perm <- decodeValue pDef
      let permDef = PermDef rn perm Nothing
          createPerm = WithTable qt permDef
      (permInfo, deps) <- liftP1WithQCtx qCtx $ createPermP1 createPerm
      when setup $ addPermP2Setup qt permDef permInfo
      addPermToCache qt rn pa permInfo deps
      -- p2F qt rn p1Res

    resolveSingleRemoteSchema rs = do
      let AddRemoteSchemaQuery name _ _ = rs
          mkInconsObj = InconsistentMetadataObj (MORemoteSchema name)
                        MOTRemoteSchema (toJSON rs)
      withSchemaObject_ mkInconsObj $ do
        rsCtx <- addRemoteSchemaP2Setup rs
        sc <- askSchemaCache
        let gCtxMap = scGCtxMap sc
            defGCtx = scDefaultRemoteGCtx sc
            rGCtx = convRemoteGCtx $ rscGCtx rsCtx
        mergedGCtxMap <- mergeRemoteSchema gCtxMap rGCtx
        mergedDefGCtx <- mergeGCtx defGCtx rGCtx
        writeSchemaCache sc { scGCtxMap = mergedGCtxMap
                            , scDefaultRemoteGCtx = mergedDefGCtx
                            }

data RunSQL
  = RunSQL
  { rSql                      :: T.Text
  , rCascade                  :: !(Maybe Bool)
  , rCheckMetadataConsistency :: !(Maybe Bool)
  } deriving (Show, Eq, Lift)
$(deriveJSON (aesonDrop 1 snakeCase){omitNothingFields=True} ''RunSQL)
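
-- With the "r" prefix dropped, a run_sql payload would look like this
-- (hypothetical SQL; the two Maybe fields are optional):
--
--   {"sql": "ALTER TABLE colors ADD COLUMN hex text", "cascade": false,
--    "check_metadata_consistency": true}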

data RunSQLRes
  = RunSQLRes
  { rrResultType :: !T.Text
  , rrResult     :: !Value
  } deriving (Show, Eq)
$(deriveJSON (aesonDrop 2 snakeCase){omitNothingFields=True} ''RunSQLRes)

instance Q.FromRes RunSQLRes where
  fromRes (Q.ResultOkEmpty _) =
    return $ RunSQLRes "CommandOk" Null
  fromRes (Q.ResultOkData res) = do
    csvRows <- resToCSV res
    return $ RunSQLRes "TuplesOk" $ toJSON csvRows
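
-- For illustration, a row-returning statement encodes as (hypothetical
-- values; the header row comes first, per 'resToCSV'):
--
--   {"result_type": "TuplesOk", "result": [["name"], ["red"], ["blue"]]}
--
-- while a statement returning no rows encodes as
-- {"result_type": "CommandOk", "result": null}.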

execRawSQL :: (MonadTx m) => T.Text -> m EncJSON
execRawSQL =
  fmap (encJFromJValue @RunSQLRes) .
  liftTx . Q.multiQE rawSqlErrHandler . Q.fromText
  where
    rawSqlErrHandler txe =
      let e = err400 PostgresError "query execution failed"
      in e {qeInternal = Just $ toJSON txe}

execWithMDCheck
  :: (QErrM m, CacheRWM m, MonadTx m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
  => RunSQL -> m EncJSON
execWithMDCheck (RunSQL t cascade _) = do
  -- Drop hdb_views so they cannot interfere with the SQL query
  liftTx $ Q.catchE defaultTxErrorHandler clearHdbViews

  -- Get all the metadata before the SQL query; it is filtered down below
  oldMetaU <- liftTx $ Q.catchE defaultTxErrorHandler fetchTableMeta
  oldFuncMetaU <-
    liftTx $ Q.catchE defaultTxErrorHandler fetchFunctionMeta

  -- Run the SQL
  res <- execRawSQL t

  -- Get the metadata after the SQL query
  newMeta <- liftTx $ Q.catchE defaultTxErrorHandler fetchTableMeta
  newFuncMeta <- liftTx $ Q.catchE defaultTxErrorHandler fetchFunctionMeta
  sc <- askSchemaCache
  let existingTables = M.keys $ scTables sc
      oldMeta = flip filter oldMetaU $ \tm -> tmTable tm `elem` existingTables
      schemaDiff = getSchemaDiff oldMeta newMeta
      existingFuncs = M.keys $ scFunctions sc
      oldFuncMeta = flip filter oldFuncMetaU $ \fm -> funcFromMeta fm `elem` existingFuncs
      FunctionDiff droppedFuncs alteredFuncs = getFuncDiff oldFuncMeta newFuncMeta
      overloadedFuncs = getOverloadedFuncs existingFuncs newFuncMeta

  -- Do not allow overloading functions
  unless (null overloadedFuncs) $
    throw400 NotSupported $ "the following tracked function(s) cannot be overloaded: "
    <> reportFuncs overloadedFuncs

  indirectDeps <- getSchemaChangeDeps schemaDiff

  -- Report back with an error if cascade is not set
  when (indirectDeps /= [] && not (or cascade)) $ reportDepsExt indirectDeps []

  -- Purge all the indirect dependents from state
  mapM_ purgeDep indirectDeps

  -- Purge all dropped functions
  let purgedFuncs = flip mapMaybe indirectDeps $ \dep ->
        case dep of
          SOFunction qf -> Just qf
          _             -> Nothing

  forM_ (droppedFuncs \\ purgedFuncs) $ \qf -> do
    liftTx $ delFunctionFromCatalog qf
    delFunctionFromCache qf

  -- Process altered functions
  forM_ alteredFuncs $ \(qf, newTy) ->
    when (newTy == FTVOLATILE) $
      throw400 NotSupported $
      "type of function " <> qf <<> " is altered to \"VOLATILE\" which is not supported now"

  -- update the schema cache and hdb_catalog with the changes
  reloadRequired <- processSchemaChanges schemaDiff

  let withReload = do -- in case of any rename
        buildSchemaCache
        newSC <- askSchemaCache
        checkNewInconsistentMeta sc newSC

      withoutReload = do
        postSc <- askSchemaCache
        -- recreate the insert permission infra
        forM_ (M.elems $ scTables postSc) $ \ti -> do
          let tn = _tiName ti
          forM_ (M.elems $ _tiRolePermInfoMap ti) $ \rpi ->
            maybe (return ()) (liftTx . buildInsInfra tn) $ _permIns rpi

        strfyNum <- stringifyNum <$> askSQLGenCtx
        -- recreate triggers
        forM_ (M.elems $ scTables postSc) $ \ti -> do
          let tn = _tiName ti
              cols = getCols $ _tiFieldInfoMap ti
          forM_ (M.toList $ _tiEventTriggerInfoMap ti) $ \(trn, eti) -> do
            let fullspec = etiOpsDef eti
            liftTx $ mkAllTriggersQ trn tn cols strfyNum fullspec

  bool withoutReload withReload reloadRequired

  return res
  where
    reportFuncs = T.intercalate ", " . map dquoteTxt

isAltrDropReplace :: QErrM m => T.Text -> m Bool
isAltrDropReplace = either throwErr return . matchRegex regex False
  where
    throwErr s = throw500 $ "compiling regex failed: " <> T.pack s
    regex = "alter|drop|replace|create function"
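
-- For illustration: any statement containing "alter", "drop", "replace", or
-- "create function" (e.g. "drop table colors") matches the regex above, which
-- is what makes 'runRunSQL' default to the metadata-consistency path for
-- such SQL.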

runRunSQL
  :: (QErrM m, UserInfoM m, CacheRWM m, MonadTx m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
  => RunSQL -> m EncJSON
runRunSQL q@(RunSQL t _ mChkMDCnstcy) = do
  adminOnly
  isMDChkNeeded <- maybe (isAltrDropReplace t) return mChkMDCnstcy
  bool (execRawSQL t) (execWithMDCheck q) isMDChkNeeded

-- Should be used only after checking the status
resToCSV :: PQ.Result -> ExceptT T.Text IO [[T.Text]]
resToCSV r = do
  nr <- liftIO $ PQ.ntuples r
  nc <- liftIO $ PQ.nfields r

  hdr <- forM [0..pred nc] $ \ic -> do
    colNameBS <- liftIO $ PQ.fname r ic
    maybe (return "unknown") decodeBS colNameBS

  rows <- forM [0..pred nr] $ \ir ->
    forM [0..pred nc] $ \ic -> do
      cellValBS <- liftIO $ PQ.getvalue r ir ic
      maybe (return "NULL") decodeBS cellValBS

  return $ hdr:rows

  where
    decodeBS = either (throwError . T.pack . show) return . TE.decodeUtf8'