2019-08-14 02:34:37 +03:00
|
|
|
|
{-| This module (along with the various @Hasura.RQL.DDL.Schema.*@ modules) provides operations to
|
|
|
|
|
load and modify the Hasura catalog and schema cache.
|
|
|
|
|
|
|
|
|
|
* The /catalog/ refers to the set of PostgreSQL tables and views that store all schema information
|
|
|
|
|
known by Hasura. This includes any tracked Postgres tables, views, and functions, all remote
|
|
|
|
|
schemas, and any additional Hasura-specific information such as permissions and relationships.
|
|
|
|
|
|
|
|
|
|
Primitive functions for loading and modifying the catalog are defined in
|
|
|
|
|
"Hasura.RQL.DDL.Schema.Catalog", but most uses are wrapped by other functions to synchronize
|
|
|
|
|
catalog information with the information in the schema cache.
|
|
|
|
|
|
|
|
|
|
* The /schema cache/ is a process-global value of type 'SchemaCache' that stores an in-memory
|
|
|
|
|
representation of the data stored in the catalog. The in-memory representation is not identical
|
|
|
|
|
to the data in the catalog, since it has some post-processing applied to it in order to make it
|
|
|
|
|
easier to consume for other parts of the system, such as GraphQL schema generation. For example,
|
2020-10-22 23:42:27 +03:00
|
|
|
|
although column information is represented by 'RawColumnInfo', the schema cache contains
|
|
|
|
|
“processed” 'ColumnInfo' values, instead.
|
2019-08-14 02:34:37 +03:00
|
|
|
|
|
|
|
|
|
Ultimately, the catalog is the source of truth for all information contained in the schema
|
|
|
|
|
cache, but to avoid rebuilding the entire schema cache on every change to the catalog, various
|
|
|
|
|
functions incrementally update the cache when they modify the catalog.
|
|
|
|
|
-}
|
2019-11-15 03:20:18 +03:00
|
|
|
|
|
|
|
|
|
{-# LANGUAGE RecordWildCards #-}
|
|
|
|
|
|
2019-08-14 02:34:37 +03:00
|
|
|
|
module Hasura.RQL.DDL.Schema
|
|
|
|
|
( module Hasura.RQL.DDL.Schema.Cache
|
|
|
|
|
, module Hasura.RQL.DDL.Schema.Catalog
|
|
|
|
|
, module Hasura.RQL.DDL.Schema.Function
|
|
|
|
|
, module Hasura.RQL.DDL.Schema.Rename
|
|
|
|
|
, module Hasura.RQL.DDL.Schema.Table
|
|
|
|
|
|
|
|
|
|
, RunSQL(..)
|
|
|
|
|
, runRunSQL
|
2020-04-23 00:03:23 +03:00
|
|
|
|
, isSchemaCacheBuildRequiredRunSQL
|
2019-08-14 02:34:37 +03:00
|
|
|
|
) where
|
|
|
|
|
|
|
|
|
|
import Hasura.Prelude
|
|
|
|
|
|
|
|
|
|
import qualified Data.Text.Encoding as TE
|
|
|
|
|
import qualified Database.PG.Query as Q
|
|
|
|
|
import qualified Database.PostgreSQL.LibPQ as PQ
|
2020-04-02 02:22:55 +03:00
|
|
|
|
import qualified Text.Regex.TDFA as TDFA
|
2019-08-14 02:34:37 +03:00
|
|
|
|
|
2020-12-28 15:56:00 +03:00
|
|
|
|
import Control.Monad.Trans.Control (MonadBaseControl)
|
2019-08-14 02:34:37 +03:00
|
|
|
|
import Data.Aeson
|
|
|
|
|
import Data.Aeson.Casing
|
|
|
|
|
import Data.Aeson.TH
|
|
|
|
|
|
|
|
|
|
import Hasura.EncJSON
|
|
|
|
|
import Hasura.RQL.DDL.Schema.Cache
|
|
|
|
|
import Hasura.RQL.DDL.Schema.Catalog
|
|
|
|
|
import Hasura.RQL.DDL.Schema.Function
|
|
|
|
|
import Hasura.RQL.DDL.Schema.Rename
|
|
|
|
|
import Hasura.RQL.DDL.Schema.Table
|
|
|
|
|
import Hasura.RQL.Instances ()
|
|
|
|
|
import Hasura.RQL.Types
|
2020-04-02 02:22:55 +03:00
|
|
|
|
import Hasura.Server.Utils (quoteRegex)
|
2019-08-14 02:34:37 +03:00
|
|
|
|
|
|
|
|
|
-- | A @run_sql@ API request: raw SQL to execute against a Postgres source,
-- together with flags controlling cascading drops, metadata-consistency
-- checking, and the transaction access mode.
data RunSQL
  = RunSQL
  { rSql                      :: !Text
  -- ^ the SQL text to execute (strict, for consistency with the other fields)
  , rCascade                  :: !Bool
  -- ^ whether to cascade drops to dependent metadata objects
  , rCheckMetadataConsistency :: !(Maybe Bool)
  -- ^ explicit override for the metadata-consistency check; when 'Nothing',
  -- a keyword heuristic decides — see Note [Checking metadata consistency in run_sql]
  , rTxAccessMode             :: !Q.TxAccess
  -- ^ whether the SQL runs in a READ ONLY or READ WRITE transaction
  } deriving (Show, Eq)
|
2019-11-15 03:20:18 +03:00
|
|
|
|
|
|
|
|
|
-- | Decode a @run_sql@ request object. @cascade@ and @read_only@ default to
-- false when absent; the boolean @read_only@ flag is translated into a
-- 'Q.TxAccess' transaction access mode.
instance FromJSON RunSQL where
  parseJSON = withObject "RunSQL" $ \o -> do
    sql              <- o .: "sql"
    cascade          <- o .:? "cascade" .!= False
    checkConsistency <- o .:? "check_metadata_consistency"
    readOnly         <- o .:? "read_only" .!= False
    pure RunSQL
      { rSql = sql
      , rCascade = cascade
      , rCheckMetadataConsistency = checkConsistency
      , rTxAccessMode = if readOnly then Q.ReadOnly else Q.ReadWrite
      }
|
|
|
|
|
|
|
|
|
|
-- | Encode a 'RunSQL' back into the @run_sql@ request shape; the transaction
-- access mode is rendered as the boolean @read_only@ field.
instance ToJSON RunSQL where
  toJSON q =
    object
      [ "sql" .= rSql q
      , "cascade" .= rCascade q
      , "check_metadata_consistency" .= rCheckMetadataConsistency q
      , "read_only" .= isReadOnly (rTxAccessMode q)
      ]
    where
      -- inverse of the @read_only@ translation performed in 'parseJSON'
      isReadOnly Q.ReadOnly  = True
      isReadOnly Q.ReadWrite = False
|
2019-08-14 02:34:37 +03:00
|
|
|
|
|
2020-04-23 00:03:23 +03:00
|
|
|
|
-- | see Note [Checking metadata consistency in run_sql]
isSchemaCacheBuildRequiredRunSQL :: RunSQL -> Bool
isSchemaCacheBuildRequiredRunSQL q =
  case rTxAccessMode q of
    -- a READ ONLY transaction cannot modify the schema, so no rebuild is needed
    Q.ReadOnly  -> False
    -- honour an explicit user choice; otherwise fall back to the keyword heuristic
    Q.ReadWrite -> fromMaybe (containsDDLKeyword (rSql q)) (rCheckMetadataConsistency q)
  where
    -- crude heuristic: does the SQL mention any DDL-ish keyword? The regex is
    -- compiled at build time via a typed Template Haskell splice.
    containsDDLKeyword :: Text -> Bool
    containsDDLKeyword =
      TDFA.match
        $$(quoteRegex
             TDFA.defaultCompOpt
               { TDFA.caseSensitive = False
               , TDFA.multiline = True
               , TDFA.lastStarGreedy = True }
             TDFA.defaultExecOpt
               { TDFA.captureGroups = False }
             "\\balter\\b|\\bdrop\\b|\\breplace\\b|\\bcreate function\\b|\\bcomment on\\b")
|
|
|
|
|
|
2020-12-28 15:56:00 +03:00
|
|
|
|
-- | Execute a 'RunSQL' request against the given Postgres source. When the
-- query might change the Postgres schema (per
-- 'isSchemaCacheBuildRequiredRunSQL'), execution is wrapped in
-- 'withMetadataCheck' so metadata is diffed and the schema cache updated;
-- otherwise the SQL is run directly in a transaction with the requested
-- access mode.
runRunSQL :: (MonadIO m, MonadBaseControl IO m, MonadError QErr m, CacheRWM m, HasSQLGenCtx m, MetadataM m)
          => SourceName -> RunSQL -> m EncJSON
runRunSQL source q@RunSQL {..}
  -- see Note [Checking metadata consistency in run_sql]
  | isSchemaCacheBuildRequiredRunSQL q
  = withMetadataCheck source rCascade rTxAccessMode $ execRawSQL rSql
  | otherwise
  -- no metadata check needed: fetch the source's connection configuration and
  -- run the SQL in a plain transaction
  = (_pcConfiguration <$> askPGSourceCache source) >>= \sourceConfig ->
      liftEitherM $ runExceptT $
        runLazyTx (_pscExecCtx sourceConfig) rTxAccessMode $ execRawSQL rSql
  where
    -- run the raw SQL (possibly multiple statements, via multiQE) and encode
    -- the Postgres result as a 'RunSQLRes' JSON value
    execRawSQL :: (MonadTx m) => Text -> m EncJSON
    execRawSQL =
      fmap (encJFromJValue @RunSQLRes) . liftTx . Q.multiQE rawSqlErrHandler . Q.fromText
      where
        -- surface the transaction error as a 400, keeping the raw error
        -- in the internal field of the response
        rawSqlErrHandler txe =
          (err400 PostgresError "query execution failed") { qeInternal = Just $ toJSON txe }
|
|
|
|
|
|
|
|
|
|
{- Note [Checking metadata consistency in run_sql]
|
|
|
|
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
|
|
|
SQL queries executed by run_sql may change the Postgres schema in arbitrary
|
|
|
|
|
ways. We attempt to automatically update the metadata to reflect those changes
|
|
|
|
|
as much as possible---for example, if a table is renamed, we want to update the
|
|
|
|
|
metadata to track the table under its new name instead of its old one. This
|
|
|
|
|
schema diffing (plus some integrity checking) is handled by withMetadataCheck.
|
|
|
|
|
|
|
|
|
|
But this process has overhead---it involves reloading the metadata, diffing it,
|
|
|
|
|
and rebuilding the schema cache---so we don’t want to do it if it isn’t
|
|
|
|
|
necessary. The user can explicitly disable the check via the
|
|
|
|
|
check_metadata_consistency option, and we also skip it if the current
|
|
|
|
|
transaction is in READ ONLY mode, since the schema can’t be modified in that
|
|
|
|
|
case, anyway.
|
|
|
|
|
|
|
|
|
|
However, even if neither read_only nor check_metadata_consistency is passed, lots
|
|
|
|
|
of queries may not modify the schema at all. As a (fairly stupid) heuristic, we
|
|
|
|
|
check if the query contains any keywords for DDL operations, and if not, we skip
|
|
|
|
|
the metadata check as well. -}
|
2019-08-14 02:34:37 +03:00
|
|
|
|
|
|
|
|
|
-- | The JSON response of a @run_sql@ request: a result-type tag (e.g.
-- @CommandOk@ or @TuplesOk@, mirroring libpq status names) plus the result
-- payload itself.
data RunSQLRes
  = RunSQLRes
  { rrResultType :: !Text
  -- ^ tag describing the kind of result returned
  , rrResult     :: !Value
  -- ^ the result data ('Null' for commands without rows)
  } deriving (Show, Eq)
$(deriveJSON (aesonDrop 2 snakeCase) ''RunSQLRes)
|
|
|
|
|
|
|
|
|
|
-- | Convert a raw libpq result into a 'RunSQLRes'. An empty result (e.g. from
-- a DDL statement) becomes a @CommandOk@ response; a result with rows becomes
-- a @TuplesOk@ response whose payload is a JSON array of rows, each a list of
-- textual cell values, with the column names prepended as a header row.
instance Q.FromRes RunSQLRes where
  fromRes (Q.ResultOkEmpty _) =
    return $ RunSQLRes "CommandOk" Null
  fromRes (Q.ResultOkData res) = do
    csvRows <- resToCSV res
    return $ RunSQLRes "TuplesOk" $ toJSON csvRows
    where
      -- Extract a header row of column names followed by all data rows from
      -- the libpq result, decoding every cell to 'Text'.
      resToCSV :: PQ.Result -> ExceptT Text IO [[Text]]
      resToCSV r = do
        nr <- liftIO $ PQ.ntuples r
        nc <- liftIO $ PQ.nfields r

        -- header row: one entry per column; "unknown" when libpq returns no name
        hdr <- forM [0..pred nc] $ \ic -> do
          colNameBS <- liftIO $ PQ.fname r ic
          maybe (return "unknown") decodeBS colNameBS

        -- data rows: SQL NULLs surface as Nothing from getvalue and are
        -- rendered as the literal string "NULL"
        rows <- forM [0..pred nr] $ \ir ->
          forM [0..pred nc] $ \ic -> do
            cellValBS <- liftIO $ PQ.getvalue r ir ic
            maybe (return "NULL") decodeBS cellValBS

        return $ hdr:rows

      -- UTF-8 decode, turning decoding failures into textual errors
      decodeBS = either (throwError . tshow) return . TE.decodeUtf8'
|