Mirror of https://github.com/hasura/graphql-engine.git (synced 2024-12-14 17:02:49 +03:00)
server: support separate metadata database and server code setup for multi sources (#197)
This is an incremental PR towards https://github.com/hasura/graphql-engine/pull/5797

Co-authored-by: Anon Ray <ecthiender@users.noreply.github.com>
GitOrigin-RevId: a6cb8c239b2ff840a0095e78845f682af0e588a9
parent 7cefefabd1
commit 29f2ddc289
@@ -265,17 +265,6 @@ library
    , cron >= 0.6.2
    -- needed for deriving via
    , semigroups >= 0.19

    , random
    , mmorph
    , http-api-data
    , lens-aeson
    , safe

    , semigroups >= 0.19.1

    -- scheduled triggers
    , cron >= 0.6.2
  if !flag(profiling)
    build-depends:
      -- 0.6.1 is supposedly not okay for ghc 8.6:
@@ -310,6 +299,7 @@ library
    , Hasura.Backends.Postgres.Connection
    , Hasura.Backends.Postgres.Execute.Mutation
    , Hasura.Backends.Postgres.Execute.RemoteJoin
    , Hasura.Backends.Postgres.Execute.Types
    , Hasura.Backends.Postgres.Translate.BoolExp
    , Hasura.Backends.Postgres.Translate.Column
    , Hasura.Backends.Postgres.Translate.Delete
@@ -398,6 +388,7 @@ library
    , Hasura.RQL.Types.SchemaCache
    , Hasura.RQL.Types.SchemaCache.Build
    , Hasura.RQL.Types.SchemaCacheTypes
    , Hasura.RQL.Types.Source
    , Hasura.RQL.Types.Table
    , Hasura.RQL.DDL.Action
    , Hasura.RQL.DDL.ComputedField
@@ -430,6 +421,7 @@ library
    , Hasura.RQL.DDL.Schema.Function
    , Hasura.RQL.DDL.Schema.Rename
    , Hasura.RQL.DDL.Schema.Table
    , Hasura.RQL.DDL.Schema.Source
    , Hasura.RQL.DDL.EventTrigger
    , Hasura.RQL.DDL.ScheduledTrigger
    , Hasura.RQL.DML.Count
@@ -547,6 +539,7 @@ test-suite graphql-engine-tests
    , transformers-base
    , unordered-containers
    , text
    , mmorph
  hs-source-dirs: src-test
  main-is: Main.hs
  other-modules:
@@ -3,33 +3,36 @@

module Main where

import Control.Exception
import Control.Monad.Trans.Managed (ManagedT(..), lowerManagedT)
import Control.Monad.Trans.Managed (ManagedT (..), lowerManagedT)
import Data.Int (Int64)
import Data.Text.Conversions (convertText)
import Data.Time.Clock (getCurrentTime)
import Data.Time.Clock.POSIX (getPOSIXTime)

import Hasura.App
import Hasura.Logging (Hasura, LogLevel (..), defaultEnabledEngineLogTypes)
import Hasura.Logging (Hasura, LogLevel (..),
                       defaultEnabledEngineLogTypes)
import Hasura.Metadata.Class
import Hasura.Prelude
import Hasura.RQL.DDL.Schema
import Hasura.RQL.DDL.Schema.Cache.Common
import Hasura.RQL.DDL.Schema.Source
import Hasura.RQL.Types
import Hasura.Server.Init
import Hasura.Server.Migrate (downgradeCatalog, dropCatalog)
import Hasura.Server.Version

import qualified Control.Concurrent.Extended as C
import qualified Data.ByteString.Char8 as BC
import qualified Data.ByteString.Lazy as BL
import qualified Data.ByteString.Lazy.Char8 as BLC
import qualified Data.Environment as Env
import qualified Database.PG.Query as Q
import qualified Hasura.GC as GC
import qualified Hasura.Tracing as Tracing
import qualified System.Exit as Sys
import qualified System.Metrics as EKG
import qualified System.Posix.Signals as Signals

main :: IO ()
@@ -44,9 +47,11 @@ main = do
    Right r -> return r

runApp :: Env.Environment -> HGEOptions Hasura -> IO ()
runApp env (HGEOptionsG rci hgeCmd) = do
runApp env (HGEOptionsG rci metadataDbUrl hgeCmd) = do
  initTime <- liftIO getCurrentTime
  globalCtx@GlobalCtx{..} <- initGlobalCtx rci
  globalCtx@GlobalCtx{..} <- initGlobalCtx env metadataDbUrl rci

  let (dbUrlConf, defaultPgConnInfo, maybeRetries) = _gcDefaultPostgresConnInfo

  withVersion $$(getVersionFromEnvironment) $ case hgeCmd of
    HCServe serveOptions -> do
@@ -73,33 +78,38 @@ runApp env (HGEOptionsG rci hgeCmd) = do
          Signals.sigTERM
          (Signals.CatchOnce (shutdownGracefully $ _scShutdownLatch serveCtx))
          Nothing

      let Loggers _ logger _ = _scLoggers serveCtx

      let Loggers _ logger pgLogger = _scLoggers serveCtx
      _idleGCThread <- C.forkImmortal "ourIdleGC" logger $
        GC.ourIdleGC logger (seconds 0.3) (seconds 10) (seconds 60)

      serverMetrics <- liftIO $ createServerMetrics ekgStore
      flip runPGMetadataStorageApp (_scPgPool serveCtx) . lowerManagedT $ do
        runHGEServer env serveOptions serveCtx Nothing initTime Nothing serverMetrics ekgStore
      flip runPGMetadataStorageApp (_scMetadataDbPool serveCtx, pgLogger) . lowerManagedT $ do
        runHGEServer env serveOptions serveCtx initTime Nothing serverMetrics ekgStore

    HCExport -> do
      res <- runTxWithMinimalPool _gcConnInfo fetchMetadataFromCatalog
      res <- runTxWithMinimalPool defaultPgConnInfo fetchMetadataFromCatalog
      either (printErrJExit MetadataExportError) printJSON res

    HCClean -> do
      res <- runTxWithMinimalPool _gcConnInfo dropCatalog
      res <- runTxWithMinimalPool _gcMetadataDbConnInfo dropCatalog
      let cleanSuccessMsg = "successfully cleaned graphql-engine related data"
      either (printErrJExit MetadataCleanError) (const $ liftIO $ putStrLn cleanSuccessMsg) res

    HCExecute -> do
      queryBs <- liftIO BL.getContents
      let sqlGenCtx = SQLGenCtx False
      runManagedT (mkMinimalPool _gcConnInfo) $ \pool -> do
        res <- flip runPGMetadataStorageApp pool $
          runMetadataStorageT $ liftEitherM $
            runAsAdmin pool sqlGenCtx RemoteSchemaPermsDisabled _gcHttpManager $ do
              metadata <- liftTx fetchMetadataFromCatalog
              schemaCache <- buildRebuildableSchemaCache env metadata
          remoteSchemaPermsCtx = RemoteSchemaPermsDisabled
          pgLogger = print
          pgSourceResolver = mkPgSourceResolver pgLogger
          cacheBuildParams = CacheBuildParams _gcHttpManager sqlGenCtx remoteSchemaPermsCtx pgSourceResolver
      runManagedT (mkMinimalPool _gcMetadataDbConnInfo) $ \metadataDbPool -> do
        res <- flip runPGMetadataStorageApp (metadataDbPool, pgLogger) $
          runMetadataStorageT $ liftEitherM do
            metadata <- fetchMetadata
            runAsAdmin sqlGenCtx _gcHttpManager remoteSchemaPermsCtx $ do
              schemaCache <- runCacheBuild cacheBuildParams $
                buildRebuildableSchemaCache env metadata
              execQuery env queryBs
                & Tracing.runTraceTWithReporter Tracing.noReporter "execute"
                & runMetadataT metadata
@@ -108,7 +118,10 @@ runApp env (HGEOptionsG rci hgeCmd) = do
      either (printErrJExit ExecuteProcessError) (liftIO . BLC.putStrLn) res

    HCDowngrade opts -> do
      res <- runTxWithMinimalPool _gcConnInfo $ downgradeCatalog opts initTime
      let pgSourceConnInfo = PostgresSourceConnInfo dbUrlConf
            defaultPostgresPoolSettings{_ppsRetries = fromMaybe 1 maybeRetries}
          defaultSourceConfig = SourceConfiguration pgSourceConnInfo Nothing
      res <- runTxWithMinimalPool _gcMetadataDbConnInfo $ downgradeCatalog defaultSourceConfig opts initTime
      either (printErrJExit DowngradeProcessError) (liftIO . print) res

    HCVersion -> liftIO $ putStrLn $ "Hasura GraphQL Engine: " ++ convertText currentVersion
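For orientation, a sketch of the option shape implied by the runApp pattern match above. The positional constructor is rendered here as a record for readability; these field names are invented for illustration, not the upstream definitions:

-- Hypothetical record view of HGEOptionsG as matched in runApp above.
data HGEOptionsG impl = HGEOptionsG
  { hoDefaultPgConnInfo :: PostgresConnInfo UrlConf -- from --database-url
  , hoMetadataDbUrl     :: Maybe String             -- optional separate metadata database URL
  , hoCommand           :: HGECommand impl
  }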
@@ -6,14 +6,15 @@ module Data.Environment
  , mkEnvironment
  , emptyEnvironment
  , maybeEnvironment
  , lookupEnv)
  where
  , lookupEnv
  , Data.Environment.toList
  ) where

import Hasura.Prelude
import Data.Aeson
import Data.Aeson
import Hasura.Prelude

import qualified Data.Map as M
import qualified System.Environment
import qualified Data.Map as M

newtype Environment = Environment (M.Map String String) deriving (Eq, Show, Generic)

@@ -33,3 +34,6 @@ emptyEnvironment = Environment M.empty

lookupEnv :: Environment -> String -> Maybe String
lookupEnv (Environment es) k = M.lookup k es

toList :: Environment -> [(String, String)]
toList (Environment e) = M.toList e
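A brief usage sketch for the newly exported toList. The qualified call matches the export list above; that the Environment type itself is importable is an assumption, since the head of the export list is not shown in this hunk:

import qualified Data.Environment as Env

-- Hypothetical consumer: dump the captured environment as pairs, e.g. for logging.
envSummary :: Env.Environment -> String
envSummary = show . Env.toList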
@@ -4,6 +4,7 @@ module Data.URL.Template
  , TemplateItem
  , Variable
  , printURLTemplate
  , mkPlainURLTemplate
  , parseURLTemplate
  , renderURLTemplate
  , genURLTemplate
@@ -44,6 +45,10 @@ newtype URLTemplate = URLTemplate {unURLTemplate :: [TemplateItem]}
printURLTemplate :: URLTemplate -> Text
printURLTemplate = T.concat . map printTemplateItem . unURLTemplate

mkPlainURLTemplate :: Text -> URLTemplate
mkPlainURLTemplate =
  URLTemplate . pure . TIText

parseURLTemplate :: Text -> Either String URLTemplate
parseURLTemplate t = parseOnly parseTemplate t
  where
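A quick check on the new mkPlainURLTemplate: a template built from plain text holds a single TIText item, so printing it should give back the original text, assuming printTemplateItem is the identity on text items (not shown in this hunk):

-- Expected to evaluate to "https://example.com/webhook".
plainRoundTrip :: Text
plainRoundTrip = printURLTemplate (mkPlainURLTemplate "https://example.com/webhook")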
@@ -8,11 +8,11 @@ import Control.Exception (bracket_, throwIO)
import Control.Monad.Base
import Control.Monad.Catch (Exception, MonadCatch, MonadMask,
                            MonadThrow, onException)
import Control.Monad.Trans.Managed (ManagedT(..), allocate)
import Control.Monad.Morph (hoist)
import Control.Monad.Stateless
import Control.Monad.STM (atomically)
import Control.Monad.Trans.Control (MonadBaseControl (..))
import Control.Monad.Trans.Managed (ManagedT (..), allocate)
import Control.Monad.Unique
import Data.Time.Clock (UTCTime)
#ifndef PROFILING
@@ -28,6 +28,7 @@ import qualified Data.Aeson as A
import qualified Data.ByteString.Char8 as BC
import qualified Data.ByteString.Lazy.Char8 as BLC
import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as HM
import qualified Data.Set as Set
import qualified Data.Text as T
import qualified Data.Time.Clock as Clock
@@ -56,7 +57,9 @@ import Hasura.Logging
import Hasura.Metadata.Class
import Hasura.Prelude
import Hasura.RQL.DDL.Schema.Cache
import Hasura.RQL.DDL.Schema.Cache.Common
import Hasura.RQL.DDL.Schema.Catalog
import Hasura.RQL.DDL.Schema.Source
import Hasura.RQL.Types
import Hasura.RQL.Types.Run
import Hasura.Server.API.Query (requiresAdmin, runQueryM)
@@ -138,7 +141,9 @@ parseArgs = do
        header "Hasura GraphQL Engine: Realtime GraphQL API over Postgres with access control" <>
        footerDoc (Just mainCmdFooter)
      )
    hgeOpts = HGEOptionsG <$> parseRawConnInfo <*> parseHGECommand
    hgeOpts = HGEOptionsG <$> parsePostgresConnInfo
                          <*> parseMetadataDbUrl
                          <*> parseHGECommand

printJSON :: (A.ToJSON a, MonadIO m) => a -> m ()
printJSON = liftIO . BLC.putStrLn . A.encode
@@ -153,30 +158,42 @@ mkPGLogger (Logger logger) (Q.PLERetryMsg msg) =
-- | Context required for all graphql-engine CLI commands
data GlobalCtx
  = GlobalCtx
  { _gcHttpManager :: !HTTP.Manager
  , _gcConnInfo    :: !Q.ConnInfo
  { _gcHttpManager             :: !HTTP.Manager
  , _gcMetadataDbConnInfo      :: !Q.ConnInfo
  , _gcDefaultPostgresConnInfo :: !(UrlConf, Q.ConnInfo, Maybe Int)
  -- ^ Url Config for --database-url option and optional retries
  }

initGlobalCtx
  :: (MonadIO m) => RawConnInfo -> m GlobalCtx
initGlobalCtx rawConnInfo = do
  _gcHttpManager <- liftIO $ HTTP.newManager HTTP.tlsManagerSettings
  _gcConnInfo <- liftIO $ onLeft (mkConnInfo rawConnInfo) $
    printErrExit InvalidDatabaseConnectionParamsError . ("Fatal Error : " <>)
  pure GlobalCtx{..}
  :: (MonadIO m)
  => Env.Environment -> Maybe String -> PostgresConnInfo UrlConf -> m GlobalCtx
initGlobalCtx env metadataDbUrl defaultPgConnInfo = do
  httpManager <- liftIO $ HTTP.newManager HTTP.tlsManagerSettings

  let PostgresConnInfo dbUrlConf maybeRetries = defaultPgConnInfo
  defaultDbConnInfo <- resolvePostgresConnInfo env dbUrlConf maybeRetries

  let maybeMetadataDbConnInfo =
        let retries = fromMaybe 1 $ _pciRetries defaultPgConnInfo
        in (Q.ConnInfo retries . Q.CDDatabaseURI . txtToBs . T.pack)
           <$> metadataDbUrl
      -- If no metadata storage is specified, use the default database as
      -- metadata storage
      metadataDbConnInfo = fromMaybe defaultDbConnInfo maybeMetadataDbConnInfo

  pure $ GlobalCtx httpManager metadataDbConnInfo (dbUrlConf, defaultDbConnInfo, maybeRetries)


-- | Context required for the 'serve' CLI command.
data ServeCtx
  = ServeCtx
  { _scHttpManager :: !HTTP.Manager
  , _scInstanceId :: !InstanceId
  , _scLoggers :: !Loggers
  , _scConnInfo :: !Q.ConnInfo
  , _scPgPool :: !Q.PGPool
  , _scShutdownLatch :: !ShutdownLatch
  , _scSchemaCache :: !RebuildableSchemaCache
  , _scSchemaSyncCtx :: !SchemaSyncCtx
  { _scHttpManager    :: !HTTP.Manager
  , _scInstanceId     :: !InstanceId
  , _scLoggers        :: !Loggers
  , _scMetadataDbPool :: !Q.PGPool
  , _scShutdownLatch  :: !ShutdownLatch
  , _scSchemaCache    :: !RebuildableSchemaCache
  , _scSchemaSyncCtx  :: !SchemaSyncCtx
  }

-- | Collection of the LoggerCtx, the regular Logger and the PGLogger
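The essential behaviour above: when no separate metadata database URL is supplied, the --database-url connection doubles as metadata storage. A minimal restatement, simplified to strings rather than the real Q.ConnInfo values:

-- Simplified sketch of the fallback in initGlobalCtx.
pickMetadataDb :: Maybe String -> String -> String
pickMetadataDb maybeMetadataDbUrl defaultDbUrl =
  fromMaybe defaultDbUrl maybeMetadataDbUrl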
@@ -190,12 +207,22 @@ data Loggers

-- | An application with Postgres database as a metadata storage
newtype PGMetadataStorageApp a
  = PGMetadataStorageApp {runPGMetadataStorageApp :: Q.PGPool -> IO a}
  = PGMetadataStorageApp {runPGMetadataStorageApp :: (Q.PGPool, Q.PGLogger) -> IO a}
  deriving ( Functor, Applicative, Monad
           , MonadIO, MonadBase IO, MonadBaseControl IO
           , MonadCatch, MonadThrow, MonadMask
           , MonadUnique, MonadReader Q.PGPool
           ) via (ReaderT Q.PGPool IO)
           , MonadUnique, MonadReader (Q.PGPool, Q.PGLogger)
           ) via (ReaderT (Q.PGPool, Q.PGLogger) IO)

resolvePostgresConnInfo
  :: (MonadIO m) => Env.Environment -> UrlConf -> Maybe Int -> m Q.ConnInfo
resolvePostgresConnInfo env dbUrlConf maybeRetries = do
  dbUrlText <-
    runExcept (resolveUrlConf env dbUrlConf) `onLeft` \err ->
      liftIO (printErrExit InvalidDatabaseConnectionParamsError (BLC.unpack $ A.encode err))
  pure $ Q.ConnInfo retries $ Q.CDDatabaseURI $ txtToBs dbUrlText
  where
    retries = fromMaybe 1 maybeRetries

-- | Initializes or migrates the catalog and returns the context required to start the server.
initialiseServeCtx
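The PGMetadataStorageApp change widens the ReaderT-derived environment from a bare pool to a (pool, logger) pair, so instances can select components with asks fst and asks snd. A self-contained analogue of that deriving-via pattern, with stand-in types:

{-# LANGUAGE DerivingVia #-}
import Control.Monad.Reader

newtype App a = App { runApp :: (Int, String) -> IO a }
  deriving ( Functor, Applicative, Monad
           , MonadIO, MonadReader (Int, String)
           ) via (ReaderT (Int, String) IO)

poolSize :: App Int
poolSize = asks fst   -- analogous to asks fst for the metadata pool

loggerTag :: App String
loggerTag = asks snd  -- analogous to asks snd for the PG logger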
@@ -210,23 +237,36 @@ initialiseServeCtx env GlobalCtx{..} so@ServeOptions{..} = do
  loggers@(Loggers loggerCtx logger pgLogger) <- mkLoggers soEnabledLogTypes soLogLevel
  -- log serve options
  unLogger logger $ serveOptsToLog so

  -- log postgres connection info
  unLogger logger $ connInfoToLog _gcConnInfo
  pool <- liftIO $ Q.initPGPool _gcConnInfo soConnParams pgLogger
  let sqlGenCtx = SQLGenCtx soStringifyNum
  unLogger logger $ connInfoToLog _gcMetadataDbConnInfo

  metadataDbPool <- liftIO $ Q.initPGPool _gcMetadataDbConnInfo soConnParams pgLogger

  let defaultSourceConfig =
        let (dbUrlConf, _, maybeRetries) = _gcDefaultPostgresConnInfo
            connSettings = PostgresPoolSettings
              { _ppsMaxConnections = Q.cpConns soConnParams
              , _ppsIdleTimeout = Q.cpIdleTime soConnParams
              , _ppsRetries = fromMaybe 1 maybeRetries
              }
            sourceConnInfo = PostgresSourceConnInfo dbUrlConf connSettings
        in SourceConfiguration sourceConnInfo Nothing
      sqlGenCtx = SQLGenCtx soStringifyNum

  -- Start a background thread for listening schema sync events from other server instances,
  -- just before building @'RebuildableSchemaCache' (happens in @'migrateCatalogSchema' function).
  -- See Note [Schema Cache Sync]
  (schemaSyncListenerThread, schemaSyncEventRef) <- startSchemaSyncListenerThread pool logger instanceId
  (schemaSyncListenerThread, schemaSyncEventRef) <- startSchemaSyncListenerThread metadataDbPool logger instanceId

  (rebuildableSchemaCache, cacheInitStartTime) <-
    lift . flip onException (flushLogger loggerCtx) $ migrateCatalogSchema env logger pool _gcHttpManager sqlGenCtx soEnableRemoteSchemaPermissions
    lift . flip onException (flushLogger loggerCtx) $
      migrateCatalogSchema env logger metadataDbPool defaultSourceConfig _gcHttpManager
        sqlGenCtx soEnableRemoteSchemaPermissions (mkPgSourceResolver pgLogger)

  let schemaSyncCtx = SchemaSyncCtx schemaSyncListenerThread schemaSyncEventRef cacheInitStartTime
      initCtx = ServeCtx _gcHttpManager instanceId loggers _gcConnInfo pool latch
        rebuildableSchemaCache schemaSyncCtx
  pure initCtx
  pure $ ServeCtx _gcHttpManager instanceId loggers metadataDbPool latch
         rebuildableSchemaCache schemaSyncCtx

mkLoggers
  :: (MonadIO m, MonadBaseControl IO m)
@@ -243,16 +283,18 @@ mkLoggers enabledLogs logLevel = do
-- | helper function to initialize or migrate the @hdb_catalog@ schema (used by pro as well)
migrateCatalogSchema
  :: (HasVersion, MonadIO m, MonadBaseControl IO m)
  => Env.Environment -> Logger Hasura -> Q.PGPool -> HTTP.Manager -> SQLGenCtx
  -> RemoteSchemaPermsCtx
  => Env.Environment -> Logger Hasura -> Q.PGPool -> SourceConfiguration
  -> HTTP.Manager -> SQLGenCtx -> RemoteSchemaPermsCtx -> SourceResolver
  -> m (RebuildableSchemaCache, UTCTime)
migrateCatalogSchema env logger pool httpManager sqlGenCtx remoteSchemaPermsCtx = do
  let pgExecCtx = mkPGExecCtx Q.Serializable pool
      adminRunCtx = RunCtx adminUserInfo httpManager sqlGenCtx remoteSchemaPermsCtx
migrateCatalogSchema env logger pool defaultSourceConfig httpManager sqlGenCtx remoteSchemaPermsCtx sourceResolver = do
  currentTime <- liftIO Clock.getCurrentTime
  initialiseResult <- runExceptT $
    peelRun adminRunCtx pgExecCtx Q.ReadWrite Nothing $
      migrateCatalog env currentTime
  initialiseResult <- runExceptT $ do
    (migrationResult, metadata) <- Q.runTx pool (Q.Serializable, Just Q.ReadWrite) $
      migrateCatalog defaultSourceConfig currentTime
    let cacheBuildParams = CacheBuildParams httpManager sqlGenCtx remoteSchemaPermsCtx sourceResolver
    schemaCache <- runCacheBuild cacheBuildParams $
      buildRebuildableSchemaCache env metadata
    pure (migrationResult, schemaCache)

  (migrationResult, schemaCache) <-
    initialiseResult `onLeft` \err -> do
@@ -309,7 +351,7 @@ createServerMetrics store = do
-- (SIGTERM, or more generally, whenever the shutdown latch is set), we need to
-- make absolutely sure that we clean up any resources which were allocated during
-- server setup. In the case of a multitenant process, failure to do so can lead to
-- resource leaks.
--
-- To track these resources, we use the ManagedT monad, and attach finalizers at
-- the same point in the code where we allocate resources. If you fork a new
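In plain IO terms, the discipline this comment describes is the bracket idiom: acquisition and finalizer declared at a single site. A conceptual sketch only; the real API is ManagedT's allocate:

import Control.Exception (bracket)

-- Acquire and release are paired at one call site, so shutdown
-- cannot forget the cleanup.
withManaged :: IO r -> (r -> IO ()) -> (r -> IO a) -> IO a
withManaged acquire release = bracket acquire release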
@@ -344,12 +386,11 @@ runHGEServer
     , MonadQueryInstrumentation m
     , HasResourceLimits m
     , MonadMetadataStorage (MetadataStorageT m)
     , MonadResolveSource m
     )
  => Env.Environment
  -> ServeOptions impl
  -> ServeCtx
  -> Maybe PGExecCtx
  -- ^ An optional specialized pg execution context for executing queries
  -- and mutations
  -> UTCTime
  -- ^ start time
@@ -357,7 +398,7 @@
  -> ServerMetrics
  -> EKG.Store
  -> ManagedT m ()
runHGEServer env ServeOptions{..} ServeCtx{..} pgExecCtx initTime postPollHook serverMetrics ekgStore = do
runHGEServer env ServeOptions{..} ServeCtx{..} initTime postPollHook serverMetrics ekgStore = do
  -- Comment this to enable expensive assertions from "GHC.AssertNF". These
  -- will log lines to STDOUT containing "not in normal form". In the future we
  -- could try to integrate this into our tests. For now this is a development
@@ -379,13 +420,9 @@ runHGEServer env ServeOptions{..} ServeCtx{..} pgExecCtx initTime postPollHook s

  HasuraApp app cacheRef stopWsServer <- lift $ flip onException (flushLogger loggerCtx) $
    mkWaiApp env
             soTxIso
             logger
             sqlGenCtx
             soEnableAllowlist
             _scPgPool
             pgExecCtx
             _scConnInfo
             _scHttpManager
             authMode
             soCorsConfig
@@ -409,20 +446,22 @@ runHGEServer env ServeOptions{..} ServeCtx{..} pgExecCtx initTime postPollHook s
  liftIO $ logInconsObjs logger inconsObjs

  -- Start a background thread for processing schema sync event present in the '_sscSyncEventRef'
  _ <- startSchemaSyncProcessorThread sqlGenCtx _scPgPool
       logger _scHttpManager _sscSyncEventRef
       cacheRef _scInstanceId _sscCacheInitStartTime soEnableRemoteSchemaPermissions
  _ <- startSchemaSyncProcessorThread sqlGenCtx
       logger _scHttpManager _sscSyncEventRef
       cacheRef _scInstanceId _sscCacheInitStartTime soEnableRemoteSchemaPermissions

  let
    maxEvThrds = fromMaybe defaultMaxEventThreads soEventsHttpPoolSize
    fetchI = milliseconds $ fromMaybe (Milliseconds defaultFetchInterval) soEventsFetchInterval
    logEnvHeaders = soLogHeadersFromEnv
    allPgSources = map _pcConfiguration $ HM.elems $ scPostgres $
                   lastBuiltSchemaCache _scSchemaCache

  lockedEventsCtx <- allocate
    (liftIO $ atomically initLockedEventsCtx)
    (\lockedEventsCtx ->
       liftWithStateless \lowerIO ->
         shutdownEvents _scPgPool (\a b -> hoist lowerIO (unlockScheduledEvents a b)) logger lockedEventsCtx)
    (\lockedEventsCtx ->
       liftWithStateless \lowerIO ->
         shutdownEvents allPgSources (\a b -> hoist lowerIO (unlockScheduledEvents a b)) logger lockedEventsCtx)

  -- prepare event triggers data
  eventEngineCtx <- liftIO $ atomically $ initEventEngineCtx maxEvThrds fetchI
@@ -430,7 +469,7 @@ runHGEServer env ServeOptions{..} ServeCtx{..} pgExecCtx initTime postPollHook s

  _eventQueueThread <- C.forkManagedT "processEventQueue" logger $
    processEventQueue logger logEnvHeaders
      _scHttpManager _scPgPool (getSCFromRef cacheRef) eventEngineCtx lockedEventsCtx
      _scHttpManager (getSCFromRef cacheRef) eventEngineCtx lockedEventsCtx

  -- start a background thread to handle async actions
  _asyncActionsThread <- C.forkManagedT "asyncActionsProcessor" logger $
@@ -456,7 +495,7 @@ runHGEServer env ServeOptions{..} ServeCtx{..} pgExecCtx initTime postPollHook s
    then do
      lift . unLogger logger $ mkGenericStrLog LevelInfo "telemetry" telemetryNotice

      (dbId, pgVersion) <- liftIO $ runTxIO _scPgPool (Q.ReadCommitted, Nothing) $
      (dbId, pgVersion) <- liftIO $ runTxIO _scMetadataDbPool (Q.ReadCommitted, Nothing) $
        (,) <$> getDbId <*> getPgVersion

      telemetryThread <- C.forkManagedT "runTelemetry" logger $ liftIO $
@@ -476,23 +515,23 @@ runHGEServer env ServeOptions{..} ServeCtx{..} pgExecCtx initTime postPollHook s
        (EKG.Gauge.inc $ smWarpThreads serverMetrics)
        (EKG.Gauge.dec $ smWarpThreads serverMetrics)
        (f unmask))

  let shutdownHandler closeSocket = LA.link =<< LA.async do
        waitForShutdown _scShutdownLatch
        unLogger logger $ mkGenericStrLog LevelInfo "server" "gracefully shutting down server"
        closeSocket

  let warpSettings = Warp.setPort soPort
                     . Warp.setHost soHost
                     . Warp.setGracefulShutdownTimeout (Just 30) -- 30s graceful shutdown
                     . Warp.setInstallShutdownHandler shutdownHandler
                     . setForkIOWithMetrics
                     $ Warp.defaultSettings

  -- Here we block until the shutdown latch 'MVar' is filled, and then
  -- shut down the server. Once this blocking call returns, we'll tidy up
  -- any resources using the finalizers attached using 'ManagedT' above.
  -- Structuring things using the shutdown latch in this way lets us decide
  -- elsewhere exactly how we want to control shutdown.
  liftIO $ Warp.runSettings warpSettings app `LE.finally` do
    -- These cleanup actions are not directly associated with any
@@ -525,17 +564,18 @@ runHGEServer env ServeOptions{..} ServeCtx{..} pgExecCtx initTime postPollHook s
-- processed but not been marked as delivered in the db will be unlocked by `shutdownEvents`
-- and will be processed when the events are processed next time.
shutdownEvents
  :: Q.PGPool
  :: [SourceConfig 'Postgres]
  -> (ScheduledEventType -> [ScheduledEventId] -> MetadataStorageT IO Int)
  -> Logger Hasura
  -> LockedEventsCtx
  -> IO ()
shutdownEvents pool unlockScheduledEvents' hasuraLogger@(Logger logger) LockedEventsCtx {..} = do
  liftIO $ logger $ mkGenericStrLog LevelInfo "event_triggers" "unlocking events that are locked by the HGE"
  let unlockEvents' =
        liftEitherM . liftIO . runTx pool (Q.ReadCommitted, Nothing) . unlockEvents
  unlockEventsForShutdown hasuraLogger "event_triggers" "" unlockEvents' leEvents
  liftIO $ logger $ mkGenericStrLog LevelInfo "scheduled_triggers" "unlocking scheduled events that are locked by the HGE"
shutdownEvents pgSources unlockScheduledEvents' hasuraLogger@(Logger logger) LockedEventsCtx {..} = do
  forM_ pgSources $ \pgSource -> do
    logger $ mkGenericStrLog LevelInfo "event_triggers" "unlocking events that are locked by the HGE"
    let unlockEvents' l = MetadataStorageT $ runLazyTx (_pscExecCtx pgSource) Q.ReadWrite $ liftTx $ unlockEvents l
    unlockEventsForShutdown hasuraLogger "event_triggers" "" unlockEvents' leEvents
    logger $ mkGenericStrLog LevelInfo "scheduled_triggers" "unlocking scheduled events that are locked by the HGE"

  unlockEventsForShutdown hasuraLogger "scheduled_triggers" "cron events" (unlockScheduledEvents' Cron) leCronEvents
  unlockEventsForShutdown hasuraLogger "scheduled_triggers" "scheduled events" (unlockScheduledEvents' OneOff) leOneOffEvents
@@ -556,28 +596,22 @@ runHGEServer env ServeOptions{..} ServeCtx{..} pgExecCtx initTime postPollHook s
      Right count -> logger $ mkGenericStrLog LevelInfo triggerType $
        show count ++ " " ++ T.unpack eventType ++ " events successfully unlocked"

runTx :: Q.PGPool -> Q.TxMode -> Q.TxE QErr a -> IO (Either QErr a)
runTx pool txLevel tx =
  liftIO $ runExceptT $ Q.runTx pool txLevel tx

runAsAdmin
  :: (MonadIO m, MonadBaseControl IO m)
  => Q.PGPool
  -> SQLGenCtx
  -> RemoteSchemaPermsCtx
  :: SQLGenCtx
  -> HTTP.Manager
  -> RemoteSchemaPermsCtx
  -> RunT m a
  -> m (Either QErr a)
runAsAdmin pool sqlGenCtx remoteSchemaPermsCtx httpManager m = do
runAsAdmin sqlGenCtx httpManager remoteSchemaPermsCtx m = do
  let runCtx = RunCtx adminUserInfo httpManager sqlGenCtx remoteSchemaPermsCtx
      pgCtx = mkPGExecCtx Q.Serializable pool
  runExceptT $ peelRun runCtx pgCtx Q.ReadWrite Nothing m
  runExceptT $ peelRun runCtx m


execQuery
  :: ( HasVersion
     , CacheRWM m
     , MonadTx m
     , MonadIO m
     , MonadBaseControl IO m
     , MonadUnique m
     , HasHttpManager m
     , HasSQLGenCtx m
@@ -585,7 +619,7 @@ execQuery
     , Tracing.MonadTrace m
     , HasRemoteSchemaPermsCtx m
     , MetadataM m
     , MonadScheduledEvents m
     , MonadMetadataStorageQueryAPI m
     )
  => Env.Environment
  -> BLC.ByteString
@@ -595,7 +629,7 @@ execQuery env queryBs = do
    Just jVal -> decodeValue jVal
    Nothing -> throw400 InvalidJSON "invalid json"
  buildSchemaCacheStrict
  encJToLBS <$> runQueryM env query
  encJToLBS <$> runQueryM env defaultSource query

instance Tracing.HasReporter PGMetadataStorageApp
@@ -650,9 +684,12 @@ instance MonadQueryLog PGMetadataStorageApp where
instance WS.MonadWSLog PGMetadataStorageApp where
  logWSLog = unLogger

instance MonadResolveSource PGMetadataStorageApp where
  getSourceResolver = mkPgSourceResolver <$> asks snd

runInSeparateTx :: Q.TxE QErr a -> MetadataStorageT PGMetadataStorageApp a
runInSeparateTx tx = do
  pool <- lift ask
  pool <- lift $ asks fst
  liftEitherM $ liftIO $ runExceptT $ Q.runTx pool (Q.RepeatableRead, Nothing) tx

-- | Using @pg_notify@ function to publish schema sync events to other server
@@ -682,6 +719,8 @@ instance MonadMetadataStorage (MetadataStorageT PGMetadataStorageApp) where
    EventPayload{..} <- decodeValue payload
    pure $ SchemaSyncEventProcessResult (instanceId /= _epInstanceId) _epInvalidations

  checkMetadataStorageHealth = (lift (asks fst)) >>= checkDbConnection

  getDeprivedCronTriggerStats = runInSeparateTx getDeprivedCronTriggerStatsTx
  getScheduledEventsForDelivery = runInSeparateTx getScheduledEventsForDeliveryTx
  insertScheduledEvent = runInSeparateTx . insertScheduledEventTx
@@ -695,6 +734,7 @@ instance MonadMetadataStorage (MetadataStorageT PGMetadataStorageApp) where
  fetchUndeliveredActionEvents = runInSeparateTx fetchUndeliveredActionEventsTx
  setActionStatus a b = runInSeparateTx $ setActionStatusTx a b
  fetchActionResponse = runInSeparateTx . fetchActionResponseTx
  clearActionData = runInSeparateTx . clearActionDataTx

--- helper functions ---
@@ -8,8 +8,6 @@ module Hasura.Backends.Postgres.Connection
  , LazyTxT
  , LazyTx

  , PGExecCtx(..)
  , mkPGExecCtx
  , runLazyTx
  , runQueryTx
  , withUserInfo
@@ -18,72 +16,36 @@ module Hasura.Backends.Postgres.Connection

  , RespTx
  , LazyRespTx
  , defaultTxErrorHandler
  , mkTxErrorHandler
  , lazyTxToQTx

  , doesSchemaExist
  , doesTableExist
  , isExtensionAvailable
  , enablePgcryptoExtension

  , module ET
  ) where

import Hasura.Prelude

import qualified Data.Aeson.Extended as J
import qualified Database.PG.Query as Q
import qualified Database.PG.Query.Connection as Q

import Control.Lens
import Control.Monad.Morph (hoist)
import Control.Monad.Trans.Control (MonadBaseControl (..))
import Control.Monad.Unique
import Control.Monad.Validate
import Data.Either (isRight)

import qualified Hasura.Backends.Postgres.SQL.DML as S
import qualified Hasura.Tracing as Tracing

import Hasura.Backends.Postgres.SQL.Error
import Hasura.Backends.Postgres.Execute.Types as ET
import Hasura.Backends.Postgres.SQL.Types
import Hasura.EncJSON
import Hasura.RQL.Types.Error
import Hasura.Session
import Hasura.SQL.Types

type RunTx =
  forall m a. (MonadIO m, MonadBaseControl IO m) => Q.TxET QErr m a -> ExceptT QErr m a

data PGExecCtx
  = PGExecCtx
  { _pecRunReadOnly :: RunTx
  -- ^ Run a Q.ReadOnly transaction
  , _pecRunReadNoTx :: RunTx
  -- ^ Run a read only statement without an explicit transaction block
  , _pecRunReadWrite :: RunTx
  -- ^ Run a Q.ReadWrite transaction
  , _pecCheckHealth :: (IO Bool)
  -- ^ Checks the health of this execution context
  }

-- | Creates a Postgres execution context for a single Postgres master pool
mkPGExecCtx :: Q.TxIsolation -> Q.PGPool -> PGExecCtx
mkPGExecCtx isoLevel pool =
  PGExecCtx
  { _pecRunReadOnly = (Q.runTx pool (isoLevel, Just Q.ReadOnly))
  , _pecRunReadNoTx = (Q.runTx' pool)
  , _pecRunReadWrite = (Q.runTx pool (isoLevel, Just Q.ReadWrite))
  , _pecCheckHealth = checkDbConnection
  }
  where
    checkDbConnection = do
      e <- liftIO $ runExceptT $ Q.runTx' pool select1Query
      pure $ isRight e
      where
        select1Query :: Q.TxE QErr Int
        select1Query =
          runIdentity . Q.getRow <$>
            Q.withQE defaultTxErrorHandler [Q.sql| SELECT 1 |] () False

class (MonadError QErr m) => MonadTx m where
  liftTx :: Q.TxE QErr a -> m a

@@ -162,41 +124,6 @@ setHeadersTx session = do
sessionInfoJsonExp :: SessionVariables -> S.SQLExp
sessionInfoJsonExp = S.SELit . J.encodeToStrictText

defaultTxErrorHandler :: Q.PGTxErr -> QErr
defaultTxErrorHandler = mkTxErrorHandler (const False)

-- | Constructs a transaction error handler given a predicate that determines which errors are
-- expected and should be reported to the user. All other errors are considered internal errors.
mkTxErrorHandler :: (PGErrorType -> Bool) -> Q.PGTxErr -> QErr
mkTxErrorHandler isExpectedError txe = fromMaybe unexpectedError expectedError
  where
    unexpectedError = (internalError "database query error") { qeInternal = Just $ J.toJSON txe }
    expectedError = uncurry err400 <$> do
      errorDetail <- Q.getPGStmtErr txe
      message <- Q.edMessage errorDetail
      errorType <- pgErrorType errorDetail
      guard $ isExpectedError errorType
      pure $ case errorType of
        PGIntegrityConstraintViolation code ->
          let cv = (ConstraintViolation,)
              customMessage = (code ^? _Just._PGErrorSpecific) <&> \case
                PGRestrictViolation -> cv "Can not delete or update due to data being referred. "
                PGNotNullViolation -> cv "Not-NULL violation. "
                PGForeignKeyViolation -> cv "Foreign key violation. "
                PGUniqueViolation -> cv "Uniqueness violation. "
                PGCheckViolation -> (PermissionError, "Check constraint violation. ")
                PGExclusionViolation -> cv "Exclusion violation. "
          in maybe (ConstraintViolation, message) (fmap (<> message)) customMessage

        PGDataException code -> case code of
          Just (PGErrorSpecific PGInvalidEscapeSequence) -> (BadRequest, message)
          _ -> (DataException, message)

        PGSyntaxErrorOrAccessRuleViolation code -> (ConstraintError,) $ case code of
          Just (PGErrorSpecific PGInvalidColumnReference) ->
            "there is no unique or exclusion constraint on target column(s)"
          _ -> message

withUserInfo :: (MonadIO m) => UserInfo -> LazyTxT QErr m a -> LazyTxT QErr m a
withUserInfo uInfo = \case
  LTErr e -> LTErr e
@@ -299,3 +226,33 @@ isExtensionAvailable extensionName =
       ( SELECT 1 FROM pg_catalog.pg_available_extensions
         WHERE name = $1
       ) |] (Identity extensionName) False

enablePgcryptoExtension :: forall m. MonadTx m => m ()
enablePgcryptoExtension = do
  pgcryptoAvailable <- isExtensionAvailable "pgcrypto"
  if pgcryptoAvailable then createPgcryptoExtension
  else throw400 Unexpected $
    "pgcrypto extension is required, but could not find the extension in the "
    <> "PostgreSQL server. Please make sure this extension is available."
  where
    createPgcryptoExtension :: m ()
    createPgcryptoExtension =
      liftTx $ Q.unitQE needsPGCryptoError
      "CREATE EXTENSION IF NOT EXISTS pgcrypto SCHEMA public" () False
      where
        needsPGCryptoError e@(Q.PGTxErr _ _ _ err) =
          case err of
            Q.PGIUnexpected _ -> requiredError
            Q.PGIStatement pgErr -> case Q.edStatusCode pgErr of
              Just "42501" -> err500 PostgresError permissionsMessage
              _ -> requiredError
          where
            requiredError =
              (err500 PostgresError requiredMessage) { qeInternal = Just $ J.toJSON e }
            requiredMessage =
              "pgcrypto extension is required, but it could not be created;"
              <> " encountered unknown postgres error"
            permissionsMessage =
              "pgcrypto extension is required, but the current user doesn’t have permission to"
              <> " create it. Please grant superuser permission, or setup the initial schema via"
              <> " https://hasura.io/docs/1.0/graphql/manual/deployment/postgres-permissions.html"
server/src-lib/Hasura/Backends/Postgres/Execute/Types.hs (new file, 128 lines)
@@ -0,0 +1,128 @@
-- A module for Postgres execution-related types

module Hasura.Backends.Postgres.Execute.Types
  ( PGExecCtx(..)
  , mkPGExecCtx
  , checkDbConnection
  , defaultTxErrorHandler
  , mkTxErrorHandler

  -- * Execution in a Postgres Source
  , PGSourceConfig(..)
  , runPgSourceReadTx
  , runPgSourceWriteTx
  ) where

import Hasura.Prelude

import qualified Data.Aeson.Extended as J
import qualified Database.PG.Query as Q
import qualified Database.PG.Query.Connection as Q

import Control.Lens
import Control.Monad.Trans.Control (MonadBaseControl (..))
import Data.Either (isRight)

import Hasura.Backends.Postgres.SQL.Error
import Hasura.Incremental (Cacheable (..))
import Hasura.RQL.Types.Error

type RunTx =
  forall m a. (MonadIO m, MonadBaseControl IO m) => Q.TxET QErr m a -> ExceptT QErr m a

data PGExecCtx
  = PGExecCtx
  { _pecRunReadOnly :: RunTx
  -- ^ Run a Q.ReadOnly transaction
  , _pecRunReadNoTx :: RunTx
  -- ^ Run a read only statement without an explicit transaction block
  , _pecRunReadWrite :: RunTx
  -- ^ Run a Q.ReadWrite transaction
  , _pecCheckHealth :: (IO Bool)
  -- ^ Checks the health of this execution context
  }

-- | Creates a Postgres execution context for a single Postgres master pool
mkPGExecCtx :: Q.TxIsolation -> Q.PGPool -> PGExecCtx
mkPGExecCtx isoLevel pool =
  PGExecCtx
  { _pecRunReadOnly = (Q.runTx pool (isoLevel, Just Q.ReadOnly))
  , _pecRunReadNoTx = (Q.runTx' pool)
  , _pecRunReadWrite = (Q.runTx pool (isoLevel, Just Q.ReadWrite))
  , _pecCheckHealth = checkDbConnection pool
  }

checkDbConnection :: MonadIO m => Q.PGPool -> m Bool
checkDbConnection pool = do
  e <- liftIO $ runExceptT $ Q.runTx' pool select1Query
  pure $ isRight e
  where
    select1Query :: Q.TxE QErr Int
    select1Query =
      runIdentity . Q.getRow <$>
        Q.withQE defaultTxErrorHandler [Q.sql| SELECT 1 |] () False

defaultTxErrorHandler :: Q.PGTxErr -> QErr
defaultTxErrorHandler = mkTxErrorHandler (const False)

-- | Constructs a transaction error handler given a predicate that determines which errors are
-- expected and should be reported to the user. All other errors are considered internal errors.
mkTxErrorHandler :: (PGErrorType -> Bool) -> Q.PGTxErr -> QErr
mkTxErrorHandler isExpectedError txe = fromMaybe unexpectedError expectedError
  where
    unexpectedError = (internalError "database query error") { qeInternal = Just $ J.toJSON txe }
    expectedError = uncurry err400 <$> do
      errorDetail <- Q.getPGStmtErr txe
      message <- Q.edMessage errorDetail
      errorType <- pgErrorType errorDetail
      guard $ isExpectedError errorType
      pure $ case errorType of
        PGIntegrityConstraintViolation code ->
          let cv = (ConstraintViolation,)
              customMessage = (code ^? _Just._PGErrorSpecific) <&> \case
                PGRestrictViolation -> cv "Can not delete or update due to data being referred. "
                PGNotNullViolation -> cv "Not-NULL violation. "
                PGForeignKeyViolation -> cv "Foreign key violation. "
                PGUniqueViolation -> cv "Uniqueness violation. "
                PGCheckViolation -> (PermissionError, "Check constraint violation. ")
                PGExclusionViolation -> cv "Exclusion violation. "
          in maybe (ConstraintViolation, message) (fmap (<> message)) customMessage

        PGDataException code -> case code of
          Just (PGErrorSpecific PGInvalidEscapeSequence) -> (BadRequest, message)
          _ -> (DataException, message)

        PGSyntaxErrorOrAccessRuleViolation code -> (ConstraintError,) $ case code of
          Just (PGErrorSpecific PGInvalidColumnReference) ->
            "there is no unique or exclusion constraint on target column(s)"
          _ -> message

data PGSourceConfig
  = PGSourceConfig
  { _pscExecCtx :: !PGExecCtx
  , _pscConnInfo :: !Q.ConnInfo
  , _pscReadReplicaConnInfos :: !(Maybe (NonEmpty Q.ConnInfo))
  } deriving (Generic)

instance Eq PGSourceConfig where
  lconf == rconf =
    (_pscConnInfo lconf, _pscReadReplicaConnInfos lconf)
    == (_pscConnInfo rconf, _pscReadReplicaConnInfos rconf)

instance Cacheable PGSourceConfig where
  unchanged _ = (==)

instance J.ToJSON PGSourceConfig where
  toJSON = J.toJSON . show . _pscConnInfo

runPgSourceReadTx
  :: (MonadIO m, MonadBaseControl IO m)
  => PGSourceConfig -> Q.TxET QErr m a -> m (Either QErr a)
runPgSourceReadTx psc =
  runExceptT . _pecRunReadNoTx (_pscExecCtx psc)

runPgSourceWriteTx
  :: (MonadIO m, MonadBaseControl IO m)
  => PGSourceConfig -> Q.TxET QErr m a -> m (Either QErr a)
runPgSourceWriteTx psc =
  runExceptT . _pecRunReadWrite (_pscExecCtx psc)
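A hypothetical consumer of the new per-source runners, reusing the select-one-row pattern and defaultTxErrorHandler from this module; the query and function name are illustrative only:

sourcePgVersion
  :: (MonadIO m, MonadBaseControl IO m)
  => PGSourceConfig -> m (Either QErr Text)
sourcePgVersion sourceConfig =
  runPgSourceReadTx sourceConfig $
    runIdentity . Q.getRow <$>
      Q.withQE defaultTxErrorHandler [Q.sql| SELECT version() |] () False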
@@ -299,7 +299,7 @@ annBoolExp rhsParser fim boolExp =
    BoolNot e -> BoolNot <$> annBoolExp rhsParser fim e
    BoolExists (GExists refqt whereExp) ->
      withPathK "_exists" $ do
        refFields <- withPathK "_table" $ askFieldInfoMap refqt
        refFields <- withPathK "_table" $ askFieldInfoMapSource refqt
        annWhereExp <- withPathK "_where" $
          annBoolExp rhsParser refFields whereExp
        return $ BoolExists $ GExists refqt annWhereExp
@@ -322,7 +322,7 @@ annColExp rhsParser colInfoMap (ColExp fieldName colVal) = do
      AVCol pgi <$> parseOperationsExpression rhsParser colInfoMap pgi colVal
    FIRelationship relInfo -> do
      relBoolExp <- decodeValue colVal
      relFieldInfoMap <- askFieldInfoMap $ riRTable relInfo
      relFieldInfoMap <- askFieldInfoMapSource $ riRTable relInfo
      annRelBoolExp <- annBoolExp rhsParser relFieldInfoMap $
        unBoolExp relBoolExp
      return $ AVRel relInfo annRelBoolExp
@@ -476,29 +476,29 @@ hasStaticExp :: OpExpG backend (PartialSQLExp backend) -> Bool
hasStaticExp = getAny . foldMap (coerce isStaticValue)

getColExpDeps
  :: QualifiedTable -> AnnBoolExpFldPartialSQL 'Postgres -> [SchemaDependency]
getColExpDeps tn = \case
  :: SourceName -> QualifiedTable -> AnnBoolExpFldPartialSQL 'Postgres -> [SchemaDependency]
getColExpDeps source tn = \case
  AVCol colInfo opExps ->
    let cn = pgiColumn colInfo
        colDepReason = bool DRSessionVariable DROnType $ any hasStaticExp opExps
        colDep = mkColDep colDepReason tn cn
        colDep = mkColDep colDepReason source tn cn
        depColsInOpExp = mapMaybe opExpDepCol opExps
        colDepsInOpExp = map (mkColDep DROnType tn) depColsInOpExp
        colDepsInOpExp = map (mkColDep DROnType source tn) depColsInOpExp
    in colDep:colDepsInOpExp
  AVRel relInfo relBoolExp ->
    let rn = riName relInfo
        relTN = riRTable relInfo
        pd = SchemaDependency (SOTableObj tn (TORel rn)) DROnType
    in pd : getBoolExpDeps relTN relBoolExp
        pd = SchemaDependency (SOSourceObj source $ SOITableObj tn (TORel rn)) DROnType
    in pd : getBoolExpDeps source relTN relBoolExp

getBoolExpDeps :: QualifiedTable -> AnnBoolExpPartialSQL 'Postgres -> [SchemaDependency]
getBoolExpDeps tn = \case
getBoolExpDeps :: SourceName -> QualifiedTable -> AnnBoolExpPartialSQL 'Postgres -> [SchemaDependency]
getBoolExpDeps source tn = \case
  BoolAnd exps -> procExps exps
  BoolOr exps -> procExps exps
  BoolNot e -> getBoolExpDeps tn e
  BoolNot e -> getBoolExpDeps source tn e
  BoolExists (GExists refqt whereExp) ->
    let tableDep = SchemaDependency (SOTable refqt) DRRemoteTable
    in tableDep:getBoolExpDeps refqt whereExp
  BoolFld fld -> getColExpDeps tn fld
    let tableDep = SchemaDependency (SOSourceObj source $ SOITable refqt) DRRemoteTable
    in tableDep:getBoolExpDeps source refqt whereExp
  BoolFld fld -> getColExpDeps source tn fld
  where
    procExps = concatMap (getBoolExpDeps tn)
    procExps = concatMap (getBoolExpDeps source tn)
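The pattern running through this hunk: schema object references gain a source namespace, so what was SOTable qt becomes SOSourceObj source (SOITable qt). A small sketch using only constructors visible above:

-- Illustrative helper, not an upstream function.
tableDepFor :: SourceName -> QualifiedTable -> SchemaDependency
tableDepFor source qt =
  SchemaDependency (SOSourceObj source $ SOITable qt) DRRemoteTable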
@@ -42,42 +42,43 @@ module Hasura.Eventing.EventTrigger

import Hasura.Prelude

import qualified Control.Concurrent.Async.Lifted.Safe as LA
import qualified Data.ByteString.Lazy as LBS
import qualified Data.HashMap.Strict as M
import qualified Data.TByteString as TBS
import qualified Data.Text as T
import qualified Data.Time.Clock as Time
import qualified Database.PG.Query as Q
import qualified Database.PG.Query.PTI as PTI
import qualified Network.HTTP.Client as HTTP
import qualified PostgreSQL.Binary.Encoding as PE

import Control.Concurrent.Extended (sleep)
import Control.Concurrent.STM.TVar
import Control.Monad.Catch (MonadMask, bracket_)
import Control.Monad.STM
import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
import Data.Has
import Data.Int (Int64)
import Data.String
import Data.Text.Extended
import Data.Text.NonEmpty
import Data.Time.Clock

import qualified Hasura.Logging as L
import qualified Hasura.Tracing as Tracing

import Hasura.Backends.Postgres.Execute.Types
import Hasura.Backends.Postgres.SQL.Types
import Hasura.Eventing.Common
import Hasura.Eventing.HTTP
import Hasura.HTTP
import Hasura.RQL.DDL.Headers
import Hasura.RQL.Types
import Hasura.Server.Version (HasVersion)


data TriggerMetadata
@@ -98,12 +99,13 @@ instance L.ToEngineLog EventInternalErr L.Hasura where
-- https://docs.hasura.io/1.0/graphql/manual/event-triggers/payload.html
data Event
  = Event
  { eId :: EventId
  , eTable :: QualifiedTable
  , eTrigger :: TriggerMetadata
  , eEvent :: Value
  , eTries :: Int
  , eCreatedAt :: Time.UTCTime
  { eId        :: !EventId
  , eSource    :: !SourceName
  , eTable     :: !QualifiedTable
  , eTrigger   :: !TriggerMetadata
  , eEvent     :: !Value
  , eTries     :: !Int
  , eCreatedAt :: !Time.UTCTime
  } deriving (Show, Eq)

$(deriveFromJSON (aesonDrop 1 snakeCase){omitNothingFields=True} ''Event)
@@ -155,6 +157,8 @@ initEventEngineCtx maxT _eeCtxFetchInterval = do
  _eeCtxEventThreadsCapacity <- newTVar maxT
  return $ EventEngineCtx{..}

type EventWithSource = (Event, SourceConfig 'Postgres)

-- | Service events from our in-DB queue.
--
-- There are a few competing concerns and constraints here; we want to...
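Each fetched event now travels with the configuration of the source it was read from, so the later invocation and status writes target the same database. A minimal restatement of the pairing (TupleSections, as used later in this hunk):

{-# LANGUAGE TupleSections #-}
-- Illustrative: tag a batch of events with their originating source.
tagWithSource :: SourceConfig 'Postgres -> [Event] -> [EventWithSource]
tagWithSource sourceConfig = map (, sourceConfig)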
@@ -176,12 +180,11 @@ processEventQueue
  => L.Logger L.Hasura
  -> LogEnvHeaders
  -> HTTP.Manager
  -> Q.PGPool
  -> IO SchemaCache
  -> EventEngineCtx
  -> LockedEventsCtx
  -> m void
processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx{..} LockedEventsCtx{leEvents} = do
processEventQueue logger logenv httpMgr getSchemaCache eeCtx@EventEngineCtx{..} LockedEventsCtx{leEvents} = do
  events0 <- popEventsBatch
  go events0 0 False
  where
@@ -197,18 +200,20 @@ processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx
         Any serial order of updates to a row will lead to an eventually consistent state as the row will have
         (delivered=t or error=t or archived=t) after a fixed number of tries (assuming it begins with locked='f').
      -}
      let run = liftIO . runExceptT . Q.runTx' pool
      run (fetchEvents fetchBatchSize) >>= \case
        Left err -> do
          liftIO $ L.unLogger logger $ EventInternalErr err
          return []
        Right events -> do
          saveLockedEvents (map eId events) leEvents
          return events
      pgSources <- scPostgres <$> liftIO getSchemaCache
      fmap concat $ forM (M.toList pgSources) $ \(sourceName, sourceCache) -> do
        let sourceConfig = _pcConfiguration sourceCache
        liftIO $ runPgSourceWriteTx sourceConfig (fetchEvents sourceName fetchBatchSize) >>= \case
          Left err -> do
            liftIO $ L.unLogger logger $ EventInternalErr err
            return []
          Right events -> do
            saveLockedEvents (map eId events) leEvents
            return $ map (, sourceConfig) events

    -- work on this batch of events while prefetching the next. Recurse after we've forked workers
    -- for each in the batch, minding the requested pool size.
    go :: [Event] -> Int -> Bool -> m void
    go :: [EventWithSource] -> Int -> Bool -> m void
    go events !fullFetchCount !alreadyWarned = do
      -- process events ASAP until we've caught up; only then can we sleep
      when (null events) . liftIO $ sleep _eeCtxFetchInterval
@@ -218,8 +223,8 @@ processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx
      -- worth the effort for something more fine-tuned
      eventsNext <- LA.withAsync popEventsBatch $ \eventsNextA -> do
        -- process approximately in order, minding HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE:
        forM_ events $ \event -> do
          t <- processEvent event
        forM_ events $ \(event, sourceConfig) -> do
          t <- processEvent event sourceConfig
            & withEventEngineCtx eeCtx
            & flip runReaderT (logger, httpMgr)
            & LA.async
@@ -258,8 +263,8 @@ processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx
        , Has (L.Logger L.Hasura) r
        , Tracing.HasReporter io
        )
      => Event -> io ()
    processEvent e = do
      => Event -> SourceConfig 'Postgres -> io ()
    processEvent e sourceConfig = do
      cache <- liftIO getSchemaCache

      tracingCtx <- liftIO (Tracing.extractEventContext (eEvent e))
@@ -275,7 +280,7 @@ processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx
          -- i) schema cache is not up-to-date (due to some bug, say during schema syncing across multiple instances)
          -- ii) the event trigger is dropped when this event was just fetched
          logQErr $ err500 Unexpected err
          liftIO . runExceptT $ Q.runTx pool (Q.ReadCommitted, Just Q.ReadWrite) $ do
          liftIO $ runPgSourceWriteTx sourceConfig $ do
            currentTime <- liftIO getCurrentTime
            -- For such an event, we unlock the event and retry after a minute
            setRetry e (addUTCTime 60 currentTime)
@@ -296,8 +301,8 @@ processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx
      logHTTPForET res extraLogCtx requestDetails
      let decodedHeaders = map (decodeHeader logenv headerInfos) headers
      either
        (processError pool e retryConf decodedHeaders ep)
        (processSuccess pool e decodedHeaders ep) res
        (processError sourceConfig e retryConf decodedHeaders ep)
        (processSuccess sourceConfig e decodedHeaders ep) res
        >>= flip onLeft logQErr

withEventEngineCtx ::
@ -332,22 +337,22 @@ createEventPayload retryConf e = EventPayload
|
||||
|
||||
processSuccess
|
||||
:: ( MonadIO m )
|
||||
=> Q.PGPool -> Event -> [HeaderConf] -> EventPayload -> HTTPResp a
|
||||
=> SourceConfig 'Postgres -> Event -> [HeaderConf] -> EventPayload -> HTTPResp a
|
||||
-> m (Either QErr ())
|
||||
processSuccess pool e decodedHeaders ep resp = do
|
||||
processSuccess sourceConfig e decodedHeaders ep resp = do
|
||||
let respBody = hrsBody resp
|
||||
respHeaders = hrsHeaders resp
|
||||
respStatus = hrsStatus resp
|
||||
invocation = mkInvocation ep respStatus decodedHeaders respBody respHeaders
|
||||
liftIO $ runExceptT $ Q.runTx pool (Q.ReadCommitted, Just Q.ReadWrite) $ do
|
||||
liftIO $ runPgSourceWriteTx sourceConfig $ do
|
||||
insertInvocation invocation
|
||||
setSuccess e
|
||||
|
||||
processError
|
||||
:: ( MonadIO m )
|
||||
=> Q.PGPool -> Event -> RetryConf -> [HeaderConf] -> EventPayload -> HTTPErr a
|
||||
=> SourceConfig 'Postgres -> Event -> RetryConf -> [HeaderConf] -> EventPayload -> HTTPErr a
|
||||
-> m (Either QErr ())
|
||||
processError pool e retryConf decodedHeaders ep err = do
|
||||
processError sourceConfig e retryConf decodedHeaders ep err = do
|
||||
let invocation = case err of
|
||||
HClient excp -> do
|
||||
let errMsg = TBS.fromLBS $ encode $ show excp
|
||||
@ -363,7 +368,7 @@ processError pool e retryConf decodedHeaders ep err = do
|
||||
HOther detail -> do
|
||||
let errMsg = TBS.fromLBS $ encode detail
|
||||
mkInvocation ep 500 decodedHeaders errMsg []
|
||||
liftIO $ runExceptT $ Q.runTx pool (Q.ReadCommitted, Just Q.ReadWrite) $ do
|
||||
liftIO $ runPgSourceWriteTx sourceConfig $ do
|
||||
insertInvocation invocation
|
||||
retryOrSetError e retryConf err
|
||||
|
||||
@ -412,7 +417,7 @@ logQErr err = do
|
||||
getEventTriggerInfoFromEvent :: SchemaCache -> Event -> Either Text EventTriggerInfo
|
||||
getEventTriggerInfoFromEvent sc e = do
|
||||
let table = eTable e
|
||||
mTableInfo = M.lookup table $ scTables sc
|
||||
mTableInfo = getPGTableInfo (eSource e) table $ scPostgres sc
|
||||
tableInfo <- onNothing mTableInfo $ Left ("table '" <> table <<> "' not found")
|
||||
let triggerName = tmName $ eTrigger e
|
||||
mEventTriggerInfo = M.lookup triggerName (_tiEventTriggerInfoMap tableInfo)
|
||||
@ -429,8 +434,8 @@ getEventTriggerInfoFromEvent sc e = do
|
||||
-- limit. Process events approximately in created_at order, but we make no
|
||||
-- ordering guarentees; events can and will race. Nevertheless we want to
|
||||
-- ensure newer change events don't starve older ones.
|
||||
fetchEvents :: Int -> Q.TxE QErr [Event]
|
||||
fetchEvents limitI =
|
||||
fetchEvents :: SourceName -> Int -> Q.TxE QErr [Event]
|
||||
fetchEvents source limitI =
|
||||
map uncurryEvent <$> Q.listQE defaultTxErrorHandler [Q.sql|
|
||||
UPDATE hdb_catalog.event_log
|
||||
SET locked = NOW()
|
||||
@ -448,6 +453,7 @@ fetchEvents limitI =
|
||||
where uncurryEvent (id', sn, tn, trn, Q.AltJ payload, tries, created) =
|
||||
Event
|
||||
{ eId = id'
|
||||
, eSource = source
|
||||
, eTable = QualifiedObject sn tn
|
||||
, eTrigger = TriggerMetadata trn
|
||||
, eEvent = payload
|
||||
|
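The substance of this hunk: event-log reads and writes no longer go through one shared connection pool; they run against the config of whichever source the event was fetched from. A minimal self-contained sketch of that pattern follows (toy types throughout; SourceConfig, processEvent and the connection string below are illustrative stand-ins, not the real hasura definitions):

{-# LANGUAGE OverloadedStrings #-}
import qualified Data.Map.Strict as Map
import Data.Text (Text)

-- Toy stand-ins for the real types: each source carries its own
-- connection context instead of the engine holding one global pool.
newtype SourceName   = SourceName Text deriving (Eq, Ord, Show)
newtype SourceConfig = SourceConfig { scConnString :: Text }

data Event = Event { eId :: Int, eSource :: SourceName }

-- Before: processSuccess :: PGPool -> Event -> ...
-- After:  processSuccess :: SourceConfig -> Event -> ...
-- so the caller must first resolve the event's source:
processEvent :: Map.Map SourceName SourceConfig -> Event -> IO ()
processEvent sources e =
  case Map.lookup (eSource e) sources of
    Nothing  -> putStrLn ("unknown source for event " <> show (eId e))
    Just cfg ->
      -- in the real code this is runPgSourceWriteTx sourceConfig $ do ...
      putStrLn ("writing invocation via " <> show (scConnString cfg))

main :: IO ()
main = do
  let sources = Map.fromList
        [ (SourceName "default", SourceConfig "postgres://app-db") ]
  processEvent sources (Event 1 (SourceName "default"))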
@ -22,20 +22,22 @@ module Hasura.GraphQL.Context

import Hasura.Prelude

import qualified Data.Aeson as J
import qualified Language.GraphQL.Draft.Syntax as G
import qualified Data.Aeson as J
import qualified Language.GraphQL.Draft.Syntax as G

import Data.Aeson.Casing
import Data.Aeson.TH
import Hasura.SQL.Backend

import qualified Hasura.Backends.Postgres.SQL.DML as PG
import qualified Hasura.RQL.IR.Delete as IR
import qualified Hasura.RQL.IR.Insert as IR
import qualified Hasura.RQL.IR.Select as IR
import qualified Hasura.RQL.IR.Update as IR
import qualified Hasura.RQL.Types.Action as RQL
import qualified Hasura.RQL.Types.RemoteSchema as RQL
import qualified Hasura.Backends.Postgres.Connection as PG
import qualified Hasura.Backends.Postgres.SQL.DML as PG
import qualified Hasura.RQL.IR.Delete as IR
import qualified Hasura.RQL.IR.Insert as IR
import qualified Hasura.RQL.IR.Select as IR
import qualified Hasura.RQL.IR.Update as IR
import qualified Hasura.RQL.Types.Action as RQL
import qualified Hasura.RQL.Types.Common as RQL
import qualified Hasura.RQL.Types.RemoteSchema as RQL

import Hasura.GraphQL.Parser

@ -63,7 +65,7 @@ type ParserFn a
-> Either (NESeq ParseError) (a, QueryReusability)

data RootField db remote action raw
= RFDB db
= RFDB !RQL.SourceName !PG.PGExecCtx db
| RFRemote remote
| RFAction action
| RFRaw raw
@ -74,7 +76,7 @@ traverseDB :: forall db db' remote action raw f
-> RootField db remote action raw
-> f (RootField db' remote action raw)
traverseDB f = \case
RFDB x -> RFDB <$> f x
RFDB s e x -> RFDB s e <$> f x
RFRemote x -> pure $ RFRemote x
RFAction x -> pure $ RFAction x
RFRaw x -> pure $ RFRaw x
@ -85,7 +87,7 @@ traverseAction :: forall db remote action action' raw f
-> RootField db remote action raw
-> f (RootField db remote action' raw)
traverseAction f = \case
RFDB x -> pure $ RFDB x
RFDB s e x -> pure $ RFDB s e x
RFRemote x -> pure $ RFRemote x
RFAction x -> RFAction <$> f x
RFRaw x -> pure $ RFRaw x
@ -96,7 +98,7 @@ traverseRemoteField :: forall db remote remote' action raw f
-> RootField db remote action raw
-> f (RootField db remote' action raw)
traverseRemoteField f = \case
RFDB x -> pure $ RFDB x
RFDB s e x -> pure $ RFDB s e x
RFRemote x -> RFRemote <$> f x
RFAction x -> pure $ RFAction x
RFRaw x -> pure $ RFRaw x
@ -136,5 +138,5 @@ data ActionMutation (b :: BackendType) v
type MutationRootField v =
RootField (MutationDB 'Postgres v) RemoteField (ActionMutation 'Postgres v) J.Value

type SubscriptionRootField v = RootField (QueryDB 'Postgres v) Void (RQL.AnnActionAsyncQuery 'Postgres v) Void
type SubscriptionRootFieldResolved = RootField (QueryDB 'Postgres PG.SQLExp) Void (IR.AnnSimpleSel 'Postgres) Void
type SubscriptionRootField v = RootField (QueryDB 'Postgres v) Void Void Void
type SubscriptionRootFieldResolved = RootField (QueryDB 'Postgres PG.SQLExp) Void Void Void
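For orientation, here is a toy model of the widened RFDB constructor and one of its traversals: RFDB now records which source a database field belongs to (plus that source's exec ctx), and the traversal only transforms the db payload while re-wrapping the extra fields. Field types are simplified to String; these are not the real definitions:

{-# LANGUAGE LambdaCase #-}

data RootField db remote action raw
  = RFDB String String db   -- source name and exec ctx, both faked as String
  | RFRemote remote
  | RFAction action
  | RFRaw raw
  deriving Show

traverseDB :: Applicative f
           => (db -> f db')
           -> RootField db remote action raw
           -> f (RootField db' remote action raw)
traverseDB f = \case
  RFDB s e x -> RFDB s e <$> f x   -- source and ctx are threaded through untouched
  RFRemote x -> pure (RFRemote x)
  RFAction x -> pure (RFAction x)
  RFRaw x    -> pure (RFRaw x)

main :: IO ()
main = print =<< traverseDB (\n -> pure (n * 2))
                            (RFDB "default" "ctx" 21 :: RootField Int () () ())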
@ -49,6 +49,7 @@ import qualified Hasura.GraphQL.Execute.Inline as EI
import qualified Hasura.GraphQL.Execute.LiveQuery as EL
import qualified Hasura.GraphQL.Execute.Mutation as EM
-- import qualified Hasura.GraphQL.Execute.Plan as EP
import qualified Hasura.GraphQL.Execute.Action as EA
import qualified Hasura.GraphQL.Execute.Prepare as EPr
import qualified Hasura.GraphQL.Execute.Query as EQ
import qualified Hasura.GraphQL.Execute.Types as ET
@ -65,7 +66,6 @@ data ExecutionCtx
= ExecutionCtx
{ _ecxLogger :: !(L.Logger L.Hasura)
, _ecxSqlGenCtx :: !SQLGenCtx
, _ecxPgExecCtx :: !PGExecCtx
-- , _ecxPlanCache :: !EP.PlanCache
, _ecxSchemaCache :: !SchemaCache
, _ecxSchemaCacheVer :: !SchemaCacheVer
@ -167,22 +167,35 @@ getExecPlanPartial userInfo sc queryType req =
-- The graphql query is resolved into a sequence of execution operations
data ResolvedExecutionPlan tx
= QueryExecutionPlan
(EPr.ExecutionPlan (tx EncJSON, Maybe EQ.PreparedSql)) [C.QueryRootField (UnpreparedValue 'Postgres)]
(EPr.ExecutionPlan EA.ActionExecutionPlan (tx EncJSON, Maybe EQ.PreparedSql)) [C.QueryRootField (UnpreparedValue 'Postgres)]
-- ^ query execution; remote schemas and introspection possible
| MutationExecutionPlan (EPr.ExecutionPlan (tx EncJSON, HTTP.ResponseHeaders))
| MutationExecutionPlan (EPr.ExecutionPlan (EA.ActionExecutionPlan, HTTP.ResponseHeaders) (tx EncJSON, HTTP.ResponseHeaders))
-- ^ mutation execution; only __typename introspection supported
| SubscriptionExecutionPlan EL.LiveQueryPlan
-- ^ live query execution; remote schemas and introspection not supported

validateSubscriptionRootField
:: MonadError QErr m
=> C.QueryRootField v -> m (C.SubscriptionRootField v)
validateSubscriptionRootField = \case
C.RFDB x -> pure $ C.RFDB x
C.RFAction (C.AQAsync s) -> pure $ C.RFAction s
C.RFAction (C.AQQuery _) -> throw400 NotSupported "query actions cannot be run as a subscription"
C.RFRemote _ -> throw400 NotSupported "subscription to remote server is not supported"
C.RFRaw _ -> throw400 NotSupported "Introspection not supported over subscriptions"
:: (MonadError QErr m, Traversable t)
=> t (C.QueryRootField v) -> m (PGExecCtx, t (C.SubscriptionRootField v))
validateSubscriptionRootField rootFields = do
subscriptionRootFields <- for rootFields \case
C.RFDB src e x -> pure $ C.RFDB src e x
C.RFAction (C.AQAsync _) -> throw400 NotSupported "async action queries are temporarily not supported in subscription"
C.RFAction (C.AQQuery _) -> throw400 NotSupported "query actions cannot be run as a subscription"
C.RFRemote _ -> throw400 NotSupported "subscription to remote server is not supported"
C.RFRaw _ -> throw400 NotSupported "Introspection not supported over subscriptions"

pgExecCtx <- case toList subscriptionRootFields of
[] -> throw500 "empty selset for subscription"
[C.RFDB _ e _] -> pure e
((C.RFDB headSrc e _):restFields) -> do
let getSource (C.RFDB s _ _) = s
getSource _ = defaultSource
unless (all ((headSrc ==) . getSource) restFields) $ throw400 NotSupported ""
pure e

pure (pgExecCtx, subscriptionRootFields)

checkQueryInAllowlist
@ -219,7 +232,6 @@ getResolvedExecPlan
)
=> Env.Environment
-> L.Logger L.Hasura
-> PGExecCtx
-- -> EP.PlanCache
-> UserInfo
-> SQLGenCtx
@ -230,7 +242,7 @@ getResolvedExecPlan
-> [HTTP.Header]
-> (GQLReqUnparsed, GQLReqParsed)
-> m (Telem.CacheHit, ResolvedExecutionPlan tx)
getResolvedExecPlan env logger pgExecCtx {- planCache-} userInfo sqlGenCtx
getResolvedExecPlan env logger {- planCache-} userInfo sqlGenCtx
sc scVer queryType httpManager reqHeaders (reqUnparsed, reqParsed) = -- do

-- See Note [Temporarily disabling query plan caching]
@ -261,6 +273,7 @@ getResolvedExecPlan env logger pgExecCtx {- planCache-} userInfo sqlGenCtx
fragments =
mapMaybe takeFragment $ unGQLExecDoc $ _grQuery reqParsed
(gCtx, queryParts) <- getExecPlanPartial userInfo sc queryType reqParsed

case queryParts of
G.TypedOperationDefinition G.OperationTypeQuery _ varDefs dirs selSet -> do
-- (Here the above fragment inlining is actually executed.)
@ -297,6 +310,8 @@ getResolvedExecPlan env logger pgExecCtx {- planCache-} userInfo sqlGenCtx
in
unless (multipleAllowed || null rst) $
throw400 ValidationFailed "subscriptions must select one top level field"
validSubscriptionAST <- for unpreparedAST validateSubscriptionRootField

(pgExecCtx, validSubscriptionAST) <- validateSubscriptionRootField unpreparedAST

(lqOp, _plan) <- EL.buildLiveQueryPlan pgExecCtx userInfo validSubscriptionAST
return $ SubscriptionExecutionPlan lqOp
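The reworked validateSubscriptionRootField does two jobs: it filters out root fields that cannot be subscribed to, and it insists that every remaining database field name the same source, returning that source's exec ctx for the poller. A self-contained toy version of that check (SubField and the String exec ctx are stand-ins, not hasura's types):

{-# LANGUAGE LambdaCase #-}

data SubField = SubDB String String   -- source name, exec ctx (both faked)
              | SubOther
              deriving Show

validateSub :: [SubField] -> Either String (String, [SubField])
validateSub fields = do
  -- reject anything that is not a database root field
  srcs <- mapM (\case SubDB s e -> Right (s, e)
                      SubOther  -> Left "not supported in subscriptions")
               fields
  -- all database fields must agree on one source
  case srcs of
    []              -> Left "empty selection set for subscription"
    ((s0, e0):rest)
      | all ((== s0) . fst) rest -> Right (e0, fields)
      | otherwise                -> Left "subscription must target a single source"

main :: IO ()
main = do
  print (validateSub [SubDB "default" "ctx", SubDB "default" "ctx"])
  print (validateSub [SubDB "default" "ctx", SubDB "analytics" "ctx2"])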
@ -1,5 +1,7 @@
module Hasura.GraphQL.Execute.Action
( ActionExecuteTx(..)
( ActionExecution(..)
, runActionExecution
, ActionExecutionPlan(..)
, ActionExecuteResult(..)
, asyncActionsProcessor
, resolveActionExecution
@ -9,6 +11,7 @@ module Hasura.GraphQL.Execute.Action
, fetchUndeliveredActionEventsTx
, setActionStatusTx
, fetchActionResponseTx
, clearActionDataTx
) where

import Hasura.Prelude
@ -16,6 +19,7 @@ import Hasura.Prelude
import qualified Control.Concurrent.Async.Lifted.Safe as LA
import qualified Data.Aeson as J
import qualified Data.Aeson.Casing as J
import qualified Data.Aeson.Ordered as AO
import qualified Data.Aeson.TH as J
import qualified Data.ByteString.Lazy as BL
import qualified Data.CaseInsensitive as CI
@ -65,13 +69,34 @@ import Hasura.Session
import Hasura.SQL.Types


newtype ActionExecuteTx =
ActionExecuteTx {
unActionExecuteTx
:: forall tx
. (MonadIO tx, MonadTx tx, Tracing.MonadTrace tx) => tx EncJSON
newtype ActionExecution =
ActionExecution {
unActionExecution
:: forall m
. (MonadIO m, MonadBaseControl IO m, MonadError QErr m, Tracing.MonadTrace m) => m EncJSON
}

-- A plan to execute any action
data ActionExecutionPlan
= AEPSync !ActionExecution
| AEPAsyncQuery !ActionId !(ActionLogResponse -> ActionExecution)
| AEPAsyncMutation !EncJSON

runActionExecution
:: ( MonadIO m, MonadBaseControl IO m
, MonadError QErr m, Tracing.MonadTrace m
, MonadMetadataStorage (MetadataStorageT m)
)
=> ActionExecutionPlan -> m (DiffTime, EncJSON)
runActionExecution aep = do
(time, resp) <- withElapsedTime $ case aep of
AEPSync e -> unActionExecution e
AEPAsyncQuery actionId f -> do
actionLogResponse <- liftEitherM $ runMetadataStorageT $ fetchActionResponse actionId
unActionExecution $ f actionLogResponse
AEPAsyncMutation m -> pure m
pure (time, resp)

newtype ActionContext
= ActionContext {_acName :: ActionName}
deriving (Show, Eq)
@ -145,7 +170,7 @@ instance L.ToEngineLog ActionHandlerLog L.Hasura where

data ActionExecuteResult
= ActionExecuteResult
{ _aerExecution :: !ActionExecuteTx
{ _aerExecution :: !ActionExecution
, _aerHeaders :: !HTTP.ResponseHeaders
}

@ -165,32 +190,53 @@ resolveActionExecution
resolveActionExecution env logger userInfo annAction execContext = do
let actionContext = ActionContext actionName
handlerPayload = ActionWebhookPayload actionContext sessionVariables inputPayload
(webhookRes, respHeaders) <- flip runReaderT logger $ callWebhook env manager outputType outputFields reqHeaders confHeaders
(webhookRes, respHeaders) <- flip runReaderT logger $
callWebhook env manager outputType outputFields reqHeaders confHeaders
forwardClientHeaders resolvedWebhook handlerPayload timeout
let webhookResponseExpression = RS.AEInput $ UVLiteral $
toTxtValue $ ColumnValue (ColumnScalar PGJSONB) $ PGValJSONB $ Q.JSONB $ J.toJSON webhookRes
selectAstUnresolved = processOutputSelectionSet webhookResponseExpression
outputType definitionList annFields stringifyNum
(astResolved, _expectedVariables) <- flip runStateT Set.empty $ RS.traverseAnnSimpleSelect prepareWithoutPlan selectAstUnresolved
return $ ActionExecuteResult (executeAction astResolved) respHeaders

flip ActionExecuteResult respHeaders <$> case actionSource of
-- Build client response
ASINoSource -> pure $ ActionExecution $ pure $ AO.toEncJSON $ makeActionResponseNoRelations annFields webhookRes
ASISource sourceConfig -> do
let webhookResponseExpression = RS.AEInput $ UVLiteral $
toTxtValue $ ColumnValue (ColumnScalar PGJSONB) $ PGValJSONB $ Q.JSONB $ J.toJSON webhookRes
selectAstUnresolved = processOutputSelectionSet webhookResponseExpression
outputType definitionList annFields stringifyNum
(astResolved, _expectedVariables) <- flip runStateT Set.empty $ RS.traverseAnnSimpleSelect prepareWithoutPlan selectAstUnresolved
pure $ executeActionInDb sourceConfig astResolved
where
AnnActionExecution actionName outputType annFields inputPayload
outputFields definitionList resolvedWebhook confHeaders
forwardClientHeaders stringifyNum timeout = annAction
forwardClientHeaders stringifyNum timeout actionSource = annAction
ActionExecContext manager reqHeaders sessionVariables = execContext


executeAction :: RS.AnnSimpleSel 'Postgres -> ActionExecuteTx
executeAction astResolved = ActionExecuteTx do
executeActionInDb :: SourceConfig 'Postgres -> RS.AnnSimpleSel 'Postgres -> ActionExecution
executeActionInDb sourceConfig astResolved = ActionExecution do
let (astResolvedWithoutRemoteJoins,maybeRemoteJoins) = RJ.getRemoteJoins astResolved
jsonAggType = mkJsonAggSelect outputType
case maybeRemoteJoins of
Just remoteJoins ->
let query = Q.fromBuilder $ toSQL $
RS.mkSQLSelect jsonAggType astResolvedWithoutRemoteJoins
in RJ.executeQueryWithRemoteJoins env manager reqHeaders userInfo query [] remoteJoins
Nothing ->
liftTx $ asSingleRowJsonResp (Q.fromBuilder $ toSQL $ RS.mkSQLSelect jsonAggType astResolved) []
liftEitherM $ runExceptT $ runLazyTx (_pscExecCtx sourceConfig) Q.ReadOnly $
case maybeRemoteJoins of
Just remoteJoins ->
let query = Q.fromBuilder $ toSQL $
RS.mkSQLSelect jsonAggType astResolvedWithoutRemoteJoins
in RJ.executeQueryWithRemoteJoins env manager reqHeaders userInfo query [] remoteJoins
Nothing ->
liftTx $ asSingleRowJsonResp (Q.fromBuilder $ toSQL $ RS.mkSQLSelect jsonAggType astResolved) []


-- | Build action response from the Webhook JSON response when there are no relationships defined
makeActionResponseNoRelations :: RS.AnnFieldsG b v -> ActionWebhookResponse -> AO.Value
makeActionResponseNoRelations annFields webhookResponse =
let mkResponseObject obj =
AO.object $ flip mapMaybe annFields $ \(fieldName, annField) ->
let fieldText = getFieldNameTxt fieldName
in (fieldText,) <$> case annField of
RS.AFExpression t -> Just $ AO.String t
_ -> AO.toOrdered <$> Map.lookup fieldText (mapKeys G.unName obj)
in case webhookResponse of
AWRArray objs -> AO.array $ map mkResponseObject objs
AWRObject obj -> mkResponseObject obj

{- Note: [Async action architecture]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -211,11 +257,10 @@ resolveActionMutationAsync
=> AnnActionMutationAsync
-> [HTTP.Header]
-> SessionVariables
-> m ActionExecuteTx
-> m EncJSON
resolveActionMutationAsync annAction reqHeaders sessionVariables = do
actionId <- insertAction actionName sessionVariables reqHeaders inputArgs
pure $ ActionExecuteTx $
pure $ encJFromJValue $ actionIdToText actionId
pure $ encJFromJValue $ actionIdToText actionId
where
AnnActionMutationAsync actionName inputArgs = annAction

@ -232,39 +277,57 @@ action's type. Here, we treat the "output" field as a computed field to hdb_acti

-- TODO: Add tracing here? Avoided now because currently the function is pure
resolveAsyncActionQuery
:: (MonadMetadataStorage m)
=> UserInfo
:: UserInfo
-> AnnActionAsyncQuery 'Postgres (UnpreparedValue 'Postgres)
-> m (RS.AnnSimpleSelG 'Postgres (UnpreparedValue 'Postgres))
resolveAsyncActionQuery userInfo annAction = do
actionLogResponse <- fetchActionResponse actionId
let annotatedFields = asyncFields <&> second \case
AsyncTypename t -> RS.AFExpression t
AsyncOutput annFields ->
-- See Note [Resolving async action query/subscription]
let inputTableArgument = RS.AETableRow $ Just $ Identifier "response_payload"
jsonAggSelect = mkJsonAggSelect outputType
in RS.AFComputedField $ RS.CFSTable jsonAggSelect $
processOutputSelectionSet inputTableArgument outputType
definitionList annFields stringifyNumerics
-> ActionLogResponse
-> ActionExecution
resolveAsyncActionQuery userInfo annAction actionLogResponse = ActionExecution
case actionSource of
ASINoSource -> do
let ActionLogResponse{..} = actionLogResponse
resolvedFields <- for asyncFields $ \(fieldName, fld) -> do
let fieldText = getFieldNameTxt fieldName
(fieldText,) <$> case fld of
AsyncTypename t -> pure $ AO.String t
AsyncOutput annFields ->
fromMaybe AO.Null <$> forM _alrResponsePayload
\response -> makeActionResponseNoRelations annFields <$> decodeValue response
AsyncId -> pure $ AO.String $ actionIdToText actionId
AsyncCreatedAt -> pure $ AO.toOrdered $ J.toJSON _alrCreatedAt
AsyncErrors -> pure $ AO.toOrdered $ J.toJSON _alrErrors
pure $ AO.toEncJSON $ AO.object resolvedFields

AsyncId -> mkAnnFldFromPGCol idColumn
AsyncCreatedAt -> mkAnnFldFromPGCol createdAtColumn
AsyncErrors -> mkAnnFldFromPGCol errorsColumn
ASISource sourceConfig -> do
let jsonAggSelect = mkJsonAggSelect outputType
annotatedFields = asyncFields <&> second \case
AsyncTypename t -> RS.AFExpression t
AsyncOutput annFields ->
-- See Note [Resolving async action query/subscription]
let inputTableArgument = RS.AETableRow $ Just $ Identifier "response_payload"
in RS.AFComputedField $ RS.CFSTable jsonAggSelect $
processOutputSelectionSet inputTableArgument outputType
definitionList annFields stringifyNumerics

jsonbToRecordSet = QualifiedObject "pg_catalog" $ FunctionName "jsonb_to_recordset"
actionLogInput = UVLiteral $ S.SELit $ lbsToTxt $ J.encode [actionLogResponse]
functionArgs = RS.FunctionArgsExp [RS.AEInput actionLogInput] mempty
tableFromExp = RS.FromFunction jsonbToRecordSet functionArgs $ Just
[idColumn, createdAtColumn, responsePayloadColumn, errorsColumn, sessionVarsColumn]
tableArguments = RS.noSelectArgs
{ RS._saWhere = Just tableBoolExpression}
tablePermissions = RS.TablePerm annBoolExpTrue Nothing
AsyncId -> mkAnnFldFromPGCol idColumn
AsyncCreatedAt -> mkAnnFldFromPGCol createdAtColumn
AsyncErrors -> mkAnnFldFromPGCol errorsColumn

pure $ RS.AnnSelectG annotatedFields tableFromExp tablePermissions
tableArguments stringifyNumerics
jsonbToRecordSet = QualifiedObject "pg_catalog" $ FunctionName "jsonb_to_recordset"
actionLogInput = UVLiteral $ S.SELit $ lbsToTxt $ J.encode [actionLogResponse]
functionArgs = RS.FunctionArgsExp [RS.AEInput actionLogInput] mempty
tableFromExp = RS.FromFunction jsonbToRecordSet functionArgs $ Just
[idColumn, createdAtColumn, responsePayloadColumn, errorsColumn, sessionVarsColumn]
tableArguments = RS.noSelectArgs
{ RS._saWhere = Just tableBoolExpression}
tablePermissions = RS.TablePerm annBoolExpTrue Nothing
annSelect = RS.AnnSelectG annotatedFields tableFromExp tablePermissions
tableArguments stringifyNumerics

(selectResolved, _) <- flip runStateT Set.empty $ RS.traverseAnnSimpleSelect prepareWithoutPlan annSelect
liftEitherM $ liftIO $ runPgSourceReadTx sourceConfig $
asSingleRowJsonResp (Q.fromBuilder $ toSQL $ RS.mkSQLSelect jsonAggSelect selectResolved) []
where
AnnActionAsyncQuery _ actionId outputType asyncFields definitionList stringifyNumerics = annAction
AnnActionAsyncQuery _ actionId outputType asyncFields definitionList stringifyNumerics actionSource = annAction

idColumn = (unsafePGCol "id", PGUUID)
responsePayloadColumn = (unsafePGCol "response_payload", PGJSONB)
@ -551,3 +614,10 @@ fetchActionResponseTx actionId = do
WHERE id = $1
|] (Identity actionId) True
pure $ ActionLogResponse actionId ca (Q.getAltJ <$> rp) (Q.getAltJ <$> errs) sessVars

clearActionDataTx :: ActionName -> Q.TxE QErr ()
clearActionDataTx actionName =
Q.unitQE defaultTxErrorHandler [Q.sql|
DELETE FROM hdb_catalog.hdb_action_log
WHERE action_name = $1
|] (Identity actionName) True
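ActionExecutionPlan splits action handling into three cases that are only forced at execution time: run the synchronous webhook computation, fetch the stored log row and resolve an async query against it, or return the already-known async-mutation response. A runnable toy dispatcher with the same shape (Plan, fetchLog and the String payloads are illustrative, not hasura's types):

{-# LANGUAGE LambdaCase #-}
import Control.Monad ((>=>))

data Plan
  = Sync (IO String)                     -- cf. AEPSync
  | AsyncQuery Int (String -> IO String) -- cf. AEPAsyncQuery (needs the log row)
  | AsyncMutation String                 -- cf. AEPAsyncMutation (the action id)

fetchLog :: Int -> IO String             -- stand-in for fetchActionResponse
fetchLog i = pure ("log-" <> show i)

runPlan :: Plan -> IO String
runPlan = \case
  Sync act           -> act              -- run the webhook computation now
  AsyncQuery aid k   -> fetchLog aid >>= k  -- fetch the log row, then resolve
  AsyncMutation resp -> pure resp        -- response was produced at insert time

main :: IO ()
main = mapM_ (runPlan >=> putStrLn)
  [ Sync (pure "sync result")
  , AsyncQuery 7 (\logRow -> pure ("resolved from " <> logRow))
  , AsyncMutation "action-id-42"
  ]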
@ -23,7 +23,6 @@ import Hasura.Backends.Postgres.SQL.Value
import Hasura.Backends.Postgres.Translate.Select (asSingleRowJsonResp)
import Hasura.EncJSON
import Hasura.GraphQL.Context
import Hasura.GraphQL.Execute.Action
import Hasura.GraphQL.Execute.Prepare
import Hasura.RQL.Types
import Hasura.Server.Version (HasVersion)
@ -45,12 +44,12 @@ instance J.ToJSON PreparedSql where

data RootFieldPlan
= RFPPostgres !PreparedSql
| RFPActionQuery !ActionExecuteTx
-- | RFPActionQuery !ActionExecution

instance J.ToJSON RootFieldPlan where
toJSON = \case
RFPPostgres pgPlan -> J.toJSON pgPlan
RFPActionQuery _ -> J.String "Action Execution Tx"
-- RFPActionQuery _ -> J.String "Action Execution Tx"


-- | A method for extracting profiling data from instrumented query results.
@ -85,7 +84,7 @@ mkCurPlanTx env manager reqHdrs userInfo instrument ep = \case
asSingleRowJsonResp (instrument q) prepArgs
Just remoteJoins ->
executeQueryWithRemoteJoins env manager reqHdrs userInfo q prepArgs remoteJoins
RFPActionQuery atx -> (unActionExecuteTx atx, Nothing)
-- RFPActionQuery atx -> (unActionExecution atx, Nothing)

-- convert a query from an intermediate representation to... another
irToRootFieldPlan
@ -40,6 +40,7 @@ import qualified Language.GraphQL.Draft.Syntax as G
import qualified PostgreSQL.Binary.Encoding as PE

import Control.Lens
import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Semigroup.Generic
import Data.UUID (UUID)

@ -55,10 +56,8 @@ import Hasura.Backends.Postgres.SQL.Types
import Hasura.Backends.Postgres.SQL.Value
import Hasura.Backends.Postgres.Translate.Column (toTxtValue)
import Hasura.GraphQL.Context
import Hasura.GraphQL.Execute.Action
import Hasura.GraphQL.Execute.Query
import Hasura.GraphQL.Parser.Column
import Hasura.Metadata.Class
import Hasura.RQL.Types
import Hasura.Session
import Hasura.SQL.Types
@ -72,11 +71,11 @@ newtype MultiplexedQuery = MultiplexedQuery { unMultiplexedQuery :: Q.Query }

toSQLFromItem :: S.Alias -> SubscriptionRootFieldResolved -> S.FromItem
toSQLFromItem alias = \case
RFDB (QDBPrimaryKey s) -> fromSelect $ DS.mkSQLSelect DS.JASSingleObject s
RFDB (QDBSimple s) -> fromSelect $ DS.mkSQLSelect DS.JASMultipleRows s
RFDB (QDBAggregation s) -> fromSelect $ DS.mkAggregateSelect s
RFDB (QDBConnection s) -> S.mkSelectWithFromItem (DS.mkConnectionSelect s) alias
RFAction s -> fromSelect $ DS.mkSQLSelect DS.JASSingleObject s
RFDB _ _ (QDBPrimaryKey s) -> fromSelect $ DS.mkSQLSelect DS.JASSingleObject s
RFDB _ _ (QDBSimple s) -> fromSelect $ DS.mkSQLSelect DS.JASMultipleRows s
RFDB _ _ (QDBAggregation s) -> fromSelect $ DS.mkAggregateSelect s
RFDB _ _ (QDBConnection s) -> S.mkSelectWithFromItem (DS.mkConnectionSelect s) alias
-- RFAction s -> fromSelect $ DS.mkSQLSelect DS.JASSingleObject s
where
fromSelect s = S.mkSelFromItem s alias

@ -324,6 +323,7 @@ data LiveQueryPlan
= LiveQueryPlan
{ _lqpParameterizedPlan :: !ParameterizedLiveQueryPlan
, _lqpVariables :: !CohortVariables
, _lqpPGExecCtx :: !PGExecCtx
}

data ParameterizedLiveQueryPlan
@ -346,7 +346,6 @@ $(J.deriveToJSON (J.aesonDrop 4 J.snakeCase) ''ReusableLiveQueryPlan)
-- of the plan if possible.
buildLiveQueryPlan
:: ( MonadError QErr m
, MonadMetadataStorage (MetadataStorageT m)
, MonadIO m
)
=> PGExecCtx
@ -358,7 +357,7 @@ buildLiveQueryPlan pgExecCtx userInfo unpreparedAST = do
for unpreparedAST \unpreparedQuery -> do
resolvedRootField <- traverseQueryRootField resolveMultiplexedValue unpreparedQuery
case resolvedRootField of
RFDB qDB -> do
RFDB _ _ qDB -> do
let remoteJoins = case qDB of
QDBSimple s -> snd $ RR.getRemoteJoins s
QDBPrimaryKey s -> snd $ RR.getRemoteJoins s
@ -366,10 +365,7 @@ buildLiveQueryPlan pgExecCtx userInfo unpreparedAST = do
QDBConnection s -> snd $ RR.getRemoteJoinsConnectionSelect s
when (remoteJoins /= mempty)
$ throw400 NotSupported "Remote relationships are not allowed in subscriptions"
_ -> pure ()
flip traverseAction resolvedRootField $
(lift . liftEitherM . runMetadataStorageT . resolveAsyncActionQuery userInfo)
>=> DS.traverseAnnSimpleSelect resolveMultiplexedValue
pure resolvedRootField

let multiplexedQuery = mkMultiplexedQuery preparedAST
roleName = _uiRole userInfo
@ -385,7 +381,7 @@ buildLiveQueryPlan pgExecCtx userInfo unpreparedAST = do
cohortVariables = mkCohortVariables _qpiReferencedSessionVariables
(_uiSession userInfo) validatedQueryVars validatedSyntheticVars

plan = LiveQueryPlan parameterizedPlan cohortVariables
plan = LiveQueryPlan parameterizedPlan cohortVariables pgExecCtx
-- See Note [Temporarily disabling query plan caching]
-- varTypes = finalReusability ^? GV._Reusable
reusablePlan =
@ -404,15 +400,19 @@ data LiveQueryPlanExplanation
$(J.deriveToJSON (J.aesonDrop 5 J.snakeCase) ''LiveQueryPlanExplanation)

explainLiveQueryPlan
:: (MonadTx m, MonadIO m)
:: ( MonadError QErr m
, MonadIO m
, MonadBaseControl IO m
)
=> LiveQueryPlan -> m LiveQueryPlanExplanation
explainLiveQueryPlan plan = do
let parameterizedPlan = _lqpParameterizedPlan plan
pgExecCtx = _lqpPGExecCtx plan
queryText = Q.getQueryText . unMultiplexedQuery $ _plqpQuery parameterizedPlan
-- CAREFUL!: an `EXPLAIN ANALYZE` here would actually *execute* this
-- query, maybe resulting in privilege escalation:
explainQuery = Q.fromText $ "EXPLAIN (FORMAT TEXT) " <> queryText
cohortId <- newCohortId
explanationLines <- map runIdentity <$> executeQuery explainQuery
[(cohortId, _lqpVariables plan)]
explanationLines <- liftEitherM $ runExceptT $ runLazyTx pgExecCtx Q.ReadOnly $
map runIdentity <$> executeQuery explainQuery [(cohortId, _lqpVariables plan)]
pure $ LiveQueryPlanExplanation queryText explanationLines $ _lqpVariables plan
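The key move in this hunk is storing the exec ctx inside the LiveQueryPlan rather than passing it alongside, so whoever holds a plan can run (or EXPLAIN) it against the right source without a separate global context. A toy rendering of that idea (ExecCtx here is a fake, not the real PGExecCtx):

newtype ExecCtx = ExecCtx { runReadOnly :: String -> IO [String] }

data LiveQueryPlan = LiveQueryPlan
  { lqpQuery   :: String
  , lqpExecCtx :: ExecCtx   -- mirrors the new _lqpPGExecCtx field
  }

explainPlan :: LiveQueryPlan -> IO [String]
explainPlan plan =
  -- EXPLAIN only: EXPLAIN ANALYZE would actually execute the query
  runReadOnly (lqpExecCtx plan) ("EXPLAIN (FORMAT TEXT) " <> lqpQuery plan)

main :: IO ()
main = do
  let ctx = ExecCtx (\sql -> pure ["would run: " <> sql])
  explainPlan (LiveQueryPlan "SELECT 1" ctx) >>= mapM_ putStrLn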
@ -31,7 +31,6 @@ import GHC.AssertNF
import qualified Hasura.GraphQL.Execute.LiveQuery.TMap as TMap
import qualified Hasura.Logging as L

import Hasura.Backends.Postgres.Connection
import Hasura.GraphQL.Execute.LiveQuery.Options
import Hasura.GraphQL.Execute.LiveQuery.Plan
import Hasura.GraphQL.Execute.LiveQuery.Poll
@ -44,18 +43,18 @@ import Hasura.RQL.Types.Common (unNonNegativeDiffTime
data LiveQueriesState
= LiveQueriesState
{ _lqsOptions :: !LiveQueriesOptions
, _lqsPGExecTx :: !PGExecCtx
, _lqsLiveQueryMap :: !PollerMap
, _lqsPostPollHook :: !LiveQueryPostPollHook
-- ^ A hook function which is run after each fetch cycle
}

initLiveQueriesState :: LiveQueriesOptions -> PGExecCtx -> LiveQueryPostPollHook -> IO LiveQueriesState
initLiveQueriesState options pgCtx pollHook =
LiveQueriesState options pgCtx <$> STMMap.newIO <*> pure pollHook
initLiveQueriesState
:: LiveQueriesOptions -> LiveQueryPostPollHook -> IO LiveQueriesState
initLiveQueriesState options pollHook =
LiveQueriesState options <$> STMMap.newIO <*> pure pollHook

dumpLiveQueriesState :: Bool -> LiveQueriesState -> IO J.Value
dumpLiveQueriesState extended (LiveQueriesState opts _ lqMap _) = do
dumpLiveQueriesState extended (LiveQueriesState opts lqMap _) = do
lqMapJ <- dumpPollerMap extended lqMap
return $ J.object
[ "options" J..= opts
@ -120,9 +119,9 @@ addLiveQuery logger subscriberMetadata lqState plan onResultAction = do

pure $ LiveQueryId handlerId cohortKey subscriberId
where
LiveQueriesState lqOpts pgExecCtx lqMap postPollHook = lqState
LiveQueriesState lqOpts lqMap postPollHook = lqState
LiveQueriesOptions _ refetchInterval = lqOpts
LiveQueryPlan (ParameterizedLiveQueryPlan role query) cohortKey = plan
LiveQueryPlan (ParameterizedLiveQueryPlan role query) cohortKey pgExecCtx = plan

handlerId = PollerKey role query
@ -105,9 +105,6 @@ convertMutationAction
, MonadError QErr m
, MonadMetadataStorage (MetadataStorageT m)
, Tracing.MonadTrace m
, Tracing.MonadTrace tx
, MonadIO tx
, MonadTx tx
)
=> Env.Environment
-> L.Logger L.Hasura
@ -115,12 +112,13 @@ convertMutationAction
-> HTTP.Manager
-> HTTP.RequestHeaders
-> ActionMutation 'Postgres (UnpreparedValue 'Postgres)
-> m (tx EncJSON, HTTP.ResponseHeaders)
-> m (ActionExecutionPlan, HTTP.ResponseHeaders)
convertMutationAction env logger userInfo manager reqHeaders = \case
AMSync s -> ((unActionExecuteTx . _aerExecution) &&& _aerHeaders) <$>
AMSync s -> ((AEPSync . _aerExecution) &&& _aerHeaders) <$>
resolveActionExecution env logger userInfo s actionExecContext
AMAsync s -> (noResponseHeaders . unActionExecuteTx) <$>
liftEitherM (runMetadataStorageT $ resolveActionMutationAsync s reqHeaders userSession)
AMAsync s -> do
result <- liftEitherM (runMetadataStorageT $ resolveActionMutationAsync s reqHeaders userSession)
pure (AEPAsyncMutation result, [])
where
userSession = _uiSession userInfo
actionExecContext = ActionExecContext manager reqHeaders $ _uiSession userInfo
@ -146,7 +144,7 @@ convertMutationSelectionSet
-> G.SelectionSet G.NoFragments G.Name
-> [G.VariableDefinition]
-> Maybe GH.VariableValues
-> m (ExecutionPlan (tx EncJSON, HTTP.ResponseHeaders))
-> m (ExecutionPlan (ActionExecutionPlan, HTTP.ResponseHeaders) (tx EncJSON, HTTP.ResponseHeaders))
convertMutationSelectionSet env logger gqlContext SQLGenCtx{stringifyNum} userInfo manager reqHeaders fields varDefs varValsM = do
mutationParser <- onNothing (gqlMutationParser gqlContext) $
throw400 ValidationFailed "no mutations exist"
@ -160,7 +158,7 @@ convertMutationSelectionSet env logger gqlContext SQLGenCtx{stringifyNum} userIn
let userSession = _uiSession userInfo
remoteJoinCtx = (manager, reqHeaders, userInfo)
txs <- for unpreparedQueries \case
RFDB db -> ExecStepDB . noResponseHeaders <$> case db of
RFDB _ execCtx db -> ExecStepDB execCtx . noResponseHeaders <$> case db of
MDBInsert s -> convertInsert env userSession remoteJoinCtx s stringifyNum
MDBUpdate s -> convertUpdate env userSession remoteJoinCtx s stringifyNum
MDBDelete s -> convertDelete env userSession remoteJoinCtx s stringifyNum
@ -172,7 +170,7 @@ convertMutationSelectionSet env logger gqlContext SQLGenCtx{stringifyNum} userIn
remoteSchemaInfo
G.OperationTypeMutation
$ [G.SelectionField resolvedRemoteField]
RFAction action -> ExecStepDB <$> convertMutationAction env logger userInfo manager reqHeaders action
RFAction action -> ExecStepAction <$> convertMutationAction env logger userInfo manager reqHeaders action
RFRaw s -> pure $ ExecStepRaw s

return txs
|
||||
|
||||
import Hasura.Prelude
|
||||
|
||||
import qualified Data.Aeson as J
|
||||
import qualified Data.HashMap.Strict as Map
|
||||
import qualified Data.HashSet as Set
|
||||
import qualified Data.IntMap as IntMap
|
||||
import qualified Database.PG.Query as Q
|
||||
import qualified Language.GraphQL.Draft.Syntax as G
|
||||
import qualified Data.Aeson as J
|
||||
import qualified Data.HashMap.Strict as Map
|
||||
import qualified Data.HashSet as Set
|
||||
import qualified Data.IntMap as IntMap
|
||||
import qualified Database.PG.Query as Q
|
||||
import qualified Language.GraphQL.Draft.Syntax as G
|
||||
|
||||
import Data.Text.Extended
|
||||
|
||||
import qualified Hasura.Backends.Postgres.SQL.DML as S
|
||||
import qualified Hasura.GraphQL.Transport.HTTP.Protocol as GH
|
||||
import qualified Hasura.Backends.Postgres.SQL.DML as S
|
||||
import qualified Hasura.GraphQL.Transport.HTTP.Protocol as GH
|
||||
|
||||
import Hasura.Backends.Postgres.SQL.Value
|
||||
import Hasura.Backends.Postgres.Translate.Column
|
||||
import Hasura.GraphQL.Parser.Column
|
||||
import Hasura.GraphQL.Parser.Schema
|
||||
import Hasura.RQL.DML.Internal (currentSession)
|
||||
import Hasura.RQL.DML.Internal (currentSession)
|
||||
import Hasura.RQL.Types
|
||||
import Hasura.SQL.Types
|
||||
import Hasura.Session
|
||||
import Hasura.SQL.Types
|
||||
|
||||
|
||||
type PlanVariables = Map.HashMap G.Name Int
|
||||
@ -45,13 +45,15 @@ type PrepArgMap = IntMap.IntMap (Q.PrepArg, PGScalarValue)
|
||||
-- | Full execution plan to process one GraphQL query. Once we work on
|
||||
-- heterogeneous execution this will contain a mixture of things to run on the
|
||||
-- database and things to run on remote schemas.
|
||||
type ExecutionPlan db = InsOrdHashMap G.Name (ExecutionStep db)
|
||||
type ExecutionPlan action db = InsOrdHashMap G.Name (ExecutionStep action db)
|
||||
|
||||
-- | One execution step to processing a GraphQL query (e.g. one root field).
|
||||
-- Polymorphic to allow the SQL to be generated in stages.
|
||||
data ExecutionStep db
|
||||
= ExecStepDB db
|
||||
data ExecutionStep action db
|
||||
= ExecStepDB PGExecCtx db
|
||||
-- ^ A query to execute against the database
|
||||
| ExecStepAction action
|
||||
-- ^ Execute an action
|
||||
| ExecStepRemote !RemoteSchemaInfo !GH.GQLReqOutgoing
|
||||
-- ^ A graphql query to execute against a remote schema
|
||||
| ExecStepRaw J.Value
|
||||
|
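ExecutionStep now distinguishes per-source database work (each step carrying its own exec ctx), actions, remote schema calls, and raw introspection values. A self-contained toy interpreter with the same four-way split (all payload types faked as String; not the real hasura types):

{-# LANGUAGE LambdaCase #-}

data Step action db
  = StepDB String db       -- exec ctx faked as a connection string
  | StepAction action
  | StepRemote String      -- remote schema URL, simplified
  | StepRaw String

runStep :: (action -> IO String)
        -> (String -> db -> IO String)
        -> Step action db
        -> IO String
runStep runAction runDB = \case
  StepDB ctx tx  -> runDB ctx tx        -- database step uses its own ctx
  StepAction a   -> runAction a
  StepRemote url -> pure ("would call " <> url)
  StepRaw v      -> pure v

main :: IO ()
main = do
  let steps = [ StepDB "postgres://src1" "SELECT 1"
              , StepAction "my_action"
              , StepRemote "https://remote.example/graphql"
              , StepRaw "__typename"
              ] :: [Step String String]
  mapM_ (\s -> runStep pure (\c q -> pure (c <> ": " <> q)) s >>= putStrLn) steps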
@ -13,21 +13,20 @@ module Hasura.GraphQL.Execute.Query

import Hasura.Prelude

import qualified Data.Aeson as J
import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as Map
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.Sequence.NonEmpty as NESeq
import qualified Database.PG.Query as Q
import qualified Language.GraphQL.Draft.Syntax as G
import qualified Network.HTTP.Client as HTTP
import qualified Network.HTTP.Types as HTTP
import qualified Data.Aeson as J
import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as Map
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.Sequence.NonEmpty as NESeq
import qualified Database.PG.Query as Q
import qualified Language.GraphQL.Draft.Syntax as G
import qualified Network.HTTP.Client as HTTP
import qualified Network.HTTP.Types as HTTP

import qualified Hasura.Backends.Postgres.Translate.Select as DS
import qualified Hasura.GraphQL.Transport.HTTP.Protocol as GH
import qualified Hasura.Logging as L
import qualified Hasura.RQL.IR.Select as DS
import qualified Hasura.Tracing as Tracing
import qualified Hasura.GraphQL.Transport.HTTP.Protocol as GH
import qualified Hasura.Logging as L
import qualified Hasura.RQL.IR.Select as DS
import qualified Hasura.Tracing as Tracing

import Hasura.Backends.Postgres.Connection
import Hasura.EncJSON
@ -40,21 +39,10 @@ import Hasura.GraphQL.Execute.Resolve
import Hasura.GraphQL.Parser
import Hasura.Metadata.Class
import Hasura.RQL.Types
import Hasura.Server.Version (HasVersion)
import Hasura.Server.Version (HasVersion)
import Hasura.Session


data ActionQueryPlan (b :: BackendType)
= AQPAsyncQuery !(DS.AnnSimpleSel b) -- ^ Cacheable plan
| AQPQuery !ActionExecuteTx -- ^ Non cacheable transaction

actionQueryToRootFieldPlan
:: PrepArgMap -> ActionQueryPlan 'Postgres -> RootFieldPlan
actionQueryToRootFieldPlan prepped = \case
AQPAsyncQuery s -> RFPPostgres $
PreparedSql (DS.selectQuerySQL DS.JASSingleObject s) prepped Nothing
AQPQuery tx -> RFPActionQuery tx

-- See Note [Temporarily disabling query plan caching]
-- data ReusableVariableTypes
-- data ReusableVariableValues
@ -152,7 +140,6 @@ instance MonadQueryInstrumentation m => MonadQueryInstrumentation (MetadataStora
convertQuerySelSet
:: forall m tx .
( MonadError QErr m
, MonadMetadataStorage (MetadataStorageT m)
, HasVersion
, MonadIO m
, Tracing.MonadTrace m
@ -171,7 +158,7 @@ convertQuerySelSet
-> G.SelectionSet G.NoFragments G.Name
-> [G.VariableDefinition]
-> Maybe GH.VariableValues
-> m ( ExecutionPlan (tx EncJSON, Maybe PreparedSql)
-> m ( ExecutionPlan ActionExecutionPlan (tx EncJSON, Maybe PreparedSql)
-- , Maybe ReusableQueryPlan
, [QueryRootField (UnpreparedValue 'Postgres)]
)
@ -184,24 +171,24 @@ convertQuerySelSet env logger gqlContext userInfo manager reqHeaders directives
(preparedQuery, PlanningSt _ _ planVals expectedVariables)
<- flip runStateT initPlanningSt
$ traverseQueryRootField prepareWithPlan unpreparedQuery
>>= traverseAction convertActionQuery
>>= traverseRemoteField (resolveRemoteField userInfo)
validateSessionVariables expectedVariables $ _uiSession userInfo
traverseDB (pure . irToRootFieldPlan planVals) preparedQuery
>>= traverseAction (pure . actionQueryToRootFieldPlan planVals)

(instrument, ep) <- askInstrumentQuery directives

-- Transform the query plans into an execution plan
let executionPlan = queryPlan <&> \case
RFRemote (RemoteFieldG remoteSchemaInfo remoteField) -> do
buildExecStepRemote
remoteSchemaInfo
G.OperationTypeQuery
[G.SelectionField remoteField]
RFDB db -> ExecStepDB $ mkCurPlanTx env manager reqHeaders userInfo instrument ep (RFPPostgres db)
RFAction rfp -> ExecStepDB $ mkCurPlanTx env manager reqHeaders userInfo instrument ep rfp
RFRaw r -> ExecStepRaw r
executionPlan <- forM queryPlan \case
RFRemote (RemoteFieldG remoteSchemaInfo remoteField) -> pure $
buildExecStepRemote
remoteSchemaInfo
G.OperationTypeQuery
[G.SelectionField remoteField]
RFDB _ e db -> pure $ ExecStepDB e $ mkCurPlanTx env manager reqHeaders userInfo instrument ep (RFPPostgres db)
RFAction (AQQuery s) -> ExecStepAction . AEPSync . _aerExecution <$>
resolveActionExecution env logger userInfo s (ActionExecContext manager reqHeaders usrVars)
RFAction (AQAsync s) -> pure $ ExecStepAction $ AEPAsyncQuery (_aaaqActionId s) $ resolveAsyncActionQuery userInfo s
RFRaw r -> pure $ ExecStepRaw r

let asts :: [QueryRootField (UnpreparedValue 'Postgres)]
asts = OMap.elems unpreparedQueries
@ -209,18 +196,6 @@ convertQuerySelSet env logger gqlContext userInfo manager reqHeaders directives
where
usrVars = _uiSession userInfo

convertActionQuery
:: ActionQuery 'Postgres (UnpreparedValue 'Postgres)
-> StateT PlanningSt m (ActionQueryPlan 'Postgres)
convertActionQuery = \case
AQQuery s -> lift $ do
result <- resolveActionExecution env logger userInfo s $ ActionExecContext manager reqHeaders usrVars
pure $ AQPQuery $ _aerExecution result
AQAsync s -> do
unpreparedAst <- lift $ liftEitherM $ runMetadataStorageT $
resolveAsyncActionQuery userInfo s
AQPAsyncQuery <$> DS.traverseAnnSimpleSelect prepareWithPlan unpreparedAst

-- See Note [Temporarily disabling query plan caching]
-- use the existing plan and new variables to create a pg query
-- queryOpFromPlan
@ -8,8 +8,8 @@ module Hasura.GraphQL.Execute.Remote
import Hasura.Prelude

import qualified Data.Aeson as J
import qualified Data.HashSet as Set
import qualified Data.HashMap.Strict as Map
import qualified Data.HashSet as Set
import qualified Data.Text as T
import qualified Language.GraphQL.Draft.Syntax as G

@ -17,9 +17,9 @@ import Data.Text.Extended

import qualified Hasura.GraphQL.Transport.HTTP.Protocol as GH

import Hasura.GraphQL.Context (RemoteField, RemoteFieldG (..))
import Hasura.GraphQL.Execute.Prepare
import Hasura.GraphQL.Parser
import Hasura.GraphQL.Context (RemoteFieldG (..), RemoteField)
import Hasura.RQL.Types

import Hasura.Session
@ -34,12 +34,12 @@ mkVariableDefinitionAndValue var@(Variable varInfo gType varValue) =

defaultVal =
case varInfo of
VIRequired _ -> Nothing
VIRequired _ -> Nothing
VIOptional _ val -> Just val

varJSONValue =
case varValue of
JSONValue v -> v
JSONValue v -> v
GraphQLValue val -> graphQLValueToJSON val

unresolveVariables
@ -59,11 +59,11 @@ collectVariables =
Set.unions . fmap (foldMap Set.singleton)

buildExecStepRemote
:: forall db
:: forall db action
. RemoteSchemaInfo
-> G.OperationType
-> G.SelectionSet G.NoFragments Variable
-> ExecutionStep db
-> ExecutionStep db action
buildExecStepRemote remoteSchemaInfo tp selSet =
let unresolvedSelSet = unresolveVariables selSet
allVars = map mkVariableDefinitionAndValue $ Set.toList $ collectVariables selSet
|
||||
import qualified Database.PG.Query as Q
|
||||
import qualified Language.GraphQL.Draft.Syntax as G
|
||||
|
||||
import Control.Monad.Trans.Control (MonadBaseControl)
|
||||
import Data.Text.Extended
|
||||
|
||||
import qualified Hasura.Backends.Postgres.Execute.RemoteJoin as RR
|
||||
@ -30,11 +31,10 @@ import Hasura.Backends.Postgres.Translate.Column (toTxtValue)
|
||||
import Hasura.EncJSON
|
||||
import Hasura.GraphQL.Context
|
||||
import Hasura.GraphQL.Parser
|
||||
import Hasura.Metadata.Class
|
||||
import Hasura.RQL.DML.Internal
|
||||
import Hasura.RQL.Types
|
||||
import Hasura.SQL.Types
|
||||
import Hasura.Session
|
||||
import Hasura.SQL.Types
|
||||
|
||||
|
||||
data GQLExplain
|
||||
@ -79,7 +79,10 @@ resolveUnpreparedValue userInfo = \case
|
||||
-- NOTE: This function has a 'MonadTrace' constraint in master, but we don't need it
|
||||
-- here. We should evaluate if we need it here.
|
||||
explainQueryField
|
||||
:: (MonadError QErr m, MonadTx m)
|
||||
:: ( MonadError QErr m
|
||||
, MonadIO m
|
||||
, MonadBaseControl IO m
|
||||
)
|
||||
=> UserInfo
|
||||
-> G.Name
|
||||
-> QueryRootField (UnpreparedValue 'Postgres)
|
||||
@ -90,7 +93,7 @@ explainQueryField userInfo fieldName rootField = do
|
||||
RFRemote _ -> throw400 InvalidParams "only hasura queries can be explained"
|
||||
RFAction _ -> throw400 InvalidParams "query actions cannot be explained"
|
||||
RFRaw _ -> pure $ FieldPlan fieldName Nothing Nothing
|
||||
RFDB qDB -> do
|
||||
RFDB _ pgExecCtx qDB -> do
|
||||
let (querySQL, remoteJoins) = case qDB of
|
||||
QDBSimple s -> first (DS.selectQuerySQL DS.JASMultipleRows) $ RR.getRemoteJoins s
|
||||
QDBPrimaryKey s -> first (DS.selectQuerySQL DS.JASSingleObject) $ RR.getRemoteJoins s
|
||||
@ -102,7 +105,8 @@ explainQueryField userInfo fieldName rootField = do
|
||||
withExplain = "EXPLAIN (FORMAT TEXT) " <> textSQL
|
||||
-- Reject if query contains any remote joins
|
||||
when (remoteJoins /= mempty) $ throw400 NotSupported "Remote relationships are not allowed in explain query"
|
||||
planLines <- liftTx $ map runIdentity <$>
|
||||
planLines <- liftEitherM $ runExceptT $ runLazyTx pgExecCtx Q.ReadOnly $
|
||||
liftTx $ map runIdentity <$>
|
||||
Q.listQE dmlTxErrorHandler (Q.fromText withExplain) () True
|
||||
pure $ FieldPlan fieldName (Just textSQL) $ Just planLines
|
||||
|
||||
@ -112,13 +116,12 @@ explainGQLQuery
|
||||
:: forall m
|
||||
. ( MonadError QErr m
|
||||
, MonadIO m
|
||||
, MonadMetadataStorage (MetadataStorageT m)
|
||||
, MonadBaseControl IO m
|
||||
)
|
||||
=> PGExecCtx
|
||||
-> SchemaCache
|
||||
=> SchemaCache
|
||||
-> GQLExplain
|
||||
-> m EncJSON
|
||||
explainGQLQuery pgExecCtx sc (GQLExplain query userVarsRaw maybeIsRelay) = do
|
||||
explainGQLQuery sc (GQLExplain query userVarsRaw maybeIsRelay) = do
|
||||
-- NOTE!: we will be executing what follows as though admin role. See e.g. notes in explainField:
|
||||
userInfo <- mkUserInfo (URBFromSessionVariablesFallback adminRoleName) UAdminSecretSent sessionVariables
|
||||
-- we don't need to check in allow list as we consider it an admin endpoint
|
||||
@ -132,7 +135,7 @@ explainGQLQuery pgExecCtx sc (GQLExplain query userVarsRaw maybeIsRelay) = do
|
||||
inlinedSelSet <- E.inlineSelectionSet fragments selSet
|
||||
(unpreparedQueries, _) <-
|
||||
E.parseGraphQLQuery graphQLContext varDefs (GH._grVariables query) inlinedSelSet
|
||||
runInTx $ encJFromJValue
|
||||
encJFromJValue
|
||||
<$> for (OMap.toList unpreparedQueries) (uncurry (explainQueryField userInfo))
|
||||
|
||||
G.TypedOperationDefinition G.OperationTypeMutation _ _ _ _ ->
|
||||
@ -142,12 +145,9 @@ explainGQLQuery pgExecCtx sc (GQLExplain query userVarsRaw maybeIsRelay) = do
|
||||
-- (Here the above fragment inlining is actually executed.)
|
||||
inlinedSelSet <- E.inlineSelectionSet fragments selSet
|
||||
(unpreparedQueries, _) <- E.parseGraphQLQuery graphQLContext varDefs (GH._grVariables query) inlinedSelSet
|
||||
validSubscriptionQueries <- for unpreparedQueries E.validateSubscriptionRootField
|
||||
(pgExecCtx, validSubscriptionQueries) <- E.validateSubscriptionRootField unpreparedQueries
|
||||
(plan, _) <- E.buildLiveQueryPlan pgExecCtx userInfo validSubscriptionQueries
|
||||
runInTx $ encJFromJValue <$> E.explainLiveQueryPlan plan
|
||||
encJFromJValue <$> E.explainLiveQueryPlan plan
|
||||
where
|
||||
queryType = bool E.QueryHasura E.QueryRelay $ Just True == maybeIsRelay
|
||||
sessionVariables = mkSessionVariablesText $ fromMaybe mempty userVarsRaw
|
||||
|
||||
runInTx :: LazyTx QErr EncJSON -> m EncJSON
|
||||
runInTx = liftEither <=< liftIO . runExceptT . runLazyTx pgExecCtx Q.ReadOnly
|
||||
|
@ -21,6 +21,7 @@ import Hasura.GraphQL.Parser.Class.Parse
|
||||
import Hasura.GraphQL.Parser.Internal.Types
|
||||
import Hasura.RQL.Types.Common
|
||||
import Hasura.RQL.Types.Error
|
||||
import Hasura.RQL.Types.Source
|
||||
import Hasura.RQL.Types.Table
|
||||
import Hasura.Session (RoleName)
|
||||
|
||||
@ -113,17 +114,19 @@ askRoleName
|
||||
=> m RoleName
|
||||
askRoleName = asks getter
|
||||
|
||||
type MonadTableInfo b r m = (MonadReader r m, Has (TableCache b) r, MonadError QErr m)
|
||||
type MonadTableInfo b r m = (MonadReader r m, Has (SourceCache b) r, MonadError QErr m)
|
||||
|
||||
-- | Looks up table information for the given table name. This function
|
||||
-- should never fail, since the schema cache construction process is
|
||||
-- supposed to ensure all dependencies are resolved.
|
||||
askTableInfo
|
||||
:: (Backend b, MonadTableInfo b r m)
|
||||
:: forall b r m. (Backend b, MonadTableInfo b r m)
|
||||
=> TableName b
|
||||
-> m (TableInfo b)
|
||||
askTableInfo tableName = do
|
||||
tableInfo <- asks $ Map.lookup tableName . getter
|
||||
let getTableInfo :: SourceCache b -> Maybe (TableInfo b)
|
||||
getTableInfo sc = Map.lookup tableName $ Map.unions $ map _pcTables $ Map.elems sc
|
||||
tableInfo <- asks $ getTableInfo . getter
|
||||
-- This should never fail, since the schema cache construction process is
|
||||
-- supposed to ensure that all dependencies are resolved.
|
||||
tableInfo `onNothing` throw500 ("askTableInfo: no info for " <>> tableName)
|
||||
|
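askTableInfo now searches the union of all per-source table caches instead of a single global cache. A toy equivalent of that lookup using Data.Map (names and types simplified; the inner maps here play the role of _pcTables):

import qualified Data.Map.Strict as Map

type TableName   = String
type TableInfo   = String
type SourceCache = Map.Map String (Map.Map TableName TableInfo)

askTableInfo :: SourceCache -> TableName -> Either String TableInfo
askTableInfo sources name =
  -- union the per-source table maps, then look the table up once
  case Map.lookup name (Map.unions (Map.elems sources)) of
    Just info -> Right info
    Nothing   -> Left ("askTableInfo: no info for " <> name)

main :: IO ()
main = do
  let cache = Map.fromList
        [ ("default",   Map.fromList [("users",  "users table info")])
        , ("analytics", Map.fromList [("events", "events table info")])
        ]
  print (askTableInfo cache "events")
  print (askTableInfo cache "missing")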
@ -7,35 +7,36 @@ module Hasura.GraphQL.Schema
|
||||
|
||||
import Hasura.Prelude
|
||||
|
||||
import qualified Data.Aeson as J
|
||||
import qualified Data.HashMap.Strict as Map
|
||||
import qualified Data.HashMap.Strict.InsOrd as OMap
|
||||
import qualified Data.HashSet as Set
|
||||
import qualified Language.GraphQL.Draft.Syntax as G
|
||||
import qualified Data.Aeson as J
|
||||
import qualified Data.HashMap.Strict as Map
|
||||
import qualified Data.HashMap.Strict.InsOrd as OMap
|
||||
import qualified Data.HashSet as Set
|
||||
import qualified Language.GraphQL.Draft.Syntax as G
|
||||
|
||||
import Control.Arrow.Extended
|
||||
import Control.Lens.Extended
|
||||
import Control.Monad.Unique
|
||||
import Data.Has
|
||||
import Data.List.Extended (duplicates)
|
||||
import Data.List.Extended (duplicates)
|
||||
|
||||
import qualified Hasura.Backends.Postgres.SQL.Types as PG
|
||||
import qualified Hasura.GraphQL.Parser as P
|
||||
import qualified Hasura.GraphQL.Schema.Postgres as PGS
|
||||
import qualified Hasura.Backends.Postgres.Execute.Types as PG
|
||||
import qualified Hasura.Backends.Postgres.SQL.Types as PG
|
||||
import qualified Hasura.GraphQL.Parser as P
|
||||
import qualified Hasura.GraphQL.Schema.Postgres as PGS
|
||||
|
||||
import Data.Text.Extended
|
||||
import Hasura.GraphQL.Context
|
||||
import Hasura.GraphQL.Execute.Types
|
||||
import Hasura.GraphQL.Parser (Kind (..), Parser, Schema (..),
|
||||
UnpreparedValue (..))
|
||||
import Hasura.GraphQL.Parser (Kind (..), Parser, Schema (..),
|
||||
UnpreparedValue (..))
|
||||
import Hasura.GraphQL.Parser.Class
|
||||
import Hasura.GraphQL.Parser.Internal.Parser (FieldParser (..))
|
||||
import Hasura.GraphQL.Parser.Internal.Parser (FieldParser (..))
|
||||
import Hasura.GraphQL.Schema.Action
|
||||
import Hasura.GraphQL.Schema.Backend
|
||||
import Hasura.GraphQL.Schema.Common
|
||||
import Hasura.GraphQL.Schema.Introspect
|
||||
import Hasura.GraphQL.Schema.Mutation
|
||||
import Hasura.GraphQL.Schema.Remote (buildRemoteParser)
|
||||
import Hasura.GraphQL.Schema.Remote (buildRemoteParser)
|
||||
import Hasura.GraphQL.Schema.Select
|
||||
import Hasura.GraphQL.Schema.Table
|
||||
import Hasura.RQL.DDL.Schema.Cache.Common
|
||||
@ -79,8 +80,7 @@ buildGQLContext
|
||||
, HasRemoteSchemaPermsCtx m
|
||||
)
|
||||
=> ( GraphQLQueryType
|
||||
, TableCache 'Postgres
|
||||
, FunctionCache
|
||||
, SourceCache 'Postgres
|
||||
, RemoteSchemaCache
|
||||
, ActionCache
|
||||
, NonObjectTypeMap
|
||||
@ -90,7 +90,7 @@ buildGQLContext
|
||||
  , GQLContext
  )
buildGQLContext =
  proc (queryType, allTables, allFunctions, allRemoteSchemas, allActions, nonObjectCustomTypes) -> do
  proc (queryType, pgSources, allRemoteSchemas, allActions, nonObjectCustomTypes) -> do

    SQLGenCtx{ stringifyNum } <- bindA -< askSQLGenCtx
    remoteSchemaPermsCtx <- bindA -< askRemoteSchemaPermsCtx
@ -98,7 +98,7 @@ buildGQLContext =
    let remoteSchemasRoles = concatMap (Map.keys . _rscPermissions . fst . snd) $ Map.toList allRemoteSchemas

    let allRoles = Set.insert adminRoleName $
          (allTables ^.. folded.tiRolePermInfoMap.to Map.keys.folded)
          (pgSources ^.. folded.to _pcTables.folded.tiRolePermInfoMap.to Map.keys.folded)
          <> (allActionInfos ^.. folded.aiPermissions.to Map.keys.folded)
          <> Set.fromList (bool mempty remoteSchemasRoles $ remoteSchemaPermsCtx == RemoteSchemaPermsEnabled)
        allActionInfos = Map.elems allActions
@ -109,7 +109,7 @@ buildGQLContext =
    -- build the admin DB-only context so that we can check against name clashes with remotes
    -- TODO: Is there a better way to check for conflicts without actually building the admin schema?
    adminHasuraDBContext <- bindA -<
      buildFullestDBSchema queryContext allTables allFunctions allActionInfos nonObjectCustomTypes
      buildFullestDBSchema queryContext pgSources allActionInfos nonObjectCustomTypes

    -- TODO factor out the common function; throw500 in both cases:
    queryFieldNames :: [G.Name] <- bindA -<
@ -138,10 +138,10 @@ buildGQLContext =
      ( Set.toMap allRoles & Map.traverseWithKey \roleName () ->
          case queryType of
            QueryHasura ->
              buildRoleContext queryContext allTables allFunctions allRemoteSchemas allActionInfos
              buildRoleContext queryContext pgSources allRemoteSchemas allActionInfos
                nonObjectCustomTypes remotes roleName remoteSchemaPermsCtx
            QueryRelay ->
              buildRelayRoleContext queryContext allTables allFunctions allActionInfos
              buildRelayRoleContext queryContext pgSources allActionInfos
                nonObjectCustomTypes adminMutationRemotes roleName
      )
    unauthenticated <- bindA -< unauthenticatedContext adminQueryRemotes adminMutationRemotes remoteSchemaPermsCtx
@ -151,10 +151,10 @@ runMonadSchema
  :: (Monad m)
  => RoleName
  -> QueryContext
  -> Map.HashMap PG.QualifiedTable (TableInfo 'Postgres)
  -> P.SchemaT (P.ParseT Identity) (ReaderT (RoleName, Map.HashMap PG.QualifiedTable (TableInfo 'Postgres), QueryContext) m) a -> m a
runMonadSchema roleName queryContext tableCache m =
  flip runReaderT (roleName, tableCache, queryContext) $ P.runSchemaT m
  -> SourceCache 'Postgres
  -> P.SchemaT (P.ParseT Identity) (ReaderT (RoleName, SourceCache 'Postgres, QueryContext) m) a -> m a
runMonadSchema roleName queryContext pgSources m =
  flip runReaderT (roleName, pgSources, queryContext) $ P.runSchemaT m

buildRoleBasedRemoteSchemaParser
  :: forall m
@ -176,13 +176,13 @@ buildRoleBasedRemoteSchemaParser role remoteSchemaCache = do
-- TODO: Integrate relay schema
buildRoleContext
  :: (MonadError QErr m, MonadIO m, MonadUnique m)
  => QueryContext -> TableCache 'Postgres -> FunctionCache -> RemoteSchemaCache
  => QueryContext -> SourceCache 'Postgres -> RemoteSchemaCache
  -> [ActionInfo 'Postgres] -> NonObjectTypeMap
  -> [( RemoteSchemaName , ParsedIntrospection)]
  -> RoleName
  -> RemoteSchemaPermsCtx
  -> m (RoleContext GQLContext)
buildRoleContext queryContext (takeValidTables -> allTables) (takeValidFunctions -> allFunctions)
buildRoleContext queryContext pgSources
  allRemoteSchemas allActionInfos nonObjectCustomTypes remotes roleName remoteSchemaPermsCtx = do

  roleBasedRemoteSchemas <-
@ -195,16 +195,25 @@ buildRoleContext queryContext (takeValidTables -> allTables) (takeValidFunctions
  let queryRemotes = getQueryRemotes $ snd <$> roleBasedRemoteSchemas
      mutationRemotes = getMutationRemotes $ snd <$> roleBasedRemoteSchemas

  runMonadSchema roleName queryContext allTables $ do
  runMonadSchema roleName queryContext pgSources $ do
    fieldsList <- forM (toList pgSources) $ \(SourceInfo sourceName tables functions sourceConfig) -> do
      let validTables = takeValidTables tables
          validFunctions = takeValidFunctions functions
          tableNames = Map.keysSet validTables
          functionsWithSourceConfig = map (, (sourceName, sourceConfig)) validFunctions
      (functionsWithSourceConfig,,,)
        <$> buildPostgresQueryFields sourceName sourceConfig tableNames validFunctions
        <*> buildPGMutationFields Frontend sourceName sourceConfig tableNames
        <*> buildPGMutationFields Backend sourceName sourceConfig tableNames

    let (allFunctions, queryPGFields, mutationFrontendFields, mutationBackendFields) = mconcat fieldsList

    mutationParserFrontend <-
      buildPGMutationFields Frontend tableNames >>=
      buildMutationParser mutationRemotes allActionInfos nonObjectCustomTypes allFunctions
      buildMutationParser mutationRemotes allActionInfos nonObjectCustomTypes allFunctions mutationFrontendFields

    mutationParserBackend <-
      buildPGMutationFields Backend tableNames >>=
      buildMutationParser mutationRemotes allActionInfos nonObjectCustomTypes allFunctions
      buildMutationParser mutationRemotes allActionInfos nonObjectCustomTypes allFunctions mutationBackendFields

    queryPGFields <- buildPostgresQueryFields tableNames allFunctions
    subscriptionParser <- buildSubscriptionParser queryPGFields allActionInfos

    queryParserFrontend <- buildQueryParser queryPGFields queryRemotes
@ -219,8 +228,6 @@ buildRoleContext queryContext (takeValidTables -> allTables) (takeValidFunctions
    pure $ RoleContext frontendContext $ Just backendContext

  where
    tableNames = Map.keysSet allTables

    getQueryRemotes
      :: [ParsedIntrospection]
      -> [P.FieldParser (P.ParseT Identity) RemoteField]
@ -254,26 +261,34 @@ takeValidFunctions = Map.elems . Map.filter functionFilter
  where
    functionFilter = not . isSystemDefined . fiSystemDefined

takeExposedAs :: FunctionExposedAs -> [FunctionInfo] -> [FunctionInfo]
takeExposedAs x = filter ((== x) . fiExposedAs)
takeExposedAs :: FunctionExposedAs -> (a -> FunctionInfo) -> [a] -> [a]
takeExposedAs x f = filter ((== x) . fiExposedAs . f)
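For orientation, a minimal sketch (an editor's illustration, not part of this commit; the helper names are made up) of what the new projection argument buys: the same filter now runs both over bare FunctionInfo lists and over functions paired with their source metadata, matching the call sites takeExposedAs FEAQuery id and takeExposedAs FEAMutation fst elsewhere in this diff.

    -- both reuse the one generalized filter; types follow the signature above
    queryFunctionsOf :: [FunctionInfo] -> [FunctionInfo]
    queryFunctionsOf = takeExposedAs FEAQuery id

    mutationFunctionsOf :: [(FunctionInfo, (SourceName, SourceConfig 'Postgres))]
                        -> [(FunctionInfo, (SourceName, SourceConfig 'Postgres))]
    mutationFunctionsOf = takeExposedAs FEAMutation fst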

buildFullestDBSchema
  :: (MonadError QErr m, MonadIO m, MonadUnique m)
  => QueryContext -> TableCache 'Postgres -> FunctionCache -> [ActionInfo 'Postgres] -> NonObjectTypeMap
  => QueryContext -> SourceCache 'Postgres -> [ActionInfo 'Postgres] -> NonObjectTypeMap
  -> m ( Parser 'Output (P.ParseT Identity) (OMap.InsOrdHashMap G.Name (QueryRootField (UnpreparedValue 'Postgres)))
       , Maybe (Parser 'Output (P.ParseT Identity) (OMap.InsOrdHashMap G.Name (MutationRootField (UnpreparedValue 'Postgres))))
       )
buildFullestDBSchema queryContext (takeValidTables -> allTables) (takeValidFunctions -> allFunctions)
  allActionInfos nonObjectCustomTypes = do
  runMonadSchema adminRoleName queryContext allTables $ do
buildFullestDBSchema queryContext pgSources allActionInfos nonObjectCustomTypes =
  runMonadSchema adminRoleName queryContext pgSources $ do
    fieldsList <- forM (toList pgSources) $ \(SourceInfo sourceName tables functions sourceConfig) -> do
      let validTables = takeValidTables tables
          validFunctions = takeValidFunctions functions
          tableNames = Map.keysSet validTables
          functionsWithSourceConfig = map (, (sourceName, sourceConfig)) validFunctions
      (functionsWithSourceConfig,,)
        <$> buildPGMutationFields Frontend sourceName sourceConfig tableNames
        <*> buildPostgresQueryFields sourceName sourceConfig tableNames validFunctions

    let (allFunctions, mutationPGFields, queryPGFields) = mconcat fieldsList

    mutationParserFrontend <-
      buildPGMutationFields Frontend tableNames >>=
      -- NOTE: we omit remotes here on purpose since we're trying to check name
      -- clashes with remotes:
      buildMutationParser mempty allActionInfos nonObjectCustomTypes allFunctions
      buildMutationParser mempty allActionInfos nonObjectCustomTypes allFunctions mutationPGFields

    queryPGFields <- buildPostgresQueryFields tableNames allFunctions
    subscriptionParser <- buildSubscriptionParser queryPGFields allActionInfos

    queryParserFrontend <- buildQueryParser queryPGFields mempty
@ -281,28 +296,37 @@ buildFullestDBSchema queryContext (takeValidTables -> allTables) (takeValidFunct

    pure (queryParserFrontend, mutationParserFrontend)

  where
    tableNames = Map.keysSet allTables

buildRelayRoleContext
  :: (MonadError QErr m, MonadIO m, MonadUnique m)
  => QueryContext -> TableCache 'Postgres -> FunctionCache -> [ActionInfo 'Postgres] -> NonObjectTypeMap
  => QueryContext -> SourceCache 'Postgres -> [ActionInfo 'Postgres] -> NonObjectTypeMap
  -> [P.FieldParser (P.ParseT Identity) RemoteField]
  -> RoleName
  -> m (RoleContext GQLContext)
buildRelayRoleContext queryContext (takeValidTables -> allTables) (takeValidFunctions -> allFunctions)
buildRelayRoleContext queryContext pgSources
  allActionInfos nonObjectCustomTypes mutationRemotes roleName =

  runMonadSchema roleName queryContext allTables $ do
  runMonadSchema roleName queryContext pgSources $ do
    fieldsList <- forM (toList pgSources) $ \(SourceInfo sourceName tables functions sourceConfig) -> do
      let validTables = takeValidTables tables
          validFunctions = takeValidFunctions functions
          tableNames = Map.keysSet validTables
          functionsWithSourceConfig = map (, (sourceName, sourceConfig)) validFunctions
      (functionsWithSourceConfig,,,)
        <$> buildRelayPostgresQueryFields sourceName sourceConfig tableNames validFunctions
        <*> buildPGMutationFields Frontend sourceName sourceConfig tableNames
        <*> buildPGMutationFields Backend sourceName sourceConfig tableNames

    -- Add node root field
    nodeField_ <- nodeField
    let (allFunctions, queryPGFields', mutationFrontendFields, mutationBackendFields) = mconcat fieldsList
        queryPGFields = nodeField_:queryPGFields'

    mutationParserFrontend <-
      buildPGMutationFields Frontend tableNames >>=
      buildMutationParser mutationRemotes allActionInfos nonObjectCustomTypes allFunctions
      buildMutationParser mutationRemotes allActionInfos nonObjectCustomTypes allFunctions mutationFrontendFields

    mutationParserBackend <-
      buildPGMutationFields Backend tableNames >>=
      buildMutationParser mutationRemotes allActionInfos nonObjectCustomTypes allFunctions
      buildMutationParser mutationRemotes allActionInfos nonObjectCustomTypes allFunctions mutationBackendFields

    queryPGFields <- buildRelayPostgresQueryFields tableNames allFunctions
    subscriptionParser <- P.safeSelectionSet subscriptionRoot Nothing queryPGFields
      <&> fmap (fmap (P.handleTypename (RFRaw . J.String. G.unName)))
    queryParserFrontend <- queryWithIntrospectionHelper queryPGFields
@ -316,8 +340,6 @@ buildRelayRoleContext queryContext (takeValidTables -> allTables) (takeValidFunc
      (finalizeParser <$> mutationParserBackend)
    pure $ RoleContext frontendContext $ Just backendContext

  where
    tableNames = Map.keysSet allTables

-- The `unauthenticatedContext` is used when the user queries the graphql-engine
-- with a role that it's unaware of. Before remote schema permissions, remotes
@ -406,10 +428,12 @@ buildPostgresQueryFields
  , MonadRole r m
  , Has QueryContext r
  )
  => HashSet PG.QualifiedTable
  => SourceName
  -> SourceConfig 'Postgres
  -> HashSet PG.QualifiedTable
  -> [FunctionInfo]
  -> m [P.FieldParser n (QueryRootField (UnpreparedValue 'Postgres))]
buildPostgresQueryFields allTables (takeExposedAs FEAQuery -> queryFunctions) = do
buildPostgresQueryFields sourceName sourceConfig allTables (takeExposedAs FEAQuery id -> queryFunctions) = do
  tableSelectExpParsers <- for (toList allTables) \table -> do
    selectPerms <- tableSelectPermissions table
    customRootFields <- _tcCustomRootFields . _tciCustomConfig . _tiCoreInfo <$> askTableInfo @'Postgres table
@ -421,9 +445,9 @@ buildPostgresQueryFields allTables (takeExposedAs FEAQuery -> queryFunctions) =
        pkName = tableGQLName <> $$(G.litName "_by_pk")
        pkDesc = G.Description $ "fetch data from the table: " <> table <<> " using primary key columns"
    catMaybes <$> sequenceA
      [ requiredFieldParser (RFDB . QDBSimple) $ selectTable table (fromMaybe tableGQLName $ _tcrfSelect customRootFields) (Just fieldsDesc) perms
      , mapMaybeFieldParser (RFDB . QDBPrimaryKey) $ selectTableByPk table (fromMaybe pkName $ _tcrfSelectByPk customRootFields) (Just pkDesc) perms
      , mapMaybeFieldParser (RFDB . QDBAggregation) $ selectTableAggregate table (fromMaybe aggName $ _tcrfSelectAggregate customRootFields) (Just aggDesc) perms
      [ requiredFieldParser (asDbRootField . QDBSimple) $ selectTable table (fromMaybe tableGQLName $ _tcrfSelect customRootFields) (Just fieldsDesc) perms
      , mapMaybeFieldParser (asDbRootField . QDBPrimaryKey) $ selectTableByPk table (fromMaybe pkName $ _tcrfSelectByPk customRootFields) (Just pkDesc) perms
      , mapMaybeFieldParser (asDbRootField . QDBAggregation) $ selectTableAggregate table (fromMaybe aggName $ _tcrfSelectAggregate customRootFields) (Just aggDesc) perms
      ]
  functionSelectExpParsers <- for queryFunctions \function -> do
    let targetTable = fiReturnType function
@ -435,11 +459,15 @@ buildPostgresQueryFields allTables (takeExposedAs FEAQuery -> queryFunctions) =
        aggName = displayName <> $$(G.litName "_aggregate")
        aggDesc = G.Description $ "execute function " <> functionName <<> " and query aggregates on result of table type " <>> targetTable
    catMaybes <$> sequenceA
      [ requiredFieldParser (RFDB . QDBSimple) $ selectFunction function displayName (Just functionDesc) perms
      , mapMaybeFieldParser (RFDB . QDBAggregation) $ selectFunctionAggregate function aggName (Just aggDesc) perms
      [ requiredFieldParser (asDbRootField . QDBSimple) $ selectFunction function displayName (Just functionDesc) perms
      , mapMaybeFieldParser (asDbRootField . QDBAggregation) $ selectFunctionAggregate function aggName (Just aggDesc) perms
      ]
  pure $ (concat . catMaybes) (tableSelectExpParsers <> functionSelectExpParsers)
  where
    asDbRootField =
      let pgExecCtx = PG._pscExecCtx sourceConfig
      in RFDB sourceName pgExecCtx
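The asDbRootField helper above (repeated in the other field builders below) is the heart of the multi-source change on the schema side: every database root field is wrapped in RFDB together with the name of the source it came from and the PGExecCtx taken from that source's configuration, so the executor can later route each root field to the right connection pool. A condensed sketch of the pattern (editor's illustration; it assumes the QueryDB/QueryRootField types from Hasura.GraphQL.Context at this revision):

    -- tag a Postgres query with its origin so execution can pick the right pool
    asDbRootField' :: SourceName -> SourceConfig 'Postgres
                   -> QueryDB 'Postgres (UnpreparedValue 'Postgres)
                   -> QueryRootField (UnpreparedValue 'Postgres)
    asDbRootField' sourceName sourceConfig = RFDB sourceName (PG._pscExecCtx sourceConfig)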

    mapMaybeFieldParser :: (a -> b) -> m (Maybe (P.FieldParser n a)) -> m (Maybe (P.FieldParser n b))
    mapMaybeFieldParser f = fmap $ fmap $ fmap f

@ -494,10 +522,12 @@ buildRelayPostgresQueryFields
  , MonadRole r m
  , Has QueryContext r
  )
  => HashSet PG.QualifiedTable
  => SourceName
  -> SourceConfig 'Postgres
  -> HashSet PG.QualifiedTable
  -> [FunctionInfo]
  -> m [P.FieldParser n (QueryRootField (UnpreparedValue 'Postgres))]
buildRelayPostgresQueryFields allTables (takeExposedAs FEAQuery -> queryFunctions) = do
buildRelayPostgresQueryFields sourceName sourceConfig allTables (takeExposedAs FEAQuery id -> queryFunctions) = do
  tableConnectionFields <- for (toList allTables) $ \table -> runMaybeT do
    pkeyColumns <- MaybeT $ (^? tiCoreInfo.tciPrimaryKey._Just.pkColumns)
      <$> askTableInfo table
@ -519,9 +549,12 @@ buildRelayPostgresQueryFields allTables (takeExposedAs FEAQuery -> queryFunction
      <<> " which returns " <>> returnTable
    lift $ selectFunctionConnection function fieldName fieldDesc pkeyColumns selectPerms

  nodeField_ <- fmap (RFDB . QDBPrimaryKey) <$> nodeField
  pure $ (:) nodeField_ $ map (fmap (RFDB . QDBConnection)) $ catMaybes $
  pure $ map (fmap (asDbRootField . QDBConnection)) $ catMaybes $
    tableConnectionFields <> functionConnectionFields
  where
    asDbRootField =
      let pgExecCtx = PG._pscExecCtx sourceConfig
      in RFDB sourceName pgExecCtx

queryRootFromFields
  :: forall n m
@ -635,9 +668,9 @@ buildSubscriptionParser pgQueryFields allActions = do
buildPGMutationFields
  :: forall m n r
   . (MonadSchema n m, MonadTableInfo 'Postgres r m, MonadRole r m, Has QueryContext r)
  => Scenario -> HashSet PG.QualifiedTable
  => Scenario -> SourceName -> SourceConfig 'Postgres -> HashSet PG.QualifiedTable
  -> m [P.FieldParser n (MutationRootField (UnpreparedValue 'Postgres))]
buildPGMutationFields scenario allTables = do
buildPGMutationFields scenario sourceName sourceConfig allTables = do
  concat . catMaybes <$> for (toList allTables) \table -> do
    tableCoreInfo <- _tiCoreInfo <$> askTableInfo @'Postgres table
    tableGQLName <- getTableGQLName @'Postgres table
@ -665,7 +698,7 @@ buildPGMutationFields scenario allTables = do
      -- select permissions
      insertOne <- for _permSel \selPerms ->
        insertOneIntoTable table (fromMaybe insertOneName $ _tcrfInsertOne customRootFields) (Just insertOneDesc) insertPerms selPerms _permUpd
      pure $ fmap (RFDB . MDBInsert) <$> insert : maybeToList insertOne
      pure $ fmap (asDbRootField . MDBInsert) <$> insert : maybeToList insertOne

    updates <- fmap join $ whenMaybe (isMutable viIsUpdatable viewInfo) $ for _permUpd \updatePerms -> do
      let updateName = $$(G.litName "update_") <> tableGQLName
@ -678,7 +711,7 @@ buildPGMutationFields scenario allTables = do
      -- them, which at the very least requires select permissions
      updateByPk <- join <$> for _permSel
        (updateTableByPk table (fromMaybe updateByPkName $ _tcrfUpdateByPk customRootFields) (Just updateByPkDesc) updatePerms)
      pure $ fmap (RFDB . MDBUpdate) <$> catMaybes [update, updateByPk]
      pure $ fmap (asDbRootField . MDBUpdate) <$> catMaybes [update, updateByPk]

    -- when the table/view is mutable and there exists a delete permission
    deletes <- fmap join $ whenMaybe (isMutable viIsDeletable viewInfo) $
@ -690,11 +723,15 @@ buildPGMutationFields scenario allTables = do
      deleteByPk <- fmap join $ for _permSel $
        buildDeleteByPkField table tableGQLName (_tcrfDeleteByPk customRootFields) deletePermission

      pure $ fmap (RFDB . MDBDelete) <$> delete : maybeToList deleteByPk
      pure $ fmap (asDbRootField . MDBDelete) <$> delete : maybeToList deleteByPk

    pure $ concat $ catMaybes [inserts, updates, deletes]

  where
    asDbRootField =
      let pgExecCtx = PG._pscExecCtx sourceConfig
      in RFDB sourceName pgExecCtx

    buildDeleteField table tableGQLName customName deletePermission selectPermission = do
      let deleteName = $$(G.litName "delete_") <> tableGQLName
          deleteDesc = G.Description $ "delete data from the table: " <>> table
@ -721,25 +758,29 @@ buildMutationParser
  => [P.FieldParser n RemoteField]
  -> [ActionInfo 'Postgres]
  -> NonObjectTypeMap
  -> [FunctionInfo]
  -> [(FunctionInfo, (SourceName, SourceConfig 'Postgres))]
  -- ^ all "valid" functions
  -> [P.FieldParser n (MutationRootField (UnpreparedValue 'Postgres))]
  -> m (Maybe (Parser 'Output n (OMap.InsOrdHashMap G.Name (MutationRootField (UnpreparedValue 'Postgres)))))
buildMutationParser allRemotes allActions nonObjectCustomTypes
  (takeExposedAs FEAMutation -> mutationFunctions) pgMutationFields = do
  (takeExposedAs FEAMutation fst -> mutationFunctions) pgMutationFields = do

  -- NOTE: this is basically copied from functionSelectExpParsers body
  functionMutationExpParsers <- for mutationFunctions \function@FunctionInfo{..} -> do
  functionMutationExpParsers <- for mutationFunctions \(function@FunctionInfo{..}, (sourceName, sourceConfig)) -> do
    selectPerms <- tableSelectPermissions fiReturnType
    for selectPerms \perms -> do
      displayName <- PG.qualifiedObjectToName fiName
      let functionDesc = G.Description $
            "execute VOLATILE function " <> fiName <<> " which returns " <>> fiReturnType
          asDbRootField =
            let pgExecCtx = PG._pscExecCtx sourceConfig
            in RFDB sourceName pgExecCtx

      catMaybes <$> sequenceA
        [ requiredFieldParser (RFDB . MDBFunction) $
        [ requiredFieldParser (asDbRootField . MDBFunction) $
            selectFunction function displayName (Just functionDesc) perms
        -- FWIW: The equivalent of this is possible for mutations; do we want that?:
        -- , mapMaybeFieldParser (RFDB . QDBAggregation) $ selectFunctionAggregate function aggName (Just aggDesc) perms
        -- , mapMaybeFieldParser (asDbRootField . QDBAggregation) $ selectFunctionAggregate function aggName (Just aggDesc) perms
        ]

  actionParsers <- for allActions $ \actionInfo ->

@ -66,6 +66,7 @@ actionExecute nonObjectTypeMap actionInfo = runMaybeT do
        , _aaeForwardClientHeaders = _adForwardClientHeaders definition
        , _aaeStrfyNum = stringifyNum
        , _aaeTimeOut = _adTimeout definition
        , _aaeSource = getActionSourceInfo (_aiOutputObject actionInfo)
        }
  where
    ActionInfo actionName outputObject definition permissions comment = actionInfo
@ -149,6 +150,7 @@ actionAsyncQuery actionInfo = runMaybeT do
        , _aaaqFields = fields
        , _aaaqDefinitionList = mkDefinitionList outputObject
        , _aaaqStringifyNum = stringifyNum
        , _aaaqSource = getActionSourceInfo (_aiOutputObject actionInfo)
        }
  where
    ActionInfo actionName outputObject definition permissions comment = actionInfo
@ -164,8 +166,9 @@ actionOutputFields
  :: forall m n r. (BackendSchema 'Postgres, MonadSchema n m, MonadTableInfo 'Postgres r m, MonadRole r m, Has QueryContext r)
  => AnnotatedObjectType 'Postgres
  -> m (Parser 'Output n (RQL.AnnFieldsG 'Postgres (UnpreparedValue 'Postgres)))
actionOutputFields outputObject = do
  let scalarOrEnumFields = map scalarOrEnumFieldParser $ toList $ _otdFields outputObject
actionOutputFields annotatedObject = do
  let outputObject = _aotDefinition annotatedObject
      scalarOrEnumFields = map scalarOrEnumFieldParser $ toList $ _otdFields outputObject
  relationshipFields <- forM (_otdRelationships outputObject) $ traverse relationshipFieldParser
  let allFieldParsers = scalarOrEnumFields <>
        maybe [] (catMaybes . toList) relationshipFields
@ -194,7 +197,7 @@ actionOutputFields outputObject = do
      :: TypeRelationship (TableInfo 'Postgres) (ColumnInfo 'Postgres)
      -> m (Maybe (FieldParser n (RQL.AnnFieldG 'Postgres (UnpreparedValue 'Postgres))))
    relationshipFieldParser typeRelationship = runMaybeT do
      let TypeRelationship relName relType tableInfo fieldMapping = typeRelationship
      let TypeRelationship relName relType _ tableInfo fieldMapping = typeRelationship
          tableName = _tciName $ _tiCoreInfo tableInfo
          fieldName = unRelationshipName relName
      roleName <- lift askRoleName
@ -214,13 +217,14 @@ actionOutputFields outputObject = do
          RQL.AnnRelationSelectG tableRelName columnMapping selectExp

mkDefinitionList :: AnnotatedObjectType 'Postgres -> [(PGCol, ScalarType 'Postgres)]
mkDefinitionList ObjectTypeDefinition{..} =
mkDefinitionList AnnotatedObjectType{..} =
  flip map (toList _otdFields) $ \ObjectFieldDefinition{..} ->
    (unsafePGCol . G.unName . unObjectFieldName $ _ofdName,) $
    case Map.lookup _ofdName fieldReferences of
      Nothing -> fieldTypeToScalarType $ snd _ofdType
      Just columnInfo -> unsafePGColumnToBackend $ pgiType columnInfo
  where
    ObjectTypeDefinition{..} = _aotDefinition
    fieldReferences =
      Map.unions $ map _trFieldMapping $ maybe [] toList _otdRelationships


@ -1,21 +1,21 @@
{-# LANGUAGE AllowAmbiguousTypes #-} -- TODO avoid this language feature
{-# LANGUAGE AllowAmbiguousTypes #-}

module Hasura.GraphQL.Schema.Backend where

import Hasura.Prelude

import Data.Has
import Data.Aeson
import Data.Has

import qualified Hasura.RQL.IR.Select as IR
import qualified Hasura.RQL.IR.Select as IR

import Language.GraphQL.Draft.Syntax (Nullability, Name)
import Hasura.GraphQL.Parser ( InputFieldsParser, Kind (..), Parser
                             , UnpreparedValue (..), Opaque
                             , Definition, EnumValueInfo, FieldParser)
import Hasura.GraphQL.Parser (Definition, EnumValueInfo, FieldParser,
                              InputFieldsParser, Kind (..), Opaque, Parser,
                              UnpreparedValue (..))
import Hasura.GraphQL.Parser.Class
import Hasura.GraphQL.Schema.Common
import Hasura.RQL.Types hiding (EnumValueInfo)
import Hasura.RQL.Types hiding (EnumValueInfo)
import Language.GraphQL.Draft.Syntax (Name, Nullability)


class Backend b => BackendSchema (b :: BackendType) where
@ -78,6 +78,6 @@ class Backend b => BackendSchema (b :: BackendType) where
       , MonadRole r m
       , Has QueryContext r
       )
    => m (Parser 'Output n (HashMap (TableName b) (SelPermInfo b, PrimaryKeyColumns b, AnnotatedFields b)))
    => m (Parser 'Output n (HashMap (TableName b) (SourceName, SourceConfig b, SelPermInfo b, PrimaryKeyColumns b, AnnotatedFields b)))

type ComparisonExp b = OpExpG b (UnpreparedValue b)

@ -22,35 +22,37 @@ module Hasura.GraphQL.Schema.Select

import Hasura.Prelude

import qualified Data.Aeson as J
import qualified Data.Aeson.Extended as J
import qualified Data.Aeson.Internal as J
import qualified Data.ByteString.Lazy as BL
import qualified Data.HashMap.Strict as Map
import qualified Data.HashSet as Set
import qualified Data.List.NonEmpty as NE
import qualified Data.Sequence as Seq
import qualified Data.Sequence.NonEmpty as NESeq
import qualified Data.Text as T
import qualified Language.GraphQL.Draft.Syntax as G
import qualified Data.Aeson as J
import qualified Data.Aeson.Extended as J
import qualified Data.Aeson.Internal as J
import qualified Data.ByteString.Lazy as BL
import qualified Data.HashMap.Strict as Map
import qualified Data.HashSet as Set
import qualified Data.List.NonEmpty as NE
import qualified Data.Sequence as Seq
import qualified Data.Sequence.NonEmpty as NESeq
import qualified Data.Text as T
import qualified Language.GraphQL.Draft.Syntax as G

import Control.Lens hiding (index)
import Control.Lens hiding (index)
import Data.Has
import Data.Int (Int32)
import Data.Int (Int32)
import Data.Parser.JSONPath
import Data.Text.Extended
import Data.Traversable (mapAccumL)
import Data.Traversable (mapAccumL)

import qualified Hasura.Backends.Postgres.SQL.Types as PG
import qualified Hasura.GraphQL.Execute.Types as ET
import qualified Hasura.GraphQL.Parser as P
import qualified Hasura.GraphQL.Parser.Internal.Parser as P
import qualified Hasura.RQL.IR.BoolExp as IR
import qualified Hasura.RQL.IR.OrderBy as IR
import qualified Hasura.RQL.IR.Select as IR
import qualified Hasura.Backends.Postgres.Execute.Types as PG
import qualified Hasura.Backends.Postgres.SQL.Types as PG
import qualified Hasura.GraphQL.Execute.Types as ET
import qualified Hasura.GraphQL.Parser as P
import qualified Hasura.GraphQL.Parser.Internal.Parser as P
import qualified Hasura.RQL.IR.BoolExp as IR
import qualified Hasura.RQL.IR.OrderBy as IR
import qualified Hasura.RQL.IR.Select as IR

import Hasura.GraphQL.Parser (FieldParser, InputFieldsParser, Kind (..),
                              Parser, UnpreparedValue (..), mkParameter)
import Hasura.GraphQL.Context
import Hasura.GraphQL.Parser (FieldParser, InputFieldsParser, Kind (..),
                              Parser, UnpreparedValue (..), mkParameter)
import Hasura.GraphQL.Parser.Class
import Hasura.GraphQL.Schema.Backend
import Hasura.GraphQL.Schema.BoolExp
@ -59,7 +61,7 @@ import Hasura.GraphQL.Schema.OrderBy
import Hasura.GraphQL.Schema.Remote
import Hasura.GraphQL.Schema.Table
import Hasura.RQL.Types
import Hasura.Server.Utils (executeJSONPath)
import Hasura.Server.Utils (executeJSONPath)


-- 1. top level selection functions
@ -1230,18 +1232,29 @@ nodePG
  , MonadRole r m
  , Has QueryContext r
  )
  => m (P.Parser 'Output n (HashMap (TableName 'Postgres) (SelPermInfo 'Postgres, PrimaryKeyColumns 'Postgres, AnnotatedFields 'Postgres)))
  => m (P.Parser 'Output n
        ( HashMap (TableName 'Postgres)
          ( SourceName
          , SourceConfig 'Postgres
          , SelPermInfo 'Postgres
          , PrimaryKeyColumns 'Postgres
          , AnnotatedFields 'Postgres
          )
        )
       )
nodePG = memoizeOn 'nodePG () do
  let idDescription = G.Description "A globally unique identifier"
      idField = P.selection_ $$(G.litName "id") (Just idDescription) P.identifier
      nodeInterfaceDescription = G.Description "An object with globally unique ID"
  allTables :: TableCache 'Postgres <- asks getter
  tables :: HashMap (TableName 'Postgres) (Parser 'Output n (SelPermInfo 'Postgres, NESeq (ColumnInfo 'Postgres), AnnotatedFields 'Postgres)) <-
    Map.mapMaybe id <$> flip Map.traverseWithKey allTables \table _ -> runMaybeT do
  sources :: SourceCache 'Postgres <- asks getter
  let allTables = Map.fromList $ flip concatMap (Map.toList sources) $ -- FIXME? When source name is used in type generation?
        \(source, sourceCache) -> map (, (source, _pcConfiguration sourceCache)) $ Map.keys $ _pcTables sourceCache
  tables <-
    Map.mapMaybe id <$> flip Map.traverseWithKey allTables \table (source, sourceConfig) -> runMaybeT do
      tablePkeyColumns <- MaybeT $ (^? tiCoreInfo.tciPrimaryKey._Just.pkColumns) <$> askTableInfo table
      selectPermissions <- MaybeT $ tableSelectPermissions table
      annotatedFieldsParser <- lift $ tableSelectionSet table selectPermissions
      pure $ (selectPermissions, tablePkeyColumns,) <$> annotatedFieldsParser
      pure $ (source, sourceConfig, selectPermissions, tablePkeyColumns,) <$> annotatedFieldsParser
  pure $ P.selectionSetInterface $$(G.litName "Node")
    (Just nodeInterfaceDescription) [idField] tables

@ -1253,7 +1266,7 @@ nodeField
  , MonadRole r m
  , Has QueryContext r
  )
  => m (P.FieldParser n (SelectExp 'Postgres))
  => m (P.FieldParser n (QueryRootField (UnpreparedValue 'Postgres)))
nodeField = do
  let idDescription = G.Description "A globally unique id"
      idArgument = P.field $$(G.litName "id") (Just idDescription) P.identifier
@ -1262,11 +1275,12 @@ nodeField = do
  return $ P.subselection $$(G.litName "node") Nothing idArgument nodeObject `P.bindField`
    \(ident, parseds) -> do
      NodeIdV1 (V1NodeId table columnValues) <- parseNodeId ident
      (perms, pkeyColumns, fields) <-
      (source, sourceConfig, perms, pkeyColumns, fields) <-
        onNothing (Map.lookup table parseds) $
          withArgsPath $ throwInvalidNodeId $ "the table " <>> ident
      whereExp <- buildNodeIdBoolExp columnValues pkeyColumns
      return $ IR.AnnSelectG
      let pgExecCtx = PG._pscExecCtx sourceConfig
      return $ RFDB source pgExecCtx $ QDBPrimaryKey $ IR.AnnSelectG
        { IR._asnFields = fields
        , IR._asnFrom = IR.FromTable table
        , IR._asnPerm = tablePermissionsInfo perms

@ -18,6 +18,7 @@ module Hasura.GraphQL.Transport.HTTP
  ) where

import Control.Monad.Morph (hoist)
import Control.Monad.Trans.Control (MonadBaseControl)

import Hasura.EncJSON
import Hasura.GraphQL.Context
@ -44,6 +45,7 @@ import qualified Data.Text as T
import qualified Database.PG.Query as Q
import qualified Hasura.Backends.Postgres.Execute.RemoteJoin as RJ
import qualified Hasura.GraphQL.Execute as E
import qualified Hasura.GraphQL.Execute.Action as EA
import qualified Hasura.GraphQL.Execute.Query as EQ
import qualified Hasura.Logging as L
import qualified Hasura.Server.Telemetry.Counters as Telem
@ -70,7 +72,7 @@ class Monad m => MonadExecuteQuery m where
  cacheLookup
    :: [QueryRootField (UnpreparedValue 'Postgres)]
    -- ^ Used to check that the query is cacheable
    -> ExecutionPlan (Maybe (Maybe (RJ.RemoteJoins 'Postgres)))
    -> ExecutionPlan action (Maybe (Maybe (RJ.RemoteJoins 'Postgres)))
    -- ^ Used to check if the elaborated query supports caching
    -> QueryCacheKey
    -- ^ Key that uniquely identifies the result of a query execution
@ -114,7 +116,7 @@ instance MonadExecuteQuery m => MonadExecuteQuery (MetadataStorageT m) where
  cacheStore a b = hoist (hoist lift) $ cacheStore a b

-- | A partial result, e.g. from a remote schema or postgres, which we'll
-- assemble into the final result for the client.
-- assemble into the final result for the client.
--
-- Nothing to do with graphql fragments...
data ResultsFragment = ResultsFragment
@ -129,6 +131,7 @@ runGQ
  :: forall m
   . ( HasVersion
     , MonadIO m
     , MonadBaseControl IO m
     , MonadError QErr m
     , MonadReader E.ExecutionCtx m
     , E.MonadGQLExecutionCheck m
@ -149,13 +152,13 @@ runGQ
  -> m (HttpResponse EncJSON)
runGQ env logger reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
  (telemTimeTot_DT, (telemCacheHit, (telemQueryType, telemTimeIO_DT, telemLocality, resp))) <- withElapsedTime $ do
    E.ExecutionCtx _ sqlGenCtx pgExecCtx {- planCache -} sc scVer httpManager enableAL <- ask
    E.ExecutionCtx _ sqlGenCtx {- planCache -} sc scVer httpManager enableAL <- ask

    -- run system authorization on the GraphQL API
    reqParsed <- E.checkGQLExecution userInfo (reqHeaders, ipAddress) enableAL sc reqUnparsed
                 >>= flip onLeft throwError

    (telemCacheHit, execPlan) <- E.getResolvedExecPlan env logger pgExecCtx {- planCache -}
    (telemCacheHit, execPlan) <- E.getResolvedExecPlan env logger {- planCache -}
                                 userInfo sqlGenCtx sc scVer queryType
                                 httpManager reqHeaders (reqUnparsed, reqParsed)
    (telemCacheHit,) <$> case execPlan of
@ -168,12 +171,15 @@ runGQ env logger reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
            pure (Telem.Query, 0, Telem.Local, HttpResponse cachedResponseData responseHeaders)
          Nothing -> do
            conclusion <- runExceptT $ forWithKey queryPlans $ \fieldName -> \case
              E.ExecStepDB (tx, genSql) -> doQErr $ do
              E.ExecStepDB pgExecCtx (tx, genSql) -> doQErr $ do
                (telemTimeIO_DT, resp) <-
                  runQueryDB reqId reqUnparsed fieldName tx genSql
                  runQueryDB reqId reqUnparsed fieldName pgExecCtx tx genSql
                return $ ResultsFragment telemTimeIO_DT Telem.Local resp []
              E.ExecStepRemote rsi gqlReq ->
                runRemoteGQ httpManager fieldName rsi gqlReq
              E.ExecStepAction aep -> do
                (time, r) <- doQErr $ EA.runActionExecution aep
                pure $ ResultsFragment time Telem.Empty r []
              E.ExecStepRaw json ->
                buildRaw json
            out@(_, _, _, HttpResponse responseData _) <- buildResult Telem.Query conclusion responseHeaders
@ -182,11 +188,14 @@ runGQ env logger reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do

      E.MutationExecutionPlan mutationPlans -> do
        conclusion <- runExceptT $ forWithKey mutationPlans $ \fieldName -> \case
          E.ExecStepDB (tx, responseHeaders) -> doQErr $ do
            (telemTimeIO_DT, resp) <- runMutationDB reqId reqUnparsed userInfo tx
          E.ExecStepDB pgExecCtx (tx, responseHeaders) -> doQErr $ do
            (telemTimeIO_DT, resp) <- runMutationDB reqId reqUnparsed userInfo pgExecCtx tx
            return $ ResultsFragment telemTimeIO_DT Telem.Local resp responseHeaders
          E.ExecStepRemote rsi gqlReq ->
            runRemoteGQ httpManager fieldName rsi gqlReq
          E.ExecStepAction (aep, hdrs) -> do
            (time, r) <- doQErr $ EA.runActionExecution aep
            pure $ ResultsFragment time Telem.Empty r hdrs
          E.ExecStepRaw json ->
            buildRaw json
        buildResult Telem.Mutation conclusion []
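Execution-side counterpart of the schema change: ExecStepDB now carries the PGExecCtx of the source its root field was resolved against, and runQueryDB/runMutationDB take that context as an argument instead of reading a single global one from ExecutionCtx. Roughly (an editor's simplification of the plan-step type, not the commit's exact definition), a plan step now looks like:

    -- each DB step knows which source's pool to run on, so one request can
    -- fan out across several Postgres databases
    data ExecutionStep db remote action raw
      = ExecStepDB PGExecCtx db    -- the transaction plus its target source
      | ExecStepRemote RemoteSchemaInfo remote
      | ExecStepAction action
      | ExecStepRaw raw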
@ -257,6 +266,7 @@ buildRaw json = do
runGQBatched
  :: ( HasVersion
     , MonadIO m
     , MonadBaseControl IO m
     , MonadError QErr m
     , MonadReader E.ExecutionCtx m
     , E.MonadGQLExecutionCheck m
@ -306,13 +316,14 @@ runQueryDB
  => RequestId
  -> GQLReqUnparsed
  -> G.Name -- ^ name of the root field we're fetching
  -> PGExecCtx
  -> Tracing.TraceT (LazyTxT QErr IO) EncJSON
  -> Maybe EQ.PreparedSql
  -> m (DiffTime, EncJSON)
  -- ^ Also return the time spent in the PG query; for telemetry.
runQueryDB reqId query fieldName tx genSql = do
runQueryDB reqId query fieldName pgExecCtx tx genSql = do
  -- log the generated SQL and the graphql query
  E.ExecutionCtx logger _ pgExecCtx _ _ _ _ <- ask
  E.ExecutionCtx logger _ _ _ _ _ <- ask
  logQueryLog logger query ((fieldName,) <$> genSql) reqId
  withElapsedTime $ trace ("Postgres Query for root field " <> G.unName fieldName) $
    Tracing.interpTraceT id $ hoist (runQueryTx pgExecCtx) tx
@ -327,12 +338,13 @@ runMutationDB
  => RequestId
  -> GQLReqUnparsed
  -> UserInfo
  -> PGExecCtx
  -> Tracing.TraceT (LazyTxT QErr IO) EncJSON
  -> m (DiffTime, EncJSON)
  -- ^ Also return 'Mutation' when the operation was a mutation, and the time
  -- spent in the PG query; for telemetry.
runMutationDB reqId query userInfo tx = do
  E.ExecutionCtx logger _ pgExecCtx _ _ _ _ <- ask
runMutationDB reqId query userInfo pgExecCtx tx = do
  E.ExecutionCtx logger _ _ _ _ _ <- ask
  -- log the graphql query
  logQueryLog logger query Nothing reqId
  ctx <- Tracing.currentContext

@ -67,6 +67,7 @@ import Hasura.Server.Version (HasVersion)
import Hasura.Session

import qualified Hasura.GraphQL.Execute as E
import qualified Hasura.GraphQL.Execute.Action as EA
import qualified Hasura.GraphQL.Execute.LiveQuery as LQ
import qualified Hasura.GraphQL.Execute.LiveQuery.Poll as LQ
import qualified Hasura.GraphQL.Execute.Query as EQ
@ -220,7 +221,6 @@ mkWsErrorLog uv ci ev =
data WSServerEnv
  = WSServerEnv
  { _wseLogger :: !(L.Logger L.Hasura)
  , _wseRunTx :: !PGExecCtx
  , _wseLiveQMap :: !LQ.LiveQueriesState
  , _wseGCtxMap :: !(IO (SchemaCache, SchemaCacheVer))
  -- ^ an action that always returns the latest version of the schema cache. See 'SchemaCacheRef'.
@ -332,6 +332,7 @@ onStart
  , Tracing.MonadTrace m
  , MonadExecuteQuery m
  , EQ.MonadQueryInstrumentation m
  , MC.MonadBaseControl IO m
  , MonadMetadataStorage (MetadataStorageT m)
  )
  => Env.Environment -> WSServerEnv -> WSConn -> StartMsg -> m ()
@ -359,8 +360,8 @@ onStart env serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do

  reqParsedE <- lift $ E.checkGQLExecution userInfo (reqHdrs, ipAddress) enableAL sc q
  reqParsed <- onLeft reqParsedE (withComplete . preExecErr requestId)
  execPlanE <- runExceptT $ E.getResolvedExecPlan env logger pgExecCtx
    {- planCache -} userInfo sqlGenCtx sc scVer queryType httpMgr reqHdrs (q, reqParsed)
  execPlanE <- runExceptT $ E.getResolvedExecPlan env logger {- planCache -}
    userInfo sqlGenCtx sc scVer queryType httpMgr reqHdrs (q, reqParsed)

  (telemCacheHit, execPlan) <- onLeft execPlanE (withComplete . preExecErr requestId)

@ -376,13 +377,16 @@ onStart env serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
          sendSuccResp cachedResponseData $ LQ.LiveQueryMetadata 0
        Nothing -> do
          conclusion <- runExceptT $ forWithKey queryPlan $ \fieldName -> \case
            E.ExecStepDB (tx, genSql) -> doQErr $ Tracing.trace "Postgres Query" $ do
            E.ExecStepDB pgExecCtx (tx, genSql) -> doQErr $ Tracing.trace "Postgres Query" $ do
              logQueryLog logger q ((fieldName,) <$> genSql) requestId
              (telemTimeIO_DT, resp) <- Tracing.interpTraceT id $ withElapsedTime $
                hoist (runQueryTx pgExecCtx) tx
              return $ ResultsFragment telemTimeIO_DT Telem.Local resp []
            E.ExecStepRemote rsi gqlReq -> do
              runRemoteGQ fieldName userInfo reqHdrs rsi gqlReq
            E.ExecStepAction actionExecPlan -> do
              (time, r) <- doQErr $ EA.runActionExecution actionExecPlan
              pure $ ResultsFragment time Telem.Empty r []
            E.ExecStepRaw json ->
              buildRaw json
          buildResult Telem.Query telemCacheHit timerTot requestId conclusion
@ -395,7 +399,7 @@ onStart env serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
    E.MutationExecutionPlan mutationPlan -> do
      conclusion <- runExceptT $ forWithKey mutationPlan $ \fieldName -> \case
        -- Ignoring response headers since we can't send them over WebSocket
        E.ExecStepDB (tx, _responseHeaders) -> doQErr $ Tracing.trace "Mutate" do
        E.ExecStepDB pgExecCtx (tx, _responseHeaders) -> doQErr $ Tracing.trace "Mutate" do
          logQueryLog logger q Nothing requestId
          ctx <- Tracing.currentContext
          (telemTimeIO_DT, resp) <- Tracing.interpTraceT
@ -404,6 +408,9 @@ onStart env serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
            . withTraceContext ctx . withUserInfo userInfo
            ) $ withElapsedTime tx
          return $ ResultsFragment telemTimeIO_DT Telem.Local resp []
        E.ExecStepAction (actionExecPlan, hdrs) -> do
          (time, r) <- doQErr $ EA.runActionExecution actionExecPlan
          pure $ ResultsFragment time Telem.Empty r hdrs
        E.ExecStepRemote rsi gqlReq -> do
          runRemoteGQ fieldName userInfo reqHdrs rsi gqlReq
        E.ExecStepRaw json ->
@ -453,7 +460,7 @@ onStart env serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
      value <- mapExceptT lift $ extractFieldFromResponse (G.unName fieldName) resp
      return $ ResultsFragment telemTimeIO_DT Telem.Remote (JO.toEncJSON value) []

    WSServerEnv logger pgExecCtx lqMap getSchemaCache httpMgr _ sqlGenCtx {- planCache -}
    WSServerEnv logger lqMap getSchemaCache httpMgr _ sqlGenCtx {- planCache -}
      _ enableAL _keepAliveDelay = serverEnv

    WSConnData userInfoR opMap errRespTy queryType = WS.getData wsConn
@ -531,6 +538,7 @@ onMessage
  , Tracing.HasReporter m
  , MonadExecuteQuery m
  , EQ.MonadQueryInstrumentation m
  , MC.MonadBaseControl IO m
  , MonadMetadataStorage (MetadataStorageT m)
  )
  => Env.Environment
@ -694,7 +702,6 @@ onClose logger lqMap wsConn = do
createWSServerEnv
  :: (MonadIO m)
  => L.Logger L.Hasura
  -> PGExecCtx
  -> LQ.LiveQueriesState
  -> IO (SchemaCache, SchemaCacheVer)
  -> H.Manager
@ -704,11 +711,11 @@ createWSServerEnv
  -> KeepAliveDelay
--  -> E.PlanCache
  -> m WSServerEnv
createWSServerEnv logger isPgCtx lqState getSchemaCache httpManager
createWSServerEnv logger lqState getSchemaCache httpManager
  corsPolicy sqlGenCtx enableAL keepAliveDelay {- planCache -} = do
  wsServer <- liftIO $ STM.atomically $ WS.createWSServer logger
  return $
    WSServerEnv logger isPgCtx lqState getSchemaCache httpManager corsPolicy
    WSServerEnv logger lqState getSchemaCache httpManager corsPolicy
      sqlGenCtx {- planCache -} wsServer enableAL keepAliveDelay

createWSServerApp

@ -5,7 +5,7 @@ module Hasura.Metadata.Class
  , MetadataStorageT(..)
  , runMetadataStorageT
  , MonadMetadataStorage(..)
  , MonadScheduledEvents(..)
  , MonadMetadataStorageQueryAPI(..)
  )
where

@ -84,6 +84,8 @@ class (MonadError QErr m) => MonadMetadataStorage m where
  notifySchemaCacheSync :: InstanceId -> CacheInvalidations -> m ()
  processSchemaSyncEventPayload :: InstanceId -> Value -> m SchemaSyncEventProcessResult

  checkMetadataStorageHealth :: m Bool

  -- Scheduled triggers
  -- TODO:-
  -- Ideally we would've liked to avoid having functions that are specific to
@ -107,6 +109,7 @@ class (MonadError QErr m) => MonadMetadataStorage m where
  fetchUndeliveredActionEvents :: m [ActionLogItem]
  setActionStatus :: ActionId -> AsyncActionStatus -> m ()
  fetchActionResponse :: ActionId -> m ActionLogResponse
  clearActionData :: ActionName -> m ()

instance (MonadMetadataStorage m) => MonadMetadataStorage (ReaderT r m) where
  fetchMetadata = lift fetchMetadata
@ -114,6 +117,8 @@ instance (MonadMetadataStorage m) => MonadMetadataStorage (ReaderT r m) where
  notifySchemaCacheSync a b = lift $ notifySchemaCacheSync a b
  processSchemaSyncEventPayload a b = lift $ processSchemaSyncEventPayload a b

  checkMetadataStorageHealth = lift checkMetadataStorageHealth

  getDeprivedCronTriggerStats = lift getDeprivedCronTriggerStats
  getScheduledEventsForDelivery = lift getScheduledEventsForDelivery
  insertScheduledEvent = lift . insertScheduledEvent
@ -127,6 +132,7 @@ instance (MonadMetadataStorage m) => MonadMetadataStorage (ReaderT r m) where
  fetchUndeliveredActionEvents = lift fetchUndeliveredActionEvents
  setActionStatus a b = lift $ setActionStatus a b
  fetchActionResponse = lift . fetchActionResponse
  clearActionData = lift . clearActionData

instance (MonadMetadataStorage m) => MonadMetadataStorage (StateT s m) where
  fetchMetadata = lift fetchMetadata
@ -134,6 +140,8 @@ instance (MonadMetadataStorage m) => MonadMetadataStorage (StateT s m) where
  notifySchemaCacheSync a b = lift $ notifySchemaCacheSync a b
  processSchemaSyncEventPayload a b = lift $ processSchemaSyncEventPayload a b

  checkMetadataStorageHealth = lift checkMetadataStorageHealth

  getDeprivedCronTriggerStats = lift getDeprivedCronTriggerStats
  getScheduledEventsForDelivery = lift getScheduledEventsForDelivery
  insertScheduledEvent = lift . insertScheduledEvent
@ -147,6 +155,7 @@ instance (MonadMetadataStorage m) => MonadMetadataStorage (StateT s m) where
  fetchUndeliveredActionEvents = lift fetchUndeliveredActionEvents
  setActionStatus a b = lift $ setActionStatus a b
  fetchActionResponse = lift . fetchActionResponse
  clearActionData = lift . clearActionData

instance (MonadMetadataStorage m) => MonadMetadataStorage (Tracing.TraceT m) where
  fetchMetadata = lift fetchMetadata
@ -154,6 +163,8 @@ instance (MonadMetadataStorage m) => MonadMetadataStorage (Tracing.TraceT m) whe
  notifySchemaCacheSync a b = lift $ notifySchemaCacheSync a b
  processSchemaSyncEventPayload a b = lift $ processSchemaSyncEventPayload a b

  checkMetadataStorageHealth = lift checkMetadataStorageHealth

  getDeprivedCronTriggerStats = lift getDeprivedCronTriggerStats
  getScheduledEventsForDelivery = lift getScheduledEventsForDelivery
  insertScheduledEvent = lift . insertScheduledEvent
@ -167,13 +178,16 @@ instance (MonadMetadataStorage m) => MonadMetadataStorage (Tracing.TraceT m) whe
  fetchUndeliveredActionEvents = lift fetchUndeliveredActionEvents
  setActionStatus a b = lift $ setActionStatus a b
  fetchActionResponse = lift . fetchActionResponse
  clearActionData = lift . clearActionData

instance (MonadMetadataStorage m) => MonadMetadataStorage (LazyTxT QErr m) where
instance (MonadMetadataStorage m) => MonadMetadataStorage (ExceptT QErr m) where
  fetchMetadata = lift fetchMetadata
  setMetadata = lift . setMetadata
  notifySchemaCacheSync a b = lift $ notifySchemaCacheSync a b
  processSchemaSyncEventPayload a b = lift $ processSchemaSyncEventPayload a b

  checkMetadataStorageHealth = lift checkMetadataStorageHealth

  getDeprivedCronTriggerStats = lift getDeprivedCronTriggerStats
  getScheduledEventsForDelivery = lift getScheduledEventsForDelivery
  insertScheduledEvent = lift . insertScheduledEvent
@ -187,6 +201,7 @@ instance (MonadMetadataStorage m) => MonadMetadataStorage (LazyTxT QErr m) where
  fetchUndeliveredActionEvents = lift fetchUndeliveredActionEvents
  setActionStatus a b = lift $ setActionStatus a b
  fetchActionResponse = lift . fetchActionResponse
  clearActionData = lift . clearActionData

instance (MonadMetadataStorage m) => MonadMetadataStorage (MetadataT m) where
  fetchMetadata = lift fetchMetadata
@ -194,6 +209,8 @@ instance (MonadMetadataStorage m) => MonadMetadataStorage (MetadataT m) where
  notifySchemaCacheSync a b = lift $ notifySchemaCacheSync a b
  processSchemaSyncEventPayload a b = lift $ processSchemaSyncEventPayload a b

  checkMetadataStorageHealth = lift checkMetadataStorageHealth

  getDeprivedCronTriggerStats = lift getDeprivedCronTriggerStats
  getScheduledEventsForDelivery = lift getScheduledEventsForDelivery
  insertScheduledEvent = lift . insertScheduledEvent
@ -207,6 +224,7 @@ instance (MonadMetadataStorage m) => MonadMetadataStorage (MetadataT m) where
  fetchUndeliveredActionEvents = lift fetchUndeliveredActionEvents
  setActionStatus a b = lift $ setActionStatus a b
  fetchActionResponse = lift . fetchActionResponse
  clearActionData = lift . clearActionData

{- Note [Generic MetadataStorageT transformer]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -266,6 +284,7 @@ newtype MetadataStorageT m a
    , MFunctor
    , Tracing.HasReporter
    , Tracing.MonadTrace
    , MonadResolveSource
    )

deriving instance (MonadBase IO m) => MonadBase IO (MetadataStorageT m)
@ -284,6 +303,8 @@ instance (Monad m, Monad (t m), MonadTrans t, MonadMetadataStorage (MetadataStor
  notifySchemaCacheSync a b = hoist lift $ notifySchemaCacheSync a b
  processSchemaSyncEventPayload a b = hoist lift $ processSchemaSyncEventPayload a b

  checkMetadataStorageHealth = hoist lift checkMetadataStorageHealth

  getDeprivedCronTriggerStats = hoist lift getDeprivedCronTriggerStats
  getScheduledEventsForDelivery = hoist lift getScheduledEventsForDelivery
  insertScheduledEvent = hoist lift . insertScheduledEvent
@ -297,8 +318,10 @@ instance (Monad m, Monad (t m), MonadTrans t, MonadMetadataStorage (MetadataStor
  fetchUndeliveredActionEvents = hoist lift fetchUndeliveredActionEvents
  setActionStatus a b = hoist lift $ setActionStatus a b
  fetchActionResponse = hoist lift . fetchActionResponse
  clearActionData = hoist lift . clearActionData

class (MonadMetadataStorage m) => MonadScheduledEvents m where
-- | Operations from @'MonadMetadataStorage' used in '/v1/query' and '/v1/metadata' APIs
class (MonadMetadataStorage m) => MonadMetadataStorageQueryAPI m where
  -- | Record a cron/one-off event
  createScheduledEvent :: ScheduledEventSeed -> m ()
  createScheduledEvent = insertScheduledEvent
@ -307,7 +330,12 @@ class (MonadMetadataStorage m) => MonadScheduledEvents m where
  dropFutureCronEvents :: TriggerName -> m ()
  dropFutureCronEvents = clearFutureCronEvents

instance (MonadScheduledEvents m) => MonadScheduledEvents (ReaderT r m)
instance (MonadScheduledEvents m) => MonadScheduledEvents (StateT s m)
instance (MonadScheduledEvents m) => MonadScheduledEvents (Tracing.TraceT m)
instance (MonadScheduledEvents m) => MonadScheduledEvents (MetadataT m)
  -- | Delete async action logs
  deleteActionData :: ActionName -> m ()
  deleteActionData = clearActionData

instance (MonadMetadataStorageQueryAPI m) => MonadMetadataStorageQueryAPI (ReaderT r m)
instance (MonadMetadataStorageQueryAPI m) => MonadMetadataStorageQueryAPI (StateT s m)
instance (MonadMetadataStorageQueryAPI m) => MonadMetadataStorageQueryAPI (Tracing.TraceT m)
instance (MonadMetadataStorageQueryAPI m) => MonadMetadataStorageQueryAPI (MetadataT m)
-- instance (MonadMetadataStorageQueryAPI m) => MonadMetadataStorageQueryAPI (LazyTxT QErr m)
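The MonadMetadataStorage instances above are purely mechanical: each transformer delegates every class method with lift (or hoist lift for MetadataStorageT), which is why the new checkMetadataStorageHealth method has to touch each instance once. A sketch of the pattern for a hypothetical new transformer NewT (illustrative, not in this commit):

    instance (MonadMetadataStorage m) => MonadMetadataStorage (NewT m) where
      fetchMetadata              = lift fetchMetadata
      setMetadata                = lift . setMetadata
      checkMetadataStorageHealth = lift checkMetadataStorageHealth
      -- ...and so on, one lift per remaining method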

@ -27,7 +27,6 @@ import qualified Data.Aeson.TH as J
import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as Map
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Database.PG.Query as Q
import qualified Language.GraphQL.Draft.Syntax as G

import Control.Lens ((.~))
@ -36,6 +35,7 @@ import Data.Text.Extended
import Hasura.Backends.Postgres.SQL.Types
import Hasura.EncJSON
import Hasura.GraphQL.Utils
import Hasura.Metadata.Class
import Hasura.RQL.DDL.CustomTypes (lookupPGScalar)
import Hasura.RQL.Types
import Hasura.Session
@ -152,7 +152,7 @@ data DropAction
$(J.deriveJSON (J.aesonDrop 3 J.snakeCase) ''DropAction)

runDropAction
  :: (QErrM m, CacheRWM m, MonadTx m, MetadataM m)
  :: (QErrM m, CacheRWM m, MetadataM m, MonadMetadataStorageQueryAPI m)
  => DropAction -> m EncJSON
runDropAction (DropAction actionName clearDataM)= do
  void $ getActionInfo actionName
@ -160,7 +160,7 @@ runDropAction (DropAction actionName clearDataM)= do
    $ buildSchemaCache
    $ dropActionInMetadata actionName
  when (shouldClearActionData clearData) $
    liftTx $ clearActionDataFromCatalog actionName
    deleteActionData actionName
  return successMsg
  where
    -- When clearData is not present we assume that
@ -171,13 +171,6 @@ dropActionInMetadata :: ActionName -> MetadataModifier
dropActionInMetadata name =
  MetadataModifier $ metaActions %~ OMap.delete name

clearActionDataFromCatalog :: ActionName -> Q.TxE QErr ()
clearActionDataFromCatalog actionName =
  Q.unitQE defaultTxErrorHandler [Q.sql|
      DELETE FROM hdb_catalog.hdb_action_log
        WHERE action_name = $1
      |] (Identity actionName) True

newtype ActionMetadataField
  = ActionMetadataField { unActionMetadataField :: Text }
  deriving (Show, Eq, J.FromJSON, J.ToJSON)

@ -37,26 +37,38 @@ import Hasura.SQL.Types

data AddComputedField
  = AddComputedField
  { _afcTable :: !QualifiedTable
  { _afcSource :: !SourceName
  , _afcTable :: !QualifiedTable
  , _afcName :: !ComputedFieldName
  , _afcDefinition :: !ComputedFieldDefinition
  , _afcComment :: !(Maybe Text)
  } deriving (Show, Eq, Generic)
instance NFData AddComputedField
instance Cacheable AddComputedField
$(deriveJSON (aesonDrop 4 snakeCase) ''AddComputedField)
$(deriveToJSON (aesonDrop 4 snakeCase) ''AddComputedField)

instance FromJSON AddComputedField where
  parseJSON = withObject "Object" $ \o ->
    AddComputedField
      <$> o .:? "source" .!= defaultSource
      <*> o .: "table"
      <*> o .: "name"
      <*> o .: "definition"
      <*> o .:? "commment"

runAddComputedField :: (MonadError QErr m, CacheRWM m, MetadataM m) => AddComputedField -> m EncJSON
runAddComputedField q = do
  withPathK "table" $ askTabInfo table
  let metadataObj = MOTableObj table $ MTOComputedField computedFieldName
  withPathK "table" $ askTabInfo source table
  let metadataObj = MOSourceObjId source $
                    SMOTableObj table $ MTOComputedField computedFieldName
      metadata = ComputedFieldMetadata computedFieldName (_afcDefinition q) (_afcComment q)
  buildSchemaCacheFor metadataObj
    $ MetadataModifier
    $ metaTables.ix table.tmComputedFields
    $ tableMetadataSetter source table.tmComputedFields
      %~ OMap.insert computedFieldName metadata
  pure successMsg
  where
    source = _afcSource q
    table = _afcTable q
    computedFieldName = _afcName q

@ -238,7 +250,8 @@ addComputedFieldP2Setup trackedTables table computedField definition rawFunction

data DropComputedField
  = DropComputedField
  { _dccTable :: !QualifiedTable
  { _dccSource :: !SourceName
  , _dccTable :: !QualifiedTable
  , _dccName :: !ComputedFieldName
  , _dccCascade :: !Bool
  } deriving (Show, Eq)
@ -247,32 +260,34 @@ $(deriveToJSON (aesonDrop 4 snakeCase) ''DropComputedField)
instance FromJSON DropComputedField where
  parseJSON = withObject "Object" $ \o ->
    DropComputedField
      <$> o .: "table"
      <$> o .:? "source" .!= defaultSource
      <*> o .: "table"
      <*> o .: "name"
      <*> o .:? "cascade" .!= False
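The hand-written FromJSON instances above keep the metadata API backwards compatible: "source" is optional and falls back to defaultSource, so payloads written before multi-source support still parse. A sketch (editor's illustration; the table and field names are made up, and it assumes the default source serializes as "default"):

    -- both payloads decode to the same DropComputedField, the first one
    -- defaulting the source, thanks to:  o .:? "source" .!= defaultSource
    --   {"table": "author", "name": "full_name", "cascade": true}
    --   {"source": "default", "table": "author", "name": "full_name", "cascade": true}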
|
||||
|
||||
runDropComputedField
|
||||
:: (QErrM m, CacheRWM m, MetadataM m)
|
||||
=> DropComputedField -> m EncJSON
|
||||
runDropComputedField (DropComputedField table computedField cascade) = do
|
||||
runDropComputedField (DropComputedField source table computedField cascade) = do
|
||||
-- Validation
|
||||
fields <- withPathK "table" $ _tciFieldInfoMap <$> askTableCoreInfo table
|
||||
fields <- withPathK "table" $ _tciFieldInfoMap <$> askTableCoreInfo source table
|
||||
void $ withPathK "name" $ askComputedFieldInfo fields computedField
|
||||
|
||||
-- Dependencies check
|
||||
sc <- askSchemaCache
|
||||
let deps = getDependentObjs sc $ SOTableObj table $ TOComputedField computedField
|
||||
let deps = getDependentObjs sc $ SOSourceObj source $
|
||||
SOITableObj table $ TOComputedField computedField
|
||||
when (not cascade && not (null deps)) $ reportDeps deps
|
||||
|
||||
withNewInconsistentObjsCheck do
|
||||
metadataModifiers <- mapM purgeComputedFieldDependency deps
|
||||
buildSchemaCache $ MetadataModifier $
|
||||
metaTables.ix table
|
||||
tableMetadataSetter source table
|
||||
%~ (dropComputedFieldInMetadata computedField) . foldl' (.) id metadataModifiers
|
||||
pure successMsg
|
||||
where
|
||||
purgeComputedFieldDependency = \case
|
||||
(SOTableObj qt (TOPerm roleName permType)) | qt == table ->
|
||||
(SOSourceObj _ (SOITableObj qt (TOPerm roleName permType))) | qt == table ->
|
||||
pure $ dropPermissionInMetadata roleName permType
|
||||
d -> throw500 $ "unexpected dependency for computed field "
|
||||
<> computedField <<> "; " <> reportSchemaObj d
|
||||
|
@ -12,6 +12,7 @@ import Hasura.Prelude
import qualified Data.HashMap.Strict           as Map
import qualified Data.HashSet                  as Set
import qualified Data.List.Extended            as L
import qualified Data.List.NonEmpty            as NE
import qualified Data.Text                     as T
import qualified Language.GraphQL.Draft.Syntax as G

@ -53,16 +54,16 @@ GraphQL types. To support this, we have to take a few extra steps:
-- scalars).
validateCustomTypeDefinitions
  :: (MonadValidate [CustomTypeValidationError] m)
  => TableCache 'Postgres
  => SourceCache 'Postgres
  -> CustomTypes
  -> HashSet (ScalarType 'Postgres)
  -- ^ all Postgres base types. See Note [Postgres scalars in custom types]
  -> m (AnnotatedCustomTypes 'Postgres)
validateCustomTypeDefinitions tableCache customTypes allPGScalars = do
validateCustomTypeDefinitions sources customTypes allPGScalars = do
  unless (null duplicateTypes) $ dispute $ pure $ DuplicateTypeNames duplicateTypes
  traverse_ validateEnum enumDefinitions
  reusedPGScalars <- execWriterT $ traverse_ validateInputObject inputObjectDefinitions
  annotatedObjects <- mapFromL (unObjectTypeName . _otdName) <$>
  annotatedObjects <- mapFromL (unObjectTypeName . _otdName . _aotDefinition) <$>
                      traverse validateObject objectDefinitions
  let scalarTypeMap = Map.map NOCTScalar $
        Map.map ASTCustom scalarTypes <> Map.mapWithKey ASTReusedScalar reusedPGScalars
@ -71,6 +72,7 @@ validateCustomTypeDefinitions tableCache customTypes allPGScalars = do
      nonObjectTypeMap = scalarTypeMap <> enumTypeMap <> inputObjectTypeMap
  pure $ AnnotatedCustomTypes nonObjectTypeMap annotatedObjects
  where
    sourceTables = Map.map _pcTables sources
    inputObjectDefinitions = fromMaybe [] $ _ctInputObjects customTypes
    objectDefinitions = fromMaybe [] $ _ctObjects customTypes
    scalarDefinitions = fromMaybe [] $ _ctScalars customTypes
@ -183,32 +185,40 @@ validateCustomTypeDefinitions tableCache customTypes allPGScalars = do
    let scalarOrEnumFieldMap = Map.fromList $
          map (_ofdName &&& (fst . _ofdType)) $ toList $ scalarOrEnumFields

    annotatedRelationships <- forM maybeRelationships $ \relationships ->
    annotatedRelationships <- forM maybeRelationships $ \relationships -> do
      let headSource NE.:| rest = _trSource <$> relationships
      -- this check is needed to ensure that custom type relationships are all defined to a single source
      unless (all (headSource ==) rest) $
        refute $ pure $ ObjectRelationshipMultiSources objectTypeName
      forM relationships $ \TypeRelationship{..} -> do
        --check that the table exists
        remoteTableInfo <- onNothing (Map.lookup _trRemoteTable tableCache) $
          refute $ pure $ ObjectRelationshipTableDoesNotExist
            objectTypeName _trName _trRemoteTable
        --check that the table exists
        remoteTableInfo <- onNothing (Map.lookup headSource sourceTables >>= Map.lookup _trRemoteTable) $
          refute $ pure $ ObjectRelationshipTableDoesNotExist
            objectTypeName _trName _trRemoteTable

        -- check that the column mapping is sane
        annotatedFieldMapping <- flip Map.traverseWithKey _trFieldMapping $
          \fieldName columnName -> do
            case Map.lookup fieldName scalarOrEnumFieldMap of
              Nothing -> dispute $ pure $ ObjectRelationshipFieldDoesNotExist
                objectTypeName _trName fieldName
              Just fieldType ->
                -- the field should be a non-list type scalar
                when (G.isListType fieldType) $
                  dispute $ pure $ ObjectRelationshipFieldListType
                    objectTypeName _trName fieldName
        -- check that the column mapping is sane
        annotatedFieldMapping <- flip Map.traverseWithKey _trFieldMapping $
          \fieldName columnName -> do
            case Map.lookup fieldName scalarOrEnumFieldMap of
              Nothing -> dispute $ pure $ ObjectRelationshipFieldDoesNotExist
                objectTypeName _trName fieldName
              Just fieldType ->
                -- the field should be a non-list type scalar
                when (G.isListType fieldType) $
                  dispute $ pure $ ObjectRelationshipFieldListType
                    objectTypeName _trName fieldName

            -- the column should be a column of the table
            onNothing (getColumnInfoM remoteTableInfo (fromCol @'Postgres columnName)) $ refute $ pure $
              ObjectRelationshipColumnDoesNotExist objectTypeName _trName _trRemoteTable columnName
            -- the column should be a column of the table
            onNothing (getColumnInfoM remoteTableInfo (fromCol @'Postgres columnName)) $ refute $ pure $
              ObjectRelationshipColumnDoesNotExist objectTypeName _trName _trRemoteTable columnName

        pure $ TypeRelationship _trName _trType remoteTableInfo annotatedFieldMapping
        pure $ TypeRelationship _trName _trType _trSource remoteTableInfo annotatedFieldMapping

    pure $ ObjectTypeDefinition objectTypeName (_otdDescription objectDefinition)
    let maybeSource = (_trSource . NE.head) <$> annotatedRelationships
        sourceConfig = maybeSource >>= \source -> _pcConfiguration <$> Map.lookup source sources

    pure $ flip AnnotatedObjectType sourceConfig $
      ObjectTypeDefinition objectTypeName (_otdDescription objectDefinition)
        scalarOrEnumFields annotatedRelationships

-- see Note [Postgres scalars in custom types]
@ -249,6 +259,8 @@ data CustomTypeValidationError
  | ObjectRelationshipColumnDoesNotExist
      !ObjectTypeName !RelationshipName !QualifiedTable !PGCol
  -- ^ The column specified in the relationship mapping does not exist
  | ObjectRelationshipMultiSources !ObjectTypeName
  -- ^ Object relationship refers to tables in multiple sources
  | DuplicateEnumValues !EnumTypeName !(Set.HashSet G.EnumValue)
  -- ^ duplicate enum values
  deriving (Show, Eq)
@ -299,6 +311,9 @@ showCustomTypeValidationError = \case
    <<> " for relationship " <> relName <<> " of object type " <> objType
    <<> " does not exist"

  ObjectRelationshipMultiSources objType ->
    "the object " <> objType <<> " has relationships that refer to tables in multiple sources"

  DuplicateEnumValues tyName values ->
    "the enum type " <> tyName <<> " has duplicate values: " <> dquoteList values

@ -320,13 +335,13 @@ clearCustomTypesInMetadata =

resolveCustomTypes
  :: (MonadError QErr m)
  => TableCache 'Postgres
  => SourceCache 'Postgres
  -> CustomTypes
  -> HashSet (ScalarType 'Postgres)
  -> m (AnnotatedCustomTypes 'Postgres)
resolveCustomTypes tableCache customTypes allPGScalars =
resolveCustomTypes sources customTypes allPGScalars =
  either (throw400 ConstraintViolation . showErrors) pure
    =<< runValidateT (validateCustomTypeDefinitions tableCache customTypes allPGScalars)
    =<< runValidateT (validateCustomTypeDefinitions sources customTypes allPGScalars)
  where
    showErrors :: [CustomTypeValidationError] -> Text
    showErrors allErrors =
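The single-source check added above boils down to a small predicate over a non-empty list: take the first relationship's source and insist the rest agree. A minimal sketch of that check in isolation (the function name is illustrative, not part of the diff):

-- Illustrative sketch, not part of the commit: the "all relationships target
-- one source" test, reduced to a predicate over a non-empty list.
import qualified Data.List.NonEmpty as NE

allSameSource :: Eq source => NE.NonEmpty source -> Bool
allSameSource (headSource NE.:| rest) = all (== headSource) rest

-- allSameSource ("default" NE.:| ["default"])  ==> True
-- allSameSource ("default" NE.:| ["replica"])  ==> False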
@ -1,32 +1,15 @@
module Hasura.RQL.DDL.Deps
  ( purgeRel
  , parseDropNotice
  , getIndirectDeps
  , reportDeps
  ( reportDeps
  , reportDepsExt
  )
where

import Hasura.Prelude

import qualified Data.HashSet      as HS
import qualified Data.Text         as T
import qualified Database.PG.Query as Q

import Data.Text.Extended

import Hasura.Backends.Postgres.SQL.Types
import Hasura.RQL.Types

purgeRel :: QualifiedTable -> RelName -> Q.Tx ()
purgeRel (QualifiedObject sn tn) rn =
  Q.unitQ [Q.sql|
    DELETE FROM hdb_catalog.hdb_relationship
    WHERE table_schema = $1
      AND table_name = $2
      AND rel_name = $3
  |] (sn, tn, rn) False

reportDeps :: (QErrM m) => [SchemaObjId] -> m ()
reportDeps deps =
  throw400 DependencyError $
@ -39,69 +22,3 @@ reportDepsExt deps unknownDeps =
    "cannot drop due to the following dependent objects : " <> depObjsTxt
  where
    depObjsTxt = commaSeparated $ reportSchemaObjs deps:unknownDeps

parseDropNotice :: (QErrM m) => Text -> m [Either Text SchemaObjId]
parseDropNotice t = do
  cascadeLines <- getCascadeLines
  mapM parseCascadeLine cascadeLines
  where
    dottedTxtToQualTable dt =
      case T.split (=='.') dt of
        [tn] -> return $ QualifiedObject publicSchema $ TableName tn
        [sn, tn] -> return $ QualifiedObject (SchemaName sn) $ TableName tn
        _ -> throw400 ParseFailed $ "parsing dotted table failed : " <> dt

    getCascadeLines = do
      detailLines <- case T.stripPrefix "NOTICE:" t of
        Just rest -> case T.splitOn "DETAIL:" $ T.strip rest of
          [singleDetail] -> return [singleDetail]
          [_, detailTxt] -> return $ T.lines $ T.strip detailTxt
          _ -> throw500 "splitOn DETAIL has unexpected structure"
        Nothing -> throw500 "unexpected beginning of notice"
      let cascadeLines = mapMaybe (T.stripPrefix "drop cascades to") detailLines
      when (length detailLines /= length cascadeLines) $
        throw500 "unexpected lines in drop notice"
      return $ map T.strip cascadeLines

    parseCascadeLine cl
      | T.isPrefixOf "view" cl =
          case T.words cl of
            [_, vn] -> do
              qt <- dottedTxtToQualTable vn
              return $ Right $ SOTable qt
            _ -> throw500 $ "failed to parse view cascade line : " <> cl
      | T.isPrefixOf "constraint" cl =
          case T.words cl of
            [_, cn, _, _, tn] -> do
              qt <- dottedTxtToQualTable tn
              return $ Right $ SOTableObj qt $
                TOForeignKey $ ConstraintName cn
            _ -> throw500 $ "failed to parse constraint cascade line : " <> cl
      | otherwise = return $ Left cl

getPGDeps :: Q.Tx () -> Q.TxE QErr [Either Text SchemaObjId]
getPGDeps tx = do
  dropNotices <- Q.catchE defaultTxErrorHandler $ do
    Q.unitQ "SAVEPOINT hdb_get_pg_deps" () False
    dropNotices <- snd <$> Q.withNotices tx
    Q.unitQ "ROLLBACK TO SAVEPOINT hdb_get_pg_deps" () False
    Q.unitQ "RELEASE SAVEPOINT hdb_get_pg_deps" () False
    return dropNotices
  case dropNotices of
    [] -> return []
    [notice] -> parseDropNotice notice
    _ -> throw500 "unexpected number of notices when getting dependencies"

getIndirectDeps
  :: (CacheRM m, MonadTx m)
  => [SchemaObjId] -> Q.Tx ()
  -> m ([SchemaObjId], [Text])
getIndirectDeps initDeps tx = do
  sc <- askSchemaCache
  -- Now, trial run the drop sql to get pg dependencies
  pgDeps <- liftTx $ getPGDeps tx
  let (unparsedLines, parsedObjIds) = partitionEithers pgDeps
      indirectDeps = HS.fromList $ parsedObjIds <>
                     concatMap (getDependentObjs sc) parsedObjIds
      newDeps = indirectDeps `HS.difference` HS.fromList initDeps
  return (HS.toList newDeps, unparsedLines)
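The deleted getPGDeps relied on a savepoint "trial run": execute the destructive SQL inside a savepoint, collect the NOTICE output it produces, then roll back so nothing is actually dropped. A minimal sketch of the same pattern, written against postgresql-simple for clarity (an assumed dependency, not the pg-client API this module uses; error handling omitted):

{-# LANGUAGE OverloadedStrings #-}
-- Illustrative sketch, not part of the commit. Assumes the caller is already
-- inside a transaction, since savepoints only exist within one.
import Database.PostgreSQL.Simple (Connection, execute_)

trialRun :: Connection -> IO a -> IO a
trialRun conn action = do
  _ <- execute_ conn "SAVEPOINT trial_run"
  result <- action  -- e.g. run a DROP and capture the NOTICEs it emits
  _ <- execute_ conn "ROLLBACK TO SAVEPOINT trial_run"  -- undo the drop
  _ <- execute_ conn "RELEASE SAVEPOINT trial_run"
  pure result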
@ -7,6 +7,7 @@ module Hasura.RQL.DDL.EventTrigger
  , RedeliverEventQuery
  , runRedeliverEvent
  , runInvokeEventTrigger
  , createPostgresTableEventTrigger

  -- TODO(from master): review
  , mkEventTriggerInfo
@ -20,20 +21,21 @@ module Hasura.RQL.DDL.EventTrigger

import Hasura.Prelude

import qualified Data.Environment           as Env
import qualified Data.HashMap.Strict        as HM
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.Text                  as T
import qualified Data.Text.Extended         as T
import qualified Data.Text.Lazy             as TL
import qualified Database.PG.Query          as Q
import qualified Text.Shakespeare.Text      as ST
import qualified Data.Environment           as Env
import qualified Data.HashMap.Strict        as HM
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.Text                  as T
import qualified Data.Text.Lazy             as TL
import qualified Database.PG.Query          as Q
import qualified Text.Shakespeare.Text      as ST

import Control.Lens ((.~))
import Control.Lens ((.~))
import Data.Aeson
import Data.Text.Extended

import qualified Hasura.Backends.Postgres.SQL.DML as S
import qualified Hasura.Backends.Postgres.SQL.DML as S

import Hasura.Backends.Postgres.Execute.Types
import Hasura.Backends.Postgres.SQL.Types
import Hasura.EncJSON
import Hasura.RQL.DDL.Headers
@ -138,24 +140,21 @@ archiveEvents trn =
    WHERE trigger_name = $1
  |] (Identity trn) False

fetchEvent :: EventId -> Q.TxE QErr (EventId, Bool)
fetchEvent eid = do
checkEvent :: EventId -> Q.TxE QErr ()
checkEvent eid = do
  events <- Q.listQE defaultTxErrorHandler
    [Q.sql|
      SELECT l.id, l.locked IS NOT NULL AND l.locked >= (NOW() - interval '30 minute')
      SELECT l.locked IS NOT NULL AND l.locked >= (NOW() - interval '30 minute')
      FROM hdb_catalog.event_log l
      JOIN hdb_catalog.event_triggers e
      ON l.trigger_name = e.name
      WHERE l.id = $1
    |] (Identity eid) True
  event <- getEvent events
  assertEventUnlocked event
  return event
  where
    getEvent []    = throw400 NotExists "event not found"
    getEvent (x:_) = return x

    assertEventUnlocked (_, locked) = when locked $
    assertEventUnlocked (Identity locked) = when locked $
      throw400 Busy "event is already being processed"
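The query in checkEvent treats an event as busy only when its lock both exists and is fresher than 30 minutes; staler locks are treated as abandoned so the event can be redelivered. A minimal sketch of the same predicate over plain UTCTime values (illustrative names, not part of the diff):

-- Illustrative sketch, not part of the commit: the "recently locked" test
-- that the SQL above evaluates inside the database.
import Data.Time (UTCTime, addUTCTime)

recentlyLocked :: UTCTime -> Maybe UTCTime -> Bool
recentlyLocked now mLocked = case mLocked of
  Nothing     -> False                                        -- never locked
  Just locked -> locked >= addUTCTime (negate (30 * 60)) now  -- within 30 minutes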
markForDelivery :: EventId -> Q.TxE QErr ()
@ -169,12 +168,12 @@ markForDelivery eid =
    WHERE id = $1
  |] (Identity eid) True

subTableP1 :: (UserInfoM m, QErrM m, CacheRM m) => CreateEventTriggerQuery -> m (QualifiedTable, Bool, EventTriggerConf)
subTableP1 (CreateEventTriggerQuery name qt insert update delete enableManual retryConf webhook webhookFromEnv mheaders replace) = do
  ti <- askTableCoreInfo qt
resolveEventTriggerQuery :: (UserInfoM m, QErrM m, CacheRM m) => CreateEventTriggerQuery -> m (TableCoreInfo 'Postgres, Bool, EventTriggerConf)
resolveEventTriggerQuery (CreateEventTriggerQuery source name qt insert update delete enableManual retryConf webhook webhookFromEnv mheaders replace) = do
  ti <- askTableCoreInfo source qt
  -- can only replace for same table
  when replace $ do
    ti' <- _tiCoreInfo <$> askTabInfoFromTrigger name
    ti' <- _tiCoreInfo <$> askTabInfoFromTrigger source name
    when (_tciName ti' /= _tciName ti) $ throw400 NotSupported "cannot replace table or schema for trigger"

  assertCols ti insert
@ -182,7 +181,7 @@ subTableP1 (CreateEventTriggerQuery name qt insert update delete enableManual re
  assertCols ti delete

  let rconf = fromMaybe defaultRetryConf retryConf
  return (qt, replace, EventTriggerConf name (TriggerOpsDef insert update delete enableManual) webhook webhookFromEnv rconf mheaders)
  return (ti, replace, EventTriggerConf name (TriggerOpsDef insert update delete enableManual) webhook webhookFromEnv rconf mheaders)
  where
    assertCols _ Nothing = return ()
    assertCols ti (Just sos) = do
@ -194,10 +193,11 @@ subTableP1 (CreateEventTriggerQuery name qt insert update delete enableManual re
mkEventTriggerInfo
  :: QErrM m
  => Env.Environment
  -> SourceName
  -> QualifiedTable
  -> EventTriggerConf
  -> m (EventTriggerInfo, [SchemaDependency])
mkEventTriggerInfo env qt (EventTriggerConf name def webhook webhookFromEnv rconf mheaders) = do
mkEventTriggerInfo env source qt (EventTriggerConf name def webhook webhookFromEnv rconf mheaders) = do
  webhookConf <- case (webhook, webhookFromEnv) of
    (Just w, Nothing) -> return $ WCValue w
    (Nothing, Just wEnv) -> return $ WCEnv wEnv
@ -206,11 +206,11 @@ mkEventTriggerInfo env qt (EventTriggerConf name def webhook webhookFromEnv rcon
  webhookInfo <- getWebhookInfoFromConf env webhookConf
  headerInfos <- getHeaderInfosFromConf env headerConfs
  let eTrigInfo = EventTriggerInfo name def rconf webhookInfo headerInfos
      tabDep = SchemaDependency (SOTable qt) DRParent
  pure (eTrigInfo, tabDep:getTrigDefDeps qt def)
      tabDep = SchemaDependency (SOSourceObj source $ SOITable qt) DRParent
  pure (eTrigInfo, tabDep:getTrigDefDeps source qt def)

getTrigDefDeps :: QualifiedTable -> TriggerOpsDef -> [SchemaDependency]
getTrigDefDeps qt (TriggerOpsDef mIns mUpd mDel _) =
getTrigDefDeps :: SourceName -> QualifiedTable -> TriggerOpsDef -> [SchemaDependency]
getTrigDefDeps source qt (TriggerOpsDef mIns mUpd mDel _) =
  mconcat $ catMaybes [ subsOpSpecDeps <$> mIns
                      , subsOpSpecDeps <$> mUpd
                      , subsOpSpecDeps <$> mDel
@ -220,45 +220,72 @@ getTrigDefDeps qt (TriggerOpsDef mIns mUpd mDel _) =
    subsOpSpecDeps os =
      let cols = getColsFromSub $ sosColumns os
          colDeps = flip map cols $ \col ->
            SchemaDependency (SOTableObj qt (TOCol col)) DRColumn
            SchemaDependency (SOSourceObj source $ SOITableObj qt (TOCol col)) DRColumn
          payload = maybe [] getColsFromSub (sosPayload os)
          payloadDeps = flip map payload $ \col ->
            SchemaDependency (SOTableObj qt (TOCol col)) DRPayload
            SchemaDependency (SOSourceObj source $ SOITableObj qt (TOCol col)) DRPayload
      in colDeps <> payloadDeps
    getColsFromSub sc = case sc of
      SubCStar -> []
      SubCArray pgcols -> pgcols

createEventTriggerQueryMetadata
  :: (QErrM m, UserInfoM m, CacheRWM m, MetadataM m)
  => CreateEventTriggerQuery -> m (TableCoreInfo 'Postgres, EventTriggerConf)
createEventTriggerQueryMetadata q = do
  (tableCoreInfo, replace, triggerConf) <- resolveEventTriggerQuery q
  let table = cetqTable q
      source = cetqSource q
      triggerName = etcName triggerConf
      metadataObj = MOSourceObjId source $ SMOTableObj table $ MTOTrigger triggerName
  buildSchemaCacheFor metadataObj
    $ MetadataModifier
    $ tableMetadataSetter source table.tmEventTriggers %~
      if replace then ix triggerName .~ triggerConf
      else OMap.insert triggerName triggerConf
  pure (tableCoreInfo, triggerConf)

runCreateEventTriggerQuery
  :: (QErrM m, UserInfoM m, CacheRWM m, MetadataM m)
  => CreateEventTriggerQuery -> m EncJSON
runCreateEventTriggerQuery q = do
  (qt, replace, etc) <- subTableP1 q
  let triggerName = etcName etc
      metadataObj = MOTableObj qt $ MTOTrigger triggerName
  buildSchemaCacheFor metadataObj
    $ MetadataModifier
    $ metaTables.ix qt.tmEventTriggers %~
      if replace then ix triggerName .~ etc
      else OMap.insert triggerName etc
  void $ createEventTriggerQueryMetadata q
  pure successMsg

-- | Create the table event trigger in the database in a @'/v1/query' API
-- transaction as soon as @'runCreateEventTriggerQuery' is called, and when
-- building the schema cache.
createPostgresTableEventTrigger
  :: (MonadTx m, HasSQLGenCtx m)
  => QualifiedTable
  -> [ColumnInfo 'Postgres]
  -> TriggerName
  -> TriggerOpsDef
  -> m ()
createPostgresTableEventTrigger table columns triggerName opsDefinition = do
  -- Clean all existing triggers
  liftTx $ delTriggerQ triggerName -- executes DROP IF EXISTS.. sql
  -- Create the given triggers
  mkAllTriggersQ triggerName table columns opsDefinition

runDeleteEventTriggerQuery
  :: (MonadTx m, CacheRWM m, MetadataM m)
  :: (MonadError QErr m, CacheRWM m, MonadIO m, MetadataM m)
  => DeleteEventTriggerQuery -> m EncJSON
runDeleteEventTriggerQuery (DeleteEventTriggerQuery name) = do
  tables <- scTables <$> askSchemaCache
runDeleteEventTriggerQuery (DeleteEventTriggerQuery source name) = do
  -- liftTx $ delEventTriggerFromCatalog name
  SourceInfo _ tables _ sourceConfig <- askPGSourceCache source
  let maybeTable = HM.lookup name $ HM.unions $
        flip map (HM.toList tables) $ \(table, tableInfo) ->
          HM.map (const table) $ _tiEventTriggerInfoMap tableInfo
  table <- onNothing maybeTable $ throw400 NotExists $
    "event trigger with name " <> name T.<<> " does not exist"
    "event trigger with name " <> name <<> " does not exist"

  withNewInconsistentObjsCheck
    $ buildSchemaCache
    $ MetadataModifier
    $ metaTables.ix table %~ dropEventTriggerInMetadata name
  liftTx do
    $ tableMetadataSetter source table %~ dropEventTriggerInMetadata name

  liftEitherM $ liftIO $ runPgSourceWriteTx sourceConfig $ do
    delTriggerQ name
    archiveEvents name
  pure successMsg
@ -267,18 +294,18 @@ dropEventTriggerInMetadata :: TriggerName -> TableMetadata -> TableMetadata
dropEventTriggerInMetadata name =
  tmEventTriggers %~ OMap.delete name

deliverEvent
  :: (QErrM m, MonadTx m)
  => RedeliverEventQuery -> m EncJSON
deliverEvent (RedeliverEventQuery eventId) = do
  _ <- liftTx $ fetchEvent eventId
  liftTx $ markForDelivery eventId
  return successMsg
deliverEvent :: EventId -> Q.TxE QErr ()
deliverEvent eventId = do
  checkEvent eventId
  markForDelivery eventId

runRedeliverEvent
  :: (MonadTx m)
  :: (MonadIO m, CacheRM m, QErrM m)
  => RedeliverEventQuery -> m EncJSON
runRedeliverEvent = deliverEvent
runRedeliverEvent (RedeliverEventQuery eventId source) = do
  sourceConfig <- _pcConfiguration <$> askPGSourceCache source
  liftEitherM $ liftIO $ runPgSourceWriteTx sourceConfig $ deliverEvent eventId
  pure successMsg

insertManualEvent
  :: QualifiedTable
@ -297,13 +324,15 @@ insertManualEvent qt trn rowData = do
    getEid (x:_) = return x

runInvokeEventTrigger
  :: (QErrM m, CacheRM m, MonadTx m)
  :: (MonadIO m, QErrM m, CacheRM m)
  => InvokeEventTriggerQuery -> m EncJSON
runInvokeEventTrigger (InvokeEventTriggerQuery name payload) = do
  trigInfo <- askEventTriggerInfo name
runInvokeEventTrigger (InvokeEventTriggerQuery name source payload) = do
  trigInfo <- askEventTriggerInfo source name
  assertManual $ etiOpsDef trigInfo
  ti <- askTabInfoFromTrigger name
  eid <- liftTx $ insertManualEvent (_tciName $ _tiCoreInfo ti) name payload
  ti <- askTabInfoFromTrigger source name
  sourceConfig <- _pcConfiguration <$> askPGSourceCache source
  eid <- liftEitherM $ liftIO $ runPgSourceWriteTx sourceConfig $
    insertManualEvent (_tciName $ _tiCoreInfo ti) name payload
  return $ encJFromJValue $ object ["event_id" .= eid]
  where
    assertManual (TriggerOpsDef _ _ _ man) = case man of
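A recurring shape in the rewritten handlers above: resolve the named source's configuration from the schema cache, then run the write transaction against that source rather than against the (now separate) metadata database. A simplified, self-contained sketch of that lookup-then-run shape (all names are stand-ins, not the engine's real types):

{-# LANGUAGE OverloadedStrings #-}
-- Illustrative sketch, not part of the commit.
import qualified Data.Map.Strict as Map
import Data.Text (Text)

type SourceName = Text
newtype SourceConfig = SourceConfig { scConnUrl :: Text }  -- stands in for the PG config
type SourceCache = Map.Map SourceName SourceConfig

-- Mirrors askPGSourceCache + runPgSourceWriteTx: fail if the source is
-- unknown, otherwise hand its configuration to the transaction runner.
withSourceConfig :: SourceCache -> SourceName -> (SourceConfig -> IO a) -> IO (Either Text a)
withSourceConfig cache name runTx =
  case Map.lookup name cache of
    Nothing     -> pure $ Left $ "source " <> name <> " not found"
    Just config -> Right <$> runTx config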
@ -12,22 +12,13 @@ module Hasura.RQL.DDL.Metadata

import Hasura.Prelude

-- <<<<<<< HEAD TODO: karthikeyan
-- import qualified Data.Aeson.Ordered as AO
-- import qualified Data.HashMap.Strict as HM
-- import qualified Data.HashMap.Strict.InsOrd as HMIns
-- import qualified Data.HashSet as HS
-- import qualified Data.HashSet.InsOrd as HSIns
-- import qualified Data.List as L
-- import qualified Database.PG.Query as Q
-- import Data.Text.NonEmpty
-- =======
import qualified Data.Aeson.Ordered         as AO
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.HashSet               as HS
import qualified Data.List                  as L
-- >>>>>>> main
import qualified Database.PG.Query          as Q

import Control.Lens ((.~), (^?))
import Data.Aeson

import Hasura.RQL.DDL.Action
@ -46,10 +37,24 @@ import Hasura.RQL.DDL.Metadata.Types
import Hasura.RQL.Types

runClearMetadata
  :: (CacheRWM m, MetadataM m, MonadTx m)
  :: (CacheRWM m, MetadataM m, MonadIO m, QErrM m)
  => ClearMetadata -> m EncJSON
runClearMetadata _ = do
  runReplaceMetadata emptyMetadata
  metadata <- getMetadata
  -- We can infer whether the server is started with `--database-url` option
  -- (or corresponding env variable) by checking the existence of @'defaultSource'
  -- in current metadata.
  let maybeDefaultSourceMetadata = metadata ^? metaSources.ix defaultSource
      emptyMetadata' = case maybeDefaultSourceMetadata of
        Nothing -> emptyMetadata
        Just defaultSourceMetadata ->
          -- If default postgres source is defined, we need to set metadata
          -- which contains only default source without any tables and functions.
          let emptyDefaultSource = SourceMetadata defaultSource mempty mempty
                                   $ _smConfiguration defaultSourceMetadata
          in emptyMetadata
             & metaSources %~ OMap.insert defaultSource emptyDefaultSource
  runReplaceMetadata $ RMWithSources emptyMetadata'
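The defaultSource probe in runClearMetadata is just a lens traversal over the ordered map of sources. A tiny sketch of the same `^? ... ix ...` idiom on an ordinary Map (illustrative values only, not part of the diff):

-- Illustrative sketch, not part of the commit.
import Control.Lens ((^?), ix)
import qualified Data.Map.Strict as Map

main :: IO ()
main = do
  let sources = Map.fromList [("default", "postgres://localhost/db")]
  -- Just "postgres://localhost/db" when a default source is configured,
  -- Nothing when the server was started without one.
  print $ sources ^? ix "default"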
{- Note [Clear postgres schema for dropped triggers]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -67,19 +72,35 @@ runReplaceMetadata
  :: ( QErrM m
     , CacheRWM m
     , MetadataM m
     , MonadTx m
     , MonadIO m
     )
  => Metadata -> m EncJSON
runReplaceMetadata metadata = do
  => ReplaceMetadata -> m EncJSON
runReplaceMetadata replaceMetadata = do
  oldMetadata <- getMetadata
  metadata <- case replaceMetadata of
    RMWithSources m -> pure m
    RMWithoutSources MetadataNoSources{..} -> do
      defaultSourceMetadata <- onNothing (OMap.lookup defaultSource $ _metaSources oldMetadata) $
        throw400 NotSupported $ "cannot import metadata without sources since no default source is defined"
      let newDefaultSourceMetadata = defaultSourceMetadata
            { _smTables = _mnsTables
            , _smFunctions = _mnsFunctions
            }
      pure $ (metaSources.ix defaultSource .~ newDefaultSourceMetadata) oldMetadata
  putMetadata metadata
  buildSchemaCacheStrict
  -- See Note [Clear postgres schema for dropped triggers]
  let getTriggersMap = OMap.unions . map _tmEventTriggers . OMap.elems . _metaTables
      oldTriggersMap = getTriggersMap oldMetadata
      newTriggersMap = getTriggersMap metadata
      droppedTriggers = OMap.keys $ oldTriggersMap `OMap.difference` newTriggersMap
  for_ droppedTriggers $ \name -> liftTx $ delTriggerQ name >> archiveEvents name
  for_ (OMap.toList $ _metaSources metadata) $ \(source, newSourceCache) ->
    onJust (OMap.lookup source $ _metaSources oldMetadata) $ \oldSourceCache -> do
      let getTriggersMap = OMap.unions . map _tmEventTriggers . OMap.elems . _smTables
          oldTriggersMap = getTriggersMap oldSourceCache
          newTriggersMap = getTriggersMap newSourceCache
          droppedTriggers = OMap.keys $ oldTriggersMap `OMap.difference` newTriggersMap
      sourceConfig <- _pcConfiguration <$> askPGSourceCache source
      for_ droppedTriggers $
        \name -> liftEitherM $ liftIO $ runExceptT $
          runLazyTx (_pscExecCtx sourceConfig) Q.ReadWrite $
            liftTx $ delTriggerQ name >> archiveEvents name

  pure successMsg

@ -97,6 +118,7 @@ runReloadMetadata (ReloadMetadata reloadRemoteSchemas) = do
      cacheInvalidations = CacheInvalidations
        { ciMetadata = True
        , ciRemoteSchemas = remoteSchemaInvalidations
        , ciSources = HS.singleton defaultSource
        }
  metadata <- getMetadata
  buildSchemaCacheWithOptions CatalogUpdate cacheInvalidations metadata
@ -137,19 +159,20 @@ runDropInconsistentMetadata _ = do

purgeMetadataObj :: MetadataObjId -> MetadataModifier
purgeMetadataObj = \case
  MOTable qt -> dropTableInMetadata qt
  MOTableObj qt tableObj ->
    MetadataModifier $
      metaTables.ix qt %~ case tableObj of
  MOSource source -> MetadataModifier $ metaSources %~ OMap.delete source
  MOSourceObjId source sourceObjId -> case sourceObjId of
    SMOTable qt -> dropTableInMetadata source qt
    SMOTableObj qt tableObj -> MetadataModifier $
      tableMetadataSetter source qt %~ case tableObj of
        MTORel rn _ -> dropRelationshipInMetadata rn
        MTOPerm rn pt -> dropPermissionInMetadata rn pt
        MTOTrigger trn -> dropEventTriggerInMetadata trn
        MTOComputedField ccn -> dropComputedFieldInMetadata ccn
        MTORemoteRelationship rn -> dropRemoteRelationshipInMetadata rn
  MOFunction qf -> dropFunctionInMetadata qf
    SMOFunction qf -> dropFunctionInMetadata source qf
  MORemoteSchema rsn -> dropRemoteSchemaInMetadata rsn
  MORemoteSchemaPermissions rsName role -> dropRemoteSchemaPermissionInMetadata rsName role
  MOCustomTypes -> clearCustomTypesInMetadata
  MOAction action -> dropActionInMetadata action
  MOAction action -> dropActionInMetadata action -- Nothing
  MOActionPermission action role -> dropActionPermissionInMetadata action role
  MOCronTrigger ctName -> dropCronTriggerInMetadata ctName
@ -12,11 +12,11 @@ where
import Hasura.Prelude

import qualified Data.Aeson                    as J
import qualified Data.HashMap.Strict           as Map
import qualified Data.HashMap.Strict.InsOrd    as OM
import qualified Data.HashSet.InsOrd           as SetIns
import qualified Data.Text                     as T
import qualified Data.Vector                   as V
import qualified Data.HashMap.Strict           as Map
import qualified Language.GraphQL.Draft.Parser as G
import qualified Language.GraphQL.Draft.Syntax as G
import qualified Language.Haskell.TH.Syntax    as TH
@ -38,22 +38,15 @@ import Hasura.RQL.DDL.Metadata.Types
import Hasura.RQL.Types

genMetadata :: Gen Metadata
genMetadata = do
  version <- arbitrary
genMetadata =
  Metadata
    <$> arbitrary
    <*> genFunctionsMetadata version
    <*> arbitrary
    <*> arbitrary
    <*> arbitrary
    <*> arbitrary
    <*> arbitrary
    <*> arbitrary
  where
    genFunctionsMetadata :: MetadataVersion -> Gen Functions
    genFunctionsMetadata = \case
      MVVersion1 -> OM.fromList . map (\qf -> (qf, FunctionMetadata qf emptyFunctionConfig)) <$> arbitrary
      MVVersion2 -> arbitrary

instance (Arbitrary k, Eq k, Hashable k, Arbitrary v) => Arbitrary (InsOrdHashMap k v) where
  arbitrary = OM.fromList <$> arbitrary
@ -70,6 +63,18 @@ instance Arbitrary MetadataVersion where
instance Arbitrary FunctionMetadata where
  arbitrary = genericArbitrary

instance Arbitrary PostgresPoolSettings where
  arbitrary = genericArbitrary

instance Arbitrary PostgresSourceConnInfo where
  arbitrary = genericArbitrary

instance Arbitrary SourceConfiguration where
  arbitrary = genericArbitrary

instance Arbitrary SourceMetadata where
  arbitrary = genericArbitrary

instance Arbitrary TableCustomRootFields where
  arbitrary = uniqueRootFields
    where
@ -10,6 +10,7 @@ module Hasura.RQL.DDL.Metadata.Types
  , DumpInternalState(..)
  , GetInconsistentMetadata(..)
  , DropInconsistentMetadata(..)
  , ReplaceMetadata(..)
  ) where

import Hasura.Prelude
@ -71,3 +72,20 @@ $(deriveToJSON defaultOptions ''DropInconsistentMetadata)

instance FromJSON DropInconsistentMetadata where
  parseJSON _ = return DropInconsistentMetadata

data ReplaceMetadata
  = RMWithSources !Metadata
  | RMWithoutSources !MetadataNoSources
  deriving (Show, Eq)

instance FromJSON ReplaceMetadata where
  parseJSON = withObject "Object" $ \o -> do
    version <- o .:? "version" .!= MVVersion1
    case version of
      MVVersion3 -> RMWithSources <$> parseJSON (Object o)
      _ -> RMWithoutSources <$> parseJSON (Object o)

instance ToJSON ReplaceMetadata where
  toJSON = \case
    RMWithSources v -> toJSON v
    RMWithoutSources v -> toJSON v
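The ReplaceMetadata parser peeks at a "version" key and then re-parses the whole object as one of two shapes, which is how the new multi-source metadata format stays backwards compatible with older exports. A self-contained sketch of that dispatch with plain aeson (Payload and its constructors are hypothetical):

{-# LANGUAGE OverloadedStrings #-}
-- Illustrative sketch, not part of the commit.
import Data.Aeson

data Payload = V3Payload Value | LegacyPayload Value
  deriving Show

instance FromJSON Payload where
  parseJSON = withObject "Payload" $ \o -> do
    version <- o .:? "version" .!= (1 :: Int)  -- an absent key means the oldest version
    if version >= 3
      then V3Payload     <$> parseJSON (Object o)
      else LegacyPayload <$> parseJSON (Object o)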
@ -31,8 +31,6 @@ module Hasura.RQL.DDL.Permission

  , SetPermComment(..)
  , runSetPermComment

  , fetchPermDef
  ) where

import Hasura.Prelude
@ -40,8 +38,8 @@ import Hasura.Prelude
import qualified Data.HashMap.Strict        as HM
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.HashSet               as HS
import qualified Database.PG.Query          as Q

import Control.Lens ((.~))
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
@ -52,8 +50,8 @@ import Hasura.EncJSON
import Hasura.RQL.DDL.Permission.Internal
import Hasura.RQL.DML.Internal hiding (askPermInfo)
import Hasura.RQL.Types
import Hasura.SQL.Types
import Hasura.Session
import Hasura.SQL.Types


@ -90,17 +88,18 @@ type CreateInsPerm b = CreatePerm (InsPerm b)

procSetObj
  :: (QErrM m)
  => QualifiedTable
  => SourceName
  -> QualifiedTable
  -> FieldInfoMap (FieldInfo 'Postgres)
  -> Maybe (ColumnValues Value)
  -> m (PreSetColsPartial 'Postgres, [Text], [SchemaDependency])
procSetObj tn fieldInfoMap mObj = do
procSetObj source tn fieldInfoMap mObj = do
  (setColTups, deps) <- withPathK "set" $
    fmap unzip $ forM (HM.toList setObj) $ \(pgCol, val) -> do
      ty <- askPGType fieldInfoMap pgCol $
        "column " <> pgCol <<> " not found in table " <>> tn
      sqlExp <- valueParser (CollectableTypeScalar ty) val
      let dep = mkColDep (getDepReason sqlExp) tn pgCol
      let dep = mkColDep (getDepReason sqlExp) source tn pgCol
      return ((pgCol, sqlExp), dep)
  return (HM.fromList setColTups, depHeaders, deps)
  where
@ -116,7 +115,8 @@ class (ToJSON a) => IsPerm a where

  buildPermInfo
    :: (QErrM m, TableCoreInfoRM 'Postgres m)
    => QualifiedTable
    => SourceName
    -> QualifiedTable
    -> FieldInfoMap (FieldInfo 'Postgres)
    -> PermDef a
    -> m (WithDeps (PermInfo a))
@ -135,26 +135,31 @@ class (ToJSON a) => IsPerm a where
runCreatePerm
  :: (UserInfoM m, CacheRWM m, IsPerm a, MonadError QErr m, MetadataM m)
  => CreatePerm a -> m EncJSON
runCreatePerm (WithTable tn pd) = do
  let pt = permAccToType $ getPermAcc1 pd
runCreatePerm (WithTable source tn pd) = do
  tableInfo <- askTabInfo source tn
  let permAcc = getPermAcc1 pd
      pt = permAccToType permAcc
      ptText = permTypeToCode pt
      role = _pdRole pd
      metadataObject = MOTableObj tn $ MTOPerm role pt
      metadataObject = MOSourceObjId source $ SMOTableObj tn $ MTOPerm role pt
  onJust (getPermInfoMaybe role permAcc tableInfo) $ const $ throw400 AlreadyExists $
    ptText <> " permission already defined on table " <> tn <<> " with role " <>> role
  buildSchemaCacheFor metadataObject
    $ MetadataModifier
    $ metaTables.ix tn %~ addPermToMetadata pd
    $ tableMetadataSetter source tn %~ addPermToMetadata pd
  pure successMsg

runDropPerm
  :: (IsPerm a, UserInfoM m, CacheRWM m, MonadTx m, MetadataM m)
  :: (IsPerm a, UserInfoM m, CacheRWM m, MonadError QErr m, MetadataM m)
  => DropPerm a -> m EncJSON
runDropPerm dp@(DropPerm table role) = do
  tabInfo <- askTabInfo table
runDropPerm dp@(DropPerm source table role) = do
  tabInfo <- askTabInfo source table
  let permType = permAccToType $ getPermAcc2 dp
  askPermInfo tabInfo role $ getPermAcc2 dp
  void $ askPermInfo tabInfo role $ getPermAcc2 dp
  withNewInconsistentObjsCheck
    $ buildSchemaCache
    $ MetadataModifier
    $ metaTables.ix table %~ dropPermissionInMetadata role permType
    $ tableMetadataSetter source table %~ dropPermissionInMetadata role permType
  return successMsg

dropPermissionInMetadata
@ -167,20 +172,21 @@ dropPermissionInMetadata rn = \case

buildInsPermInfo
  :: (QErrM m, TableCoreInfoRM 'Postgres m)
  => QualifiedTable
  => SourceName
  -> QualifiedTable
  -> FieldInfoMap (FieldInfo 'Postgres)
  -> PermDef (InsPerm 'Postgres)
  -> m (WithDeps (InsPermInfo 'Postgres))
buildInsPermInfo tn fieldInfoMap (PermDef _rn (InsPerm checkCond set mCols mBackendOnly) _) =
buildInsPermInfo source tn fieldInfoMap (PermDef _rn (InsPerm checkCond set mCols mBackendOnly) _) =
  withPathK "permission" $ do
    (be, beDeps) <- withPathK "check" $ procBoolExp tn fieldInfoMap checkCond
    (setColsSQL, setHdrs, setColDeps) <- procSetObj tn fieldInfoMap set
    (be, beDeps) <- withPathK "check" $ procBoolExp source tn fieldInfoMap checkCond
    (setColsSQL, setHdrs, setColDeps) <- procSetObj source tn fieldInfoMap set
    void $ withPathK "columns" $ indexedForM insCols $ \col ->
      askPGType fieldInfoMap col ""
    let fltrHeaders = getDependentHeaders checkCond
        reqHdrs = fltrHeaders `union` setHdrs
        insColDeps = map (mkColDep DRUntyped tn) insCols
        deps = mkParentDep tn : beDeps ++ setColDeps ++ insColDeps
        insColDeps = map (mkColDep DRUntyped source tn) insCols
        deps = mkParentDep source tn : beDeps ++ setColDeps ++ insColDeps
        insColsWithoutPresets = insCols \\ HM.keys setColsSQL
    return (InsPermInfo (HS.fromList insColsWithoutPresets) be setColsSQL backendOnly reqHdrs, deps)
  where
@ -202,15 +208,16 @@ instance IsPerm (InsPerm 'Postgres) where

buildSelPermInfo
  :: (QErrM m, TableCoreInfoRM 'Postgres m)
  => QualifiedTable
  => SourceName
  -> QualifiedTable
  -> FieldInfoMap (FieldInfo 'Postgres)
  -> SelPerm 'Postgres
  -> m (WithDeps (SelPermInfo 'Postgres))
buildSelPermInfo tn fieldInfoMap sp = withPathK "permission" $ do
buildSelPermInfo source tn fieldInfoMap sp = withPathK "permission" $ do
  let pgCols = convColSpec fieldInfoMap $ spColumns sp

  (be, beDeps) <- withPathK "filter" $
    procBoolExp tn fieldInfoMap $ spFilter sp
    procBoolExp source tn fieldInfoMap $ spFilter sp

  -- check if the columns exist
  void $ withPathK "columns" $ indexedForM pgCols $ \pgCol ->
@ -227,8 +234,8 @@ buildSelPermInfo tn fieldInfoMap sp = withPathK "permission" $ do
    <<> " are auto-derived from the permissions on its returning table "
    <> returnTable <<> " and cannot be specified manually"

  let deps = mkParentDep tn : beDeps ++ map (mkColDep DRUntyped tn) pgCols
             ++ map (mkComputedFieldDep DRUntyped tn) scalarComputedFields
  let deps = mkParentDep source tn : beDeps ++ map (mkColDep DRUntyped source tn) pgCols
             ++ map (mkComputedFieldDep DRUntyped source tn) scalarComputedFields
      depHeaders = getDependentHeaders $ spFilter sp
      mLimit = spLimit sp

@ -250,8 +257,8 @@ type instance PermInfo (SelPerm b) = SelPermInfo b

instance IsPerm (SelPerm 'Postgres) where
  permAccessor = PASelect
  buildPermInfo tn fieldInfoMap (PermDef _ a _) =
    buildSelPermInfo tn fieldInfoMap a
  buildPermInfo source tn fieldInfoMap (PermDef _ a _) =
    buildSelPermInfo source tn fieldInfoMap a

  addPermToMetadata permDef =
    tmSelectPermissions %~ OMap.insert (_pdRole permDef) permDef
@ -260,24 +267,25 @@ type CreateUpdPerm b = CreatePerm (UpdPerm b)

buildUpdPermInfo
  :: (QErrM m, TableCoreInfoRM 'Postgres m)
  => QualifiedTable
  => SourceName
  -> QualifiedTable
  -> FieldInfoMap (FieldInfo 'Postgres)
  -> UpdPerm 'Postgres
  -> m (WithDeps (UpdPermInfo 'Postgres))
buildUpdPermInfo tn fieldInfoMap (UpdPerm colSpec set fltr check) = do
buildUpdPermInfo source tn fieldInfoMap (UpdPerm colSpec set fltr check) = do
  (be, beDeps) <- withPathK "filter" $
    procBoolExp tn fieldInfoMap fltr
    procBoolExp source tn fieldInfoMap fltr

  checkExpr <- traverse (withPathK "check" . procBoolExp tn fieldInfoMap) check
  checkExpr <- traverse (withPathK "check" . procBoolExp source tn fieldInfoMap) check

  (setColsSQL, setHeaders, setColDeps) <- procSetObj tn fieldInfoMap set
  (setColsSQL, setHeaders, setColDeps) <- procSetObj source tn fieldInfoMap set

  -- check if the columns exist
  void $ withPathK "columns" $ indexedForM updCols $ \updCol ->
    askPGType fieldInfoMap updCol relInUpdErr

  let updColDeps = map (mkColDep DRUntyped tn) updCols
      deps = mkParentDep tn : beDeps ++ maybe [] snd checkExpr ++ updColDeps ++ setColDeps
  let updColDeps = map (mkColDep DRUntyped source tn) updCols
      deps = mkParentDep source tn : beDeps ++ maybe [] snd checkExpr ++ updColDeps ++ setColDeps
      depHeaders = getDependentHeaders fltr
      reqHeaders = depHeaders `union` setHeaders
      updColsWithoutPreSets = updCols \\ HM.keys setColsSQL
@ -293,8 +301,8 @@ type instance PermInfo (UpdPerm b) = UpdPermInfo b

instance IsPerm (UpdPerm 'Postgres) where
  permAccessor = PAUpdate
  buildPermInfo tn fieldInfoMap (PermDef _ a _) =
    buildUpdPermInfo tn fieldInfoMap a
  buildPermInfo source tn fieldInfoMap (PermDef _ a _) =
    buildUpdPermInfo source tn fieldInfoMap a

  addPermToMetadata permDef =
    tmUpdatePermissions %~ OMap.insert (_pdRole permDef) permDef
@ -303,14 +311,15 @@ type CreateDelPerm b = CreatePerm (DelPerm b)

buildDelPermInfo
  :: (QErrM m, TableCoreInfoRM 'Postgres m)
  => QualifiedTable
  => SourceName
  -> QualifiedTable
  -> FieldInfoMap (FieldInfo 'Postgres)
  -> DelPerm 'Postgres
  -> m (WithDeps (DelPermInfo 'Postgres))
buildDelPermInfo tn fieldInfoMap (DelPerm fltr) = do
buildDelPermInfo source tn fieldInfoMap (DelPerm fltr) = do
  (be, beDeps) <- withPathK "filter" $
    procBoolExp tn fieldInfoMap fltr
  let deps = mkParentDep tn : beDeps
    procBoolExp source tn fieldInfoMap fltr
  let deps = mkParentDep source tn : beDeps
      depHeaders = getDependentHeaders fltr
  return (DelPermInfo tn be depHeaders, deps)

@ -319,70 +328,56 @@ type instance PermInfo (DelPerm b) = DelPermInfo b

instance IsPerm (DelPerm 'Postgres) where
  permAccessor = PADelete
  buildPermInfo tn fieldInfoMap (PermDef _ a _) =
    buildDelPermInfo tn fieldInfoMap a
  buildPermInfo source tn fieldInfoMap (PermDef _ a _) =
    buildDelPermInfo source tn fieldInfoMap a

  addPermToMetadata permDef =
    tmDeletePermissions %~ OMap.insert (_pdRole permDef) permDef

data SetPermComment
  = SetPermComment
  { apTable      :: !QualifiedTable
  { apSource     :: !SourceName
  , apTable      :: !QualifiedTable
  , apRole       :: !RoleName
  , apPermission :: !PermType
  , apComment    :: !(Maybe Text)
  } deriving (Show, Eq)

$(deriveJSON (aesonDrop 2 snakeCase) ''SetPermComment)
$(deriveToJSON (aesonDrop 2 snakeCase) ''SetPermComment)

setPermCommentP1 :: (UserInfoM m, QErrM m, CacheRM m) => SetPermComment -> m ()
setPermCommentP1 (SetPermComment qt rn pt _) = do
  tabInfo <- askTabInfo qt
  action tabInfo
  where
    action tabInfo = case pt of
      PTInsert -> assertPermDefined rn PAInsert tabInfo
      PTSelect -> assertPermDefined rn PASelect tabInfo
      PTUpdate -> assertPermDefined rn PAUpdate tabInfo
      PTDelete -> assertPermDefined rn PADelete tabInfo

setPermCommentP2 :: (QErrM m, MonadTx m) => SetPermComment -> m EncJSON
setPermCommentP2 apc = do
  liftTx $ setPermCommentTx apc
  return successMsg
instance FromJSON SetPermComment where
  parseJSON = withObject "Object" $ \o ->
    SetPermComment
      <$> o .:? "source" .!= defaultSource
      <*> o .: "table"
      <*> o .: "role"
      <*> o .: "permission"
      <*> o .:? "comment"

runSetPermComment
  :: (QErrM m, CacheRM m, MonadTx m, UserInfoM m)
  :: (QErrM m, CacheRWM m, MetadataM m)
  => SetPermComment -> m EncJSON
runSetPermComment defn = do
  setPermCommentP1 defn
  setPermCommentP2 defn
runSetPermComment (SetPermComment source table role permType comment) = do
  tableInfo <- askTabInfo source table

setPermCommentTx
  :: SetPermComment
  -> Q.TxE QErr ()
setPermCommentTx (SetPermComment (QualifiedObject sn tn) rn pt comment) =
  Q.unitQE defaultTxErrorHandler [Q.sql|
    UPDATE hdb_catalog.hdb_permission
    SET comment = $1
    WHERE table_schema = $2
      AND table_name = $3
      AND role_name = $4
      AND perm_type = $5
  |] (comment, sn, tn, rn, permTypeToCode pt) True
  -- assert permission exists and return appropriate permission modifier
  permModifier <- case permType of
    PTInsert -> do
      assertPermDefined role PAInsert tableInfo
      pure $ tmInsertPermissions.ix role.pdComment .~ comment
    PTSelect -> do
      assertPermDefined role PASelect tableInfo
      pure $ tmSelectPermissions.ix role.pdComment .~ comment
    PTUpdate -> do
      assertPermDefined role PAUpdate tableInfo
      pure $ tmUpdatePermissions.ix role.pdComment .~ comment
    PTDelete -> do
      assertPermDefined role PADelete tableInfo
      pure $ tmDeletePermissions.ix role.pdComment .~ comment

fetchPermDef
  :: QualifiedTable
  -> RoleName
  -> PermType
  -> Q.TxE QErr (Value, Maybe Text)
fetchPermDef (QualifiedObject sn tn) rn pt =
  first Q.getAltJ . Q.getRow <$> Q.withQE defaultTxErrorHandler
    [Q.sql|
      SELECT perm_def::json, comment
      FROM hdb_catalog.hdb_permission
      WHERE table_schema = $1
        AND table_name = $2
        AND role_name = $3
        AND perm_type = $4
    |] (sn, tn, rn, permTypeToCode pt) True
  let metadataObject = MOSourceObjId source $
                       SMOTableObj table $ MTOPerm role permType
  buildSchemaCacheFor metadataObject
    $ MetadataModifier
    $ tableMetadataSetter source table %~ permModifier
  pure successMsg
@ -20,9 +20,9 @@ import Hasura.Backends.Postgres.SQL.Types
import Hasura.Backends.Postgres.Translate.BoolExp
import Hasura.Backends.Postgres.Translate.Column
import Hasura.RQL.Types
import Hasura.SQL.Types
import Hasura.Server.Utils
import Hasura.Session
import Hasura.SQL.Types


convColSpec :: FieldInfoMap (FieldInfo 'Postgres) -> PermColSpec -> [PGCol]
@ -125,13 +125,14 @@ data CreatePermP1Res a

procBoolExp
  :: (QErrM m, TableCoreInfoRM 'Postgres m)
  => QualifiedTable
  => SourceName
  -> QualifiedTable
  -> FieldInfoMap (FieldInfo 'Postgres)
  -> BoolExp 'Postgres
  -> m (AnnBoolExpPartialSQL 'Postgres, [SchemaDependency])
procBoolExp tn fieldInfoMap be = do
procBoolExp source tn fieldInfoMap be = do
  abe <- annBoolExp valueParser fieldInfoMap $ unBoolExp be
  let deps = getBoolExpDeps tn abe
  let deps = getBoolExpDeps source tn abe
  return (abe, deps)

isReqUserId :: Text -> Bool
@ -198,10 +199,18 @@ injectDefaults qv qt =

data DropPerm a
  = DropPerm
  { dipTable  :: !QualifiedTable
  , dipRole   :: !RoleName
  { dipSource :: !SourceName
  , dipTable  :: !QualifiedTable
  , dipRole   :: !RoleName
  } deriving (Show, Eq)

$(deriveJSON (aesonDrop 3 snakeCase){omitNothingFields=True} ''DropPerm)
$(deriveToJSON (aesonDrop 3 snakeCase){omitNothingFields=True} ''DropPerm)

instance FromJSON (DropPerm a) where
  parseJSON = withObject "DropPerm" $ \o ->
    DropPerm
      <$> o .:? "source" .!= defaultSource
      <*> o .: "table"
      <*> o .: "role"

type family PermInfo a = r | r -> a
@ -21,7 +21,7 @@ import Data.Aeson.Types
|
||||
import Data.Text.Extended
|
||||
import Data.Tuple (swap)
|
||||
|
||||
import Hasura.Backends.Postgres.SQL.Types
|
||||
import Hasura.Backends.Postgres.SQL.Types hiding (TableName)
|
||||
import Hasura.EncJSON
|
||||
import Hasura.RQL.DDL.Deps
|
||||
import Hasura.RQL.DDL.Permission
|
||||
@ -30,15 +30,16 @@ import Hasura.RQL.Types
|
||||
runCreateRelationship
|
||||
:: (MonadError QErr m, CacheRWM m, ToJSON a, MetadataM m)
|
||||
=> RelType -> WithTable (RelDef a) -> m EncJSON
|
||||
runCreateRelationship relType (WithTable tableName relDef) = do
|
||||
runCreateRelationship relType (WithTable source tableName relDef) = do
|
||||
let relName = _rdName relDef
|
||||
-- Check if any field with relationship name already exists in the table
|
||||
tableFields <- _tciFieldInfoMap <$> askTableCoreInfo tableName
|
||||
tableFields <- _tciFieldInfoMap <$> askTableCoreInfo source tableName
|
||||
onJust (HM.lookup (fromRel relName) tableFields) $ const $
|
||||
throw400 AlreadyExists $
|
||||
"field with name " <> relName <<> " already exists in table " <>> tableName
|
||||
let comment = _rdComment relDef
|
||||
metadataObj = MOTableObj tableName $ MTORel relName relType
|
||||
metadataObj = MOSourceObjId source $
|
||||
SMOTableObj tableName $ MTORel relName relType
|
||||
addRelationshipToMetadata <- case relType of
|
||||
ObjRel -> do
|
||||
value <- decodeValue $ toJSON $ _rdUsing relDef
|
||||
@ -49,24 +50,24 @@ runCreateRelationship relType (WithTable tableName relDef) = do
|
||||
|
||||
buildSchemaCacheFor metadataObj
|
||||
$ MetadataModifier
|
||||
$ metaTables.ix tableName %~ addRelationshipToMetadata
|
||||
$ tableMetadataSetter source tableName %~ addRelationshipToMetadata
|
||||
pure successMsg
|
||||
|
||||
runDropRel :: (MonadError QErr m, CacheRWM m, MetadataM m) => DropRel -> m EncJSON
|
||||
runDropRel (DropRel qt rn cascade) = do
|
||||
runDropRel (DropRel source qt rn cascade) = do
|
||||
depObjs <- collectDependencies
|
||||
withNewInconsistentObjsCheck do
|
||||
metadataModifiers <- traverse purgeRelDep depObjs
|
||||
buildSchemaCache $ MetadataModifier $
|
||||
metaTables.ix qt %~
|
||||
tableMetadataSetter source qt %~
|
||||
dropRelationshipInMetadata rn . foldr (.) id metadataModifiers
|
||||
pure successMsg
|
||||
where
|
||||
collectDependencies = do
|
||||
tabInfo <- askTableCoreInfo qt
|
||||
tabInfo <- askTableCoreInfo source qt
|
||||
void $ askRelType (_tciFieldInfoMap tabInfo) rn ""
|
||||
sc <- askSchemaCache
|
||||
let depObjs = getDependentObjs sc (SOTableObj qt $ TORel rn)
|
||||
let depObjs = getDependentObjs sc (SOSourceObj source $ SOITableObj qt $ TORel rn)
|
||||
when (depObjs /= [] && not cascade) $ reportDeps depObjs
|
||||
pure depObjs
|
||||
|
||||
@ -81,26 +82,27 @@ dropRelationshipInMetadata relName =
|
||||
|
||||
objRelP2Setup
|
||||
:: (QErrM m)
|
||||
=> QualifiedTable
|
||||
=> SourceName
|
||||
-> TableName 'Postgres
|
||||
-> HashSet (ForeignKey 'Postgres)
|
||||
-> RelDef ObjRelUsing
|
||||
-> m (RelInfo 'Postgres, [SchemaDependency])
|
||||
objRelP2Setup qt foreignKeys (RelDef rn ru _) = case ru of
|
||||
objRelP2Setup source qt foreignKeys (RelDef rn ru _) = case ru of
|
||||
RUManual rm -> do
|
||||
let refqt = rmTable rm
|
||||
(lCols, rCols) = unzip $ HM.toList $ rmColumns rm
|
||||
mkDependency tableName reason col = SchemaDependency (SOTableObj tableName $ TOCol col) reason
|
||||
mkDependency tableName reason col = SchemaDependency (SOSourceObj source $ SOITableObj tableName $ TOCol col) reason
|
||||
dependencies = map (mkDependency qt DRLeftColumn) lCols
|
||||
<> map (mkDependency refqt DRRightColumn) rCols
|
||||
pure (RelInfo rn ObjRel (rmColumns rm) refqt True True, dependencies)
|
||||
RUFKeyOn columnName -> do
|
||||
ForeignKey constraint foreignTable colMap <- getRequiredFkey columnName (HS.toList foreignKeys)
|
||||
let dependencies =
|
||||
[ SchemaDependency (SOTableObj qt $ TOForeignKey (_cName constraint)) DRFkey
|
||||
, SchemaDependency (SOTableObj qt $ TOCol columnName) DRUsingColumn
|
||||
[ SchemaDependency (SOSourceObj source $ SOITableObj qt $ TOForeignKey (_cName constraint)) DRFkey
|
||||
, SchemaDependency (SOSourceObj source $ SOITableObj qt $ TOCol columnName) DRUsingColumn
|
||||
-- this needs to be added explicitly to handle the remote table being untracked. In this case,
|
||||
-- neither the using_col nor the constraint name will help.
|
||||
, SchemaDependency (SOTable foreignTable) DRRemoteTable
|
||||
, SchemaDependency (SOSourceObj source $ SOITable foreignTable) DRRemoteTable
]
-- TODO(PDV?): this is too optimistic. Some object relationships are nullable, but
-- we are marking some as non-nullable here. This should really be done by
@ -110,26 +112,27 @@ objRelP2Setup qt foreignKeys (RelDef rn ru _) = case ru of
arrRelP2Setup
:: (QErrM m)
=> HashMap QualifiedTable (HashSet (ForeignKey 'Postgres))
-> SourceName
-> QualifiedTable
-> ArrRelDef
-> m (RelInfo 'Postgres, [SchemaDependency])
arrRelP2Setup foreignKeys qt (RelDef rn ru _) = case ru of
arrRelP2Setup foreignKeys source qt (RelDef rn ru _) = case ru of
RUManual rm -> do
let refqt = rmTable rm
(lCols, rCols) = unzip $ HM.toList $ rmColumns rm
deps = map (\c -> SchemaDependency (SOTableObj qt $ TOCol c) DRLeftColumn) lCols
<> map (\c -> SchemaDependency (SOTableObj refqt $ TOCol c) DRRightColumn) rCols
deps = map (\c -> SchemaDependency (SOSourceObj source $ SOITableObj qt $ TOCol c) DRLeftColumn) lCols
<> map (\c -> SchemaDependency (SOSourceObj source $ SOITableObj refqt $ TOCol c) DRRightColumn) rCols
pure (RelInfo rn ArrRel (rmColumns rm) refqt True True, deps)
RUFKeyOn (ArrRelUsingFKeyOn refqt refCol) -> do
foreignTableForeignKeys <- getTableInfo refqt foreignKeys
let keysThatReferenceUs = filter ((== qt) . _fkForeignTable) (HS.toList foreignTableForeignKeys)
ForeignKey constraint _ colMap <- getRequiredFkey refCol keysThatReferenceUs
let deps = [ SchemaDependency (SOTableObj refqt $ TOForeignKey (_cName constraint)) DRRemoteFkey
, SchemaDependency (SOTableObj refqt $ TOCol refCol) DRUsingColumn
let deps = [ SchemaDependency (SOSourceObj source $ SOITableObj refqt $ TOForeignKey (_cName constraint)) DRRemoteFkey
, SchemaDependency (SOSourceObj source $ SOITableObj refqt $ TOCol refCol) DRUsingColumn
-- we don't need to necessarily track the remote table like we did in
-- case of obj relationships as the remote table is indirectly
-- tracked by tracking the constraint name and 'using_col'
, SchemaDependency (SOTable refqt) DRRemoteTable
, SchemaDependency (SOSourceObj source $ SOITable refqt) DRRemoteTable
]
mapping = HM.fromList $ map swap $ HM.toList colMap
pure (RelInfo rn ArrRel mapping refqt False False, deps)
@ -137,7 +140,7 @@ arrRelP2Setup foreignKeys qt (RelDef rn ru _) = case ru of
purgeRelDep
:: (QErrM m)
=> SchemaObjId -> m (TableMetadata -> TableMetadata)
purgeRelDep (SOTableObj _ (TOPerm rn pt)) = pure $ dropPermissionInMetadata rn pt
purgeRelDep (SOSourceObj _ (SOITableObj _ (TOPerm rn pt))) = pure $ dropPermissionInMetadata rn pt
purgeRelDep d = throw500 $ "unexpected dependency of relationship : "
<> reportSchemaObj d

@ -145,17 +148,17 @@ runSetRelComment
:: (CacheRWM m, MonadError QErr m, MetadataM m)
=> SetRelComment -> m EncJSON
runSetRelComment defn = do
tabInfo <- askTableCoreInfo qt
tabInfo <- askTableCoreInfo source qt
relType <- riType <$> askRelType (_tciFieldInfoMap tabInfo) rn ""
let metadataObj = MOTableObj qt $ MTORel rn relType
let metadataObj = MOSourceObjId source $ SMOTableObj qt $ MTORel rn relType
buildSchemaCacheFor metadataObj
$ MetadataModifier
$ metaTables.ix qt %~ case relType of
$ tableMetadataSetter source qt %~ case relType of
ObjRel -> tmObjectRelationships.ix rn.rdComment .~ comment
ArrRel -> tmArrayRelationships.ix rn.rdComment .~ comment
pure successMsg
where
SetRelComment qt rn comment = defn
SetRelComment source qt rn comment = defn

getRequiredFkey
:: (QErrM m)

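Aside: the pattern repeated throughout these hunks is that an object that used to be identified relative to a table (SOTableObj table ...) is now identified relative to a source first (SOSourceObj source $ SOITableObj table ...). A minimal, self-contained sketch of that nesting, using primed stand-in types rather than the real constructors from Hasura.RQL.Types.SchemaCacheTypes:

data TableObjId'  = TOCol' String                                      deriving Show
data SourceObjId' = SOITable' String | SOITableObj' String TableObjId' deriving Show
data SchemaObjId' = SOSourceObj' String SourceObjId'                   deriving Show

-- Before this commit a column dependency was table-scoped; now the source name
-- is the outermost layer, so identical tables in different sources stay distinct.
columnDep :: String -> String -> String -> SchemaObjId'
columnDep source table column =
  SOSourceObj' source (SOITableObj' table (TOCol' column))

main :: IO ()
main = print (columnDep "default" "author" "id")
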
@ -13,9 +13,9 @@ import qualified Data.HashMap.Strict as Map

renameRelP2
:: (QErrM m, CacheRM m)
=> QualifiedTable -> RelName -> RelInfo 'Postgres -> m MetadataModifier
renameRelP2 qt newRN relInfo = withNewInconsistentObjsCheck $ do
tabInfo <- askTableCoreInfo qt
=> SourceName -> QualifiedTable -> RelName -> RelInfo 'Postgres -> m MetadataModifier
renameRelP2 source qt newRN relInfo = withNewInconsistentObjsCheck $ do
tabInfo <- askTableCoreInfo source qt
-- check for conflicts in fieldInfoMap
case Map.lookup (fromRel newRN) $ _tciFieldInfoMap tabInfo of
Nothing -> return ()
@ -24,16 +24,16 @@ renameRelP2 qt newRN relInfo = withNewInconsistentObjsCheck $ do
<<> " to " <> newRN <<> " in table " <> qt <<>
" as a column/relationship with the name already exists"
-- update metadata
execWriterT $ renameRelationshipInMetadata qt oldRN (riType relInfo) newRN
execWriterT $ renameRelationshipInMetadata source qt oldRN (riType relInfo) newRN
where
oldRN = riName relInfo

runRenameRel
:: (MonadError QErr m, CacheRWM m, MetadataM m)
=> RenameRel -> m EncJSON
runRenameRel (RenameRel qt rn newRN) = do
tabInfo <- askTableCoreInfo qt
runRenameRel (RenameRel source qt rn newRN) = do
tabInfo <- askTableCoreInfo source qt
ri <- askRelType (_tciFieldInfoMap tabInfo) rn ""
withNewInconsistentObjsCheck $
renameRelP2 qt newRN ri >>= buildSchemaCache
renameRelP2 source qt newRN ri >>= buildSchemaCache
pure successMsg

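Aside: metaTables.ix qt becomes tableMetadataSetter source qt in every metadata modifier above. The toy below reconstructs the likely shape of that setter (hedged: the real definition ships elsewhere in this commit and may differ); it composes a per-source and a per-table lens, the same way the metaSources.ix source.smTables traversal is used later in this diff to purge dropped tables. Requires the lens and unordered-containers packages.

{-# LANGUAGE TemplateHaskell #-}
import Control.Lens
import qualified Data.HashMap.Strict as M

-- Toy stand-ins: Metadata holds sources; each source holds tables.
newtype TableMetadata  = TableMetadata { _tmComment :: String } deriving Show
newtype SourceMetadata = SourceMetadata { _smTables :: M.HashMap String TableMetadata } deriving Show
newtype Metadata       = Metadata { _metaSources :: M.HashMap String SourceMetadata } deriving Show
makeLenses ''TableMetadata
makeLenses ''SourceMetadata
makeLenses ''Metadata

-- The setter pattern from the hunks above: drill into one source, then one table.
tableMetadataSetter :: String -> String -> Traversal' Metadata TableMetadata
tableMetadataSetter source table = metaSources.ix source.smTables.ix table

main :: IO ()
main = print $ Metadata (M.fromList [("default", SourceMetadata (M.fromList [("author", TableMetadata "")]))])
             & tableMetadataSetter "default" "author" . tmComment .~ "people table"
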
@ -19,13 +19,14 @@ import Hasura.RQL.Types
runCreateRemoteRelationship
:: (MonadError QErr m, CacheRWM m, MetadataM m) => RemoteRelationship -> m EncJSON
runCreateRemoteRelationship RemoteRelationship{..} = do
void $ askTabInfo rtrTable
let metadataObj = MOTableObj rtrTable $ MTORemoteRelationship rtrName
void $ askTabInfo rtrSource rtrTable
let metadataObj = MOSourceObjId rtrSource $
SMOTableObj rtrTable $ MTORemoteRelationship rtrName
metadata = RemoteRelationshipMetadata rtrName $
RemoteRelationshipDef rtrRemoteSchema rtrHasuraFields rtrRemoteField
buildSchemaCacheFor metadataObj
$ MetadataModifier
$ metaTables.ix rtrTable.tmRemoteRelationships
$ tableMetadataSetter rtrSource rtrTable.tmRemoteRelationships
%~ OMap.insert rtrName metadata
pure successMsg

@ -42,11 +43,12 @@ resolveRemoteRelationship remoteRelationship
validateRemoteRelationship remoteRelationship remoteSchemaMap pgColumns
remoteField <- onLeft eitherRemoteField $ throw400 RemoteSchemaError . errorToText
let table = rtrTable remoteRelationship
source = rtrSource remoteRelationship
schemaDependencies =
let tableDep = SchemaDependency (SOTable table) DRTable
let tableDep = SchemaDependency (SOSourceObj source $ SOITable table) DRTable
columnsDep =
map
(flip SchemaDependency DRRemoteRelationship . SOTableObj table . TOCol . pgiColumn)
(flip SchemaDependency DRRemoteRelationship . SOSourceObj source . SOITableObj table . TOCol . pgiColumn)
$ HS.toList $ _rfiHasuraFields remoteField
remoteSchemaDep =
SchemaDependency (SORemoteSchema $ rtrRemoteSchema remoteRelationship) DRRemoteSchema
@ -56,26 +58,28 @@ resolveRemoteRelationship remoteRelationship

runUpdateRemoteRelationship :: (MonadError QErr m, CacheRWM m, MetadataM m) => RemoteRelationship -> m EncJSON
runUpdateRemoteRelationship RemoteRelationship{..} = do
fieldInfoMap <- askFieldInfoMap rtrTable
fieldInfoMap <- askFieldInfoMap rtrSource rtrTable
void $ askRemoteRel fieldInfoMap rtrName
let metadataObj = MOTableObj rtrTable $ MTORemoteRelationship rtrName
let metadataObj = MOSourceObjId rtrSource $
SMOTableObj rtrTable $ MTORemoteRelationship rtrName
metadata = RemoteRelationshipMetadata rtrName $
RemoteRelationshipDef rtrRemoteSchema rtrHasuraFields rtrRemoteField
buildSchemaCacheFor metadataObj
$ MetadataModifier
$ metaTables.ix rtrTable.tmRemoteRelationships
$ tableMetadataSetter rtrSource rtrTable.tmRemoteRelationships
%~ OMap.insert rtrName metadata
pure successMsg

runDeleteRemoteRelationship
:: (MonadError QErr m, CacheRWM m, MetadataM m) => DeleteRemoteRelationship -> m EncJSON
runDeleteRemoteRelationship (DeleteRemoteRelationship table relName)= do
fieldInfoMap <- askFieldInfoMap table
runDeleteRemoteRelationship (DeleteRemoteRelationship source table relName)= do
fieldInfoMap <- askFieldInfoMap source table
void $ askRemoteRel fieldInfoMap relName
let metadataObj = MOTableObj table $ MTORemoteRelationship relName
let metadataObj = MOSourceObjId source $
SMOTableObj table $ MTORemoteRelationship relName
buildSchemaCacheFor metadataObj
$ MetadataModifier
$ metaTables.ix table %~ dropRemoteRelationshipInMetadata relName
$ tableMetadataSetter source table %~ dropRemoteRelationshipInMetadata relName
pure successMsg

dropRemoteRelationshipInMetadata

@ -14,10 +14,10 @@ module Hasura.RQL.DDL.RemoteSchema
import Hasura.Prelude
import Hasura.RQL.DDL.RemoteSchema.Permission

import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as Map
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.HashSet as S
import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as Map
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.HashSet as S

import Control.Monad.Unique
import Data.Text.Extended
@ -26,7 +26,7 @@ import Hasura.EncJSON
import Hasura.GraphQL.RemoteServer
import Hasura.RQL.DDL.Deps
import Hasura.RQL.Types
import Hasura.Server.Version (HasVersion)
import Hasura.Server.Version (HasVersion)

import Hasura.Session

@ -34,7 +34,6 @@ runAddRemoteSchema
:: ( HasVersion
, QErrM m
, CacheRWM m
, MonadTx m
, MonadIO m
, MonadUnique m
, HasHttpManager m
@ -56,7 +55,6 @@ runAddRemoteSchema env q@(AddRemoteSchemaQuery name defn comment) = do
runAddRemoteSchemaPermissions
:: ( QErrM m
, CacheRWM m
, MonadTx m
, HasRemoteSchemaPermsCtx m
, MetadataM m
)
@ -89,7 +87,6 @@ runAddRemoteSchemaPermissions q = do
runDropRemoteSchemaPermissions
:: ( QErrM m
, CacheRWM m
, MonadTx m
, MetadataM m
)
=> DropRemoteSchemaPermissions

@ -23,7 +23,7 @@ import qualified Data.Time.Clock as C
-- be created
runCreateCronTrigger
:: ( CacheRWM m, MonadIO m
, MetadataM m, MonadScheduledEvents m
, MetadataM m, MonadMetadataStorageQueryAPI m
)
=> CreateCronTrigger -> m EncJSON
runCreateCronTrigger CreateCronTrigger {..} = do
@ -79,7 +79,7 @@ updateCronTrigger
:: ( CacheRWM m
, MonadIO m
, MetadataM m
, MonadScheduledEvents m
, MonadMetadataStorageQueryAPI m
)
=> CronTriggerMetadata -> m EncJSON
updateCronTrigger cronTriggerMetadata = do
@ -97,7 +97,7 @@ updateCronTrigger cronTriggerMetadata = do
runDeleteCronTrigger
:: ( CacheRWM m
, MetadataM m
, MonadScheduledEvents m
, MonadMetadataStorageQueryAPI m
)
=> ScheduledTriggerName -> m EncJSON
runDeleteCronTrigger (ScheduledTriggerName stName) = do
@ -113,7 +113,7 @@ dropCronTriggerInMetadata name =
MetadataModifier $ metaCronTriggers %~ OMap.delete name

runCreateScheduledEvent
:: (MonadScheduledEvents m) => CreateScheduledEvent -> m EncJSON
:: (MonadMetadataStorageQueryAPI m) => CreateScheduledEvent -> m EncJSON
runCreateScheduledEvent =
(createScheduledEvent . SESOneOff) >=> \() -> pure successMsg

@ -42,6 +42,7 @@ import qualified Database.PG.Query as Q
import qualified Database.PostgreSQL.LibPQ as PQ
import qualified Text.Regex.TDFA as TDFA

import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
@ -102,13 +103,16 @@ isSchemaCacheBuildRequiredRunSQL RunSQL {..} =
{ TDFA.captureGroups = False }
"\\balter\\b|\\bdrop\\b|\\breplace\\b|\\bcreate function\\b|\\bcomment on\\b")

runRunSQL :: (MonadTx m, CacheRWM m, HasSQLGenCtx m, MetadataM m) => RunSQL -> m EncJSON
runRunSQL q@RunSQL {..}
runRunSQL :: (MonadIO m, MonadBaseControl IO m, MonadError QErr m, CacheRWM m, HasSQLGenCtx m, MetadataM m)
=> SourceName -> RunSQL -> m EncJSON
runRunSQL source q@RunSQL {..}
-- see Note [Checking metadata consistency in run_sql]
| isSchemaCacheBuildRequiredRunSQL q
= withMetadataCheck rCascade $ execRawSQL rSql
= withMetadataCheck source rCascade rTxAccessMode $ execRawSQL rSql
| otherwise
= execRawSQL rSql
= (_pcConfiguration <$> askPGSourceCache source) >>= \sourceConfig ->
liftEitherM $ runExceptT $
runLazyTx (_pscExecCtx sourceConfig) rTxAccessMode $ execRawSQL rSql
where
execRawSQL :: (MonadTx m) => Text -> m EncJSON
execRawSQL =

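Aside: a runnable reduction of the isSchemaCacheBuildRequiredRunSQL check referenced above (requires the regex-tdfa package). run_sql only pays for the full metadata round-trip when the statement can actually change the schema; everything else now goes straight to the named source's connection.

import qualified Text.Regex.TDFA as TDFA

requiresCacheRebuild :: String -> Bool
requiresCacheRebuild sql = TDFA.match compiledRegex sql
  where
    compiledRegex :: TDFA.Regex
    compiledRegex = TDFA.makeRegexOpts
      TDFA.defaultCompOpt { TDFA.caseSensitive = False }
      TDFA.defaultExecOpt { TDFA.captureGroups = False }
      "\\balter\\b|\\bdrop\\b|\\breplace\\b|\\bcreate function\\b|\\bcomment on\\b"

main :: IO ()
main = mapM_ (print . requiresCacheRebuild)
  [ "SELECT * FROM author"                   -- False: no DDL keywords
  , "ALTER TABLE author ADD COLUMN bio text" -- True: schema may change
  ]
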
@ -1,5 +1,6 @@
{-# LANGUAGE Arrows #-}
{-# LANGUAGE OverloadedLabels #-}
{-# LANGUAGE Arrows #-}
{-# LANGUAGE OverloadedLabels #-}
{-# LANGUAGE UndecidableInstances #-}

{-| Top-level functions concerned specifically with operations on the schema cache, such as
rebuilding it from the catalog and incorporating schema changes. See the module documentation for
@ -25,9 +26,11 @@ import qualified Data.HashMap.Strict.Extended as M
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.HashSet as HS
import qualified Data.HashSet.InsOrd as HSIns
import qualified Database.PG.Query as Q

import Control.Arrow.Extended
import Control.Lens hiding ((.=))
import Control.Monad.Trans.Control (MonadBaseControl)
import Control.Monad.Unique
import Data.Aeson
import Data.Text.Extended
@ -44,7 +47,7 @@ import Hasura.RQL.DDL.CustomTypes
import Hasura.RQL.DDL.Deps
import Hasura.RQL.DDL.EventTrigger
import Hasura.RQL.DDL.RemoteSchema
import Hasura.RQL.DDL.RemoteSchema.Permission (resolveRoleBasedRemoteSchema)
import Hasura.RQL.DDL.RemoteSchema.Permission (resolveRoleBasedRemoteSchema)
import Hasura.RQL.DDL.ScheduledTrigger
import Hasura.RQL.DDL.Schema.Cache.Common
import Hasura.RQL.DDL.Schema.Cache.Dependencies
@ -53,6 +56,7 @@ import Hasura.RQL.DDL.Schema.Cache.Permission
import Hasura.RQL.DDL.Schema.Common
import Hasura.RQL.DDL.Schema.Diff
import Hasura.RQL.DDL.Schema.Function
import Hasura.RQL.DDL.Schema.Source
import Hasura.RQL.DDL.Schema.Table
import Hasura.RQL.Types hiding (fmFunction, tmTable)
import Hasura.Server.Version (HasVersion)
@ -60,18 +64,12 @@ import Hasura.Server.Version (HasVersion)
import Hasura.Session

buildRebuildableSchemaCache
:: ( HasVersion
, MonadIO m
, MonadTx m
, HasHttpManager m
, HasSQLGenCtx m
, HasRemoteSchemaPermsCtx m
)
:: (HasVersion)
=> Env.Environment
-> Metadata
-> m RebuildableSchemaCache
-> CacheBuild RebuildableSchemaCache
buildRebuildableSchemaCache env metadata = do
result <- runCacheBuild $ flip runReaderT CatalogSync $
result <- flip runReaderT CatalogSync $
Inc.build (buildSchemaCacheRule env) (metadata, initialInvalidationKeys)
pure $ RebuildableSchemaCache (Inc.result result) initialInvalidationKeys (Inc.rebuildRule result)

@ -83,7 +81,10 @@ newtype CacheRWT m a
deriving
( Functor, Applicative, Monad, MonadIO, MonadUnique, MonadReader r, MonadError e, MonadTx
, UserInfoM, HasHttpManager, HasSQLGenCtx, HasSystemDefined, MonadMetadataStorage
, HasRemoteSchemaPermsCtx, MonadScheduledEvents)
, MonadMetadataStorageQueryAPI, HasRemoteSchemaPermsCtx)

deriving instance (MonadBase IO m) => MonadBase IO (CacheRWT m)
deriving instance (MonadBaseControl IO m) => MonadBaseControl IO (CacheRWT m)

runCacheRWT
:: Functor m
@ -94,16 +95,16 @@ runCacheRWT cache (CacheRWT m) =
instance MonadTrans CacheRWT where
lift = CacheRWT . lift

instance (Monad m) => TableCoreInfoRM 'Postgres (CacheRWT m)
instance (Monad m) => CacheRM (CacheRWT m) where
askSchemaCache = CacheRWT $ gets (lastBuiltSchemaCache . fst)
askSchemaCache = CacheRWT $ gets (lastBuiltSchemaCache . (^. _1))

instance (MonadIO m, MonadTx m, HasHttpManager m, HasSQLGenCtx m, HasRemoteSchemaPermsCtx m) => CacheRWM (CacheRWT m) where
instance (MonadIO m, MonadError QErr m, HasHttpManager m, HasSQLGenCtx m
, HasRemoteSchemaPermsCtx m, MonadResolveSource m) => CacheRWM (CacheRWT m) where
buildSchemaCacheWithOptions buildReason invalidations metadata = CacheRWT do
(RebuildableSchemaCache _ invalidationKeys rule, oldInvalidations) <- get
let newInvalidationKeys = invalidateKeys invalidations invalidationKeys
result <- lift $ runCacheBuild $ flip runReaderT buildReason $
Inc.build rule (metadata, newInvalidationKeys)
result <- lift $ runCacheBuildM $ flip runReaderT buildReason $
Inc.build rule (metadata, newInvalidationKeys)
let schemaCache = Inc.result result
prunedInvalidationKeys = pruneInvalidationKeys schemaCache newInvalidationKeys
!newCache = RebuildableSchemaCache schemaCache prunedInvalidationKeys (Inc.rebuildRule result)
@ -120,8 +121,8 @@ buildSchemaCacheRule
-- Note: by supplying BuildReason via MonadReader, it does not participate in caching, which is
-- what we want!
:: ( HasVersion, ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
, MonadIO m, MonadUnique m, MonadTx m
, MonadReader BuildReason m, HasHttpManager m, HasSQLGenCtx m, HasRemoteSchemaPermsCtx m)
, MonadIO m, MonadUnique m, MonadBaseControl IO m, MonadError QErr m
, MonadReader BuildReason m, HasHttpManager m, HasSQLGenCtx m , HasRemoteSchemaPermsCtx m, MonadResolveSource m)
=> Env.Environment
-> (Metadata, InvalidationKeys) `arr` SchemaCache
buildSchemaCacheRule env = proc (metadata, invalidationKeys) -> do
@ -139,8 +140,7 @@ buildSchemaCacheRule env = proc (metadata, invalidationKeys) -> do
-- Step 3: Build the GraphQL schema.
(gqlContext, gqlSchemaInconsistentObjects) <- runWriterA buildGQLContext -<
( QueryHasura
, _boTables resolvedOutputs
, _boFunctions resolvedOutputs
, _boSources resolvedOutputs
, _boRemoteSchemas resolvedOutputs
, _boActions resolvedOutputs
, _actNonObjects $ _boCustomTypes resolvedOutputs
@ -149,17 +149,15 @@ buildSchemaCacheRule env = proc (metadata, invalidationKeys) -> do
-- Step 4: Build the relay GraphQL schema
(relayContext, relaySchemaInconsistentObjects) <- runWriterA buildGQLContext -<
( QueryRelay
, _boTables resolvedOutputs
, _boFunctions resolvedOutputs
, _boSources resolvedOutputs
, _boRemoteSchemas resolvedOutputs
, _boActions resolvedOutputs
, _actNonObjects $ _boCustomTypes resolvedOutputs
)

returnA -< SchemaCache
{ scTables = _boTables resolvedOutputs
{ scPostgres = _boSources resolvedOutputs
, scActions = _boActions resolvedOutputs
, scFunctions = _boFunctions resolvedOutputs
-- TODO this is not the right value: we should track what part of the schema
-- we can stitch without consistencies, I think.
, scRemoteSchemas = fmap fst (_boRemoteSchemas resolvedOutputs) -- remoteSchemaMap
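Aside: a hedged reconstruction of the per-source cache shape this hunk switches to, inferred from the accessors used elsewhere in the diff (_pcTables, _pcFunctions, _pcConfiguration) and the SourceInfo source tableCache functionCache sourceConfig construction in buildSource below. Toy aliases keep the sketch standalone; the real types are parameterised over the backend ('Postgres).

import Data.HashMap.Strict (HashMap)

type SourceName    = String            -- really a newtype in the Hasura codebase
type TableCache    = HashMap String () -- really HashMap QualifiedTable (TableInfo b)
type FunctionCache = HashMap String () -- really HashMap QualifiedFunction FunctionInfo
type SourceConfig  = ()                -- really connection settings plus an exec context

data SourceInfo = SourceInfo
  { _pcSource        :: !SourceName
  , _pcTables        :: !TableCache
  , _pcFunctions     :: !FunctionCache
  , _pcConfiguration :: !SourceConfig
  }

-- A map of these per source name replaces the old flat scTables/scFunctions
-- fields of SchemaCache; scPostgres above holds it.
type SourceCache = HashMap SourceName SourceInfo
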
@ -180,18 +178,93 @@ buildSchemaCacheRule env = proc (metadata, invalidationKeys) -> do
<> toList relaySchemaInconsistentObjects
}
where
buildAndCollectInfo
resolveSourceArr
:: ( ArrowChoice arr, Inc.ArrowCache m arr
, ArrowWriter (Seq CollectedInfo) arr
, MonadIO m, MonadBaseControl IO m
, MonadResolveSource m
)
=> SourceMetadata `arr` Maybe ResolvedPGSource
resolveSourceArr = proc sourceMetadata -> do
let sourceName = _smName sourceMetadata
metadataObj = MetadataObject (MOSource sourceName) $ toJSON sourceName
(| withRecordInconsistency (
liftEitherA <<< bindA -< resolveSource $ _smConfiguration sourceMetadata)
|) metadataObj

buildSource
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
, ArrowWriter (Seq CollectedInfo) arr, MonadIO m, MonadUnique m, MonadTx m, MonadReader BuildReason m
, HasHttpManager m, HasSQLGenCtx m )
=> (Metadata, Inc.Dependency InvalidationKeys) `arr` BuildOutputs
buildAndCollectInfo = proc (metadata, invalidationKeys) -> do
let Metadata tables functions remoteSchemas collections allowlists
customTypes actions cronTriggers = metadata
, ArrowWriter (Seq CollectedInfo) arr, MonadBaseControl IO m
, HasSQLGenCtx m, MonadIO m, MonadError QErr m, MonadReader BuildReason m)
=> ( SourceMetadata
, SourceConfig 'Postgres
, DBTablesMetadata 'Postgres
, PostgresFunctionsMetadata
, RemoteSchemaMap
, Inc.Dependency InvalidationKeys
) `arr` SourceInfo 'Postgres
buildSource = proc (sourceMetadata, sourceConfig, pgTables, pgFunctions, remoteSchemaMap, invalidationKeys) -> do
let SourceMetadata source tables functions _ = sourceMetadata
(tableInputs, nonColumnInputs, permissions) = unzip3 $ map mkTableInputs $ OMap.elems tables
eventTriggers = map (_tmTable &&& (OMap.elems . _tmEventTriggers)) (OMap.elems tables)
-- HashMap k a -> HashMap k b -> HashMap k (a, b)
alignTableMap = M.intersectionWith (,)

-- tables
tableRawInfos <- buildTableCache -< ( source, sourceConfig, pgTables
, tableInputs, Inc.selectD #_ikMetadata invalidationKeys
)

-- relationships and computed fields
let nonColumnsByTable = mapFromL _nctiTable nonColumnInputs
tableCoreInfos <-
(| Inc.keyed (\_ (tableRawInfo, nonColumnInput) -> do
let columns = _tciFieldInfoMap tableRawInfo
allFields <- addNonColumnFields -< (source, tableRawInfos, columns, remoteSchemaMap, pgFunctions, nonColumnInput)
returnA -< (tableRawInfo {_tciFieldInfoMap = allFields}))
|) (tableRawInfos `alignTableMap` nonColumnsByTable)

tableCoreInfosDep <- Inc.newDependency -< tableCoreInfos
-- permissions and event triggers
tableCache <-
(| Inc.keyed (\_ ((tableCoreInfo, permissionInputs), (_, eventTriggerConfs)) -> do
let tableFields = _tciFieldInfoMap tableCoreInfo
permissionInfos <- buildTablePermissions -< (source, tableCoreInfosDep, tableFields, permissionInputs)
eventTriggerInfos <- buildTableEventTriggers -< (source, sourceConfig, tableCoreInfo, eventTriggerConfs)
returnA -< TableInfo tableCoreInfo permissionInfos eventTriggerInfos
)
|) (tableCoreInfos `alignTableMap` mapFromL _tpiTable permissions `alignTableMap` mapFromL fst eventTriggers)

-- sql functions
functionCache <- (mapFromL _fmFunction (OMap.elems functions) >- returnA)
>-> (| Inc.keyed (\_ (FunctionMetadata qf config) -> do
let systemDefined = SystemDefined False
definition = toJSON $ TrackFunction qf
metadataObject = MetadataObject (MOSourceObjId source $ SMOFunction qf) definition
schemaObject = SOSourceObj source $ SOIFunction qf
addFunctionContext e = "in function " <> qf <<> ": " <> e
(| withRecordInconsistency (
(| modifyErrA (do
let funcDefs = fromMaybe [] $ M.lookup qf pgFunctions
rawfi <- bindErrorA -< handleMultipleFunctions qf funcDefs
(fi, dep) <- bindErrorA -< mkFunctionInfo source qf systemDefined config rawfi
recordDependencies -< (metadataObject, schemaObject, [dep])
returnA -< fi)
|) addFunctionContext)
|) metadataObject) |)
>-> (\infos -> M.catMaybes infos >- returnA)

returnA -< SourceInfo source tableCache functionCache sourceConfig

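Aside: a runnable illustration of the alignTableMap = M.intersectionWith (,) trick used in buildSource above. It zips the per-table maps (raw table info on one side, non-column inputs or permissions on the other) by key before Inc.keyed walks them; keys missing from either side simply drop out.

import qualified Data.HashMap.Strict as M

main :: IO ()
main = print $ M.intersectionWith (,)
  (M.fromList [("author", 1 :: Int), ("article", 2)])
  (M.fromList [("author", "raw info"), ("untracked", "ignored")])
-- fromList [("author",(1,"raw info"))]
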
buildAndCollectInfo
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
, ArrowWriter (Seq CollectedInfo) arr, MonadIO m, MonadUnique m, MonadError QErr m
, MonadReader BuildReason m, MonadBaseControl IO m
, HasHttpManager m, HasSQLGenCtx m, MonadResolveSource m)
=> (Metadata, Inc.Dependency InvalidationKeys) `arr` BuildOutputs
buildAndCollectInfo = proc (metadata, invalidationKeys) -> do
let Metadata sources remoteSchemas collections allowlists
customTypes actions cronTriggers = metadata
remoteSchemaPermissions =
let remoteSchemaPermsList = OMap.toList $ _rsmPermissions <$> remoteSchemas
in concat $ flip map remoteSchemaPermsList $
@ -200,13 +273,6 @@ buildSchemaCacheRule env = proc (metadata, invalidationKeys) -> do
AddRemoteSchemaPermissions remoteSchemaName role defn comment
)

pgTables <- bindA -< fetchTableMetadata
pgFunctions <- bindA -< fetchFunctionMetadata
pgScalars <- bindA -< fetchPgScalars

-- tables
tableRawInfos <- buildTableCache -< (pgTables, tableInputs, Inc.selectD #_ikMetadata invalidationKeys)

-- remote schemas
let remoteSchemaInvalidationKeys = Inc.selectD #_ikRemoteSchemas invalidationKeys
remoteSchemaMap <- buildRemoteSchemas -< (remoteSchemaInvalidationKeys, (OMap.elems remoteSchemas))
@ -225,43 +291,18 @@ buildSchemaCacheRule env = proc (metadata, invalidationKeys) -> do
)
|)

-- relationships and computed fields
let nonColumnsByTable = mapFromL _nctiTable nonColumnInputs
tableCoreInfos <-
(| Inc.keyed (\_ (tableRawInfo, nonColumnInput) -> do
let columns = _tciFieldInfoMap tableRawInfo
allFields <- addNonColumnFields -< (tableRawInfos, columns, M.map fst remoteSchemaMap, pgFunctions, nonColumnInput)
returnA -< (tableRawInfo {_tciFieldInfoMap = allFields}))
|) (tableRawInfos `alignTableMap` nonColumnsByTable)

-- permissions and event triggers
tableCoreInfosDep <- Inc.newDependency -< tableCoreInfos
tableCache <-
(| Inc.keyed (\_ ((tableCoreInfo, permissionInputs), (_, eventTriggerConfs)) -> do
let tableFields = _tciFieldInfoMap tableCoreInfo
permissionInfos <- buildTablePermissions -< (tableCoreInfosDep, tableFields, permissionInputs)
eventTriggerInfos <- buildTableEventTriggers -< (tableCoreInfo, eventTriggerConfs)
returnA -< TableInfo tableCoreInfo permissionInfos eventTriggerInfos
)
|) (tableCoreInfos `alignTableMap` mapFromL _tpiTable permissions `alignTableMap` mapFromL fst eventTriggers)

-- sql functions
functionCache <- (mapFromL _fmFunction (OMap.elems functions) >- returnA)
>-> (| Inc.keyed (\_ (FunctionMetadata qf config) -> do
let systemDefined = SystemDefined False
definition = toJSON $ TrackFunction qf
metadataObject = MetadataObject (MOFunction qf) definition
schemaObject = SOFunction qf
addFunctionContext e = "in function " <> qf <<> ": " <> e
(| withRecordInconsistency (
(| modifyErrA (do
let funcDefs = fromMaybe [] $ M.lookup qf pgFunctions
rawfi <- bindErrorA -< handleMultipleFunctions qf funcDefs
(fi, dep) <- bindErrorA -< mkFunctionInfo qf systemDefined config rawfi
recordDependencies -< (metadataObject, schemaObject, [dep])
returnA -< fi)
|) addFunctionContext)
|) metadataObject) |)
sourcesOutput <-
(| Inc.keyed (\_ sourceMetadata -> do
maybeResolvedSource <- resolveSourceArr -< sourceMetadata
case maybeResolvedSource of
Nothing -> returnA -< Nothing
Just (ResolvedPGSource pgSourceConfig tablesMeta functionsMeta pgScalars) -> do
so <- buildSource -< ( sourceMetadata, pgSourceConfig, tablesMeta, functionsMeta
, M.map fst remoteSchemaMap, invalidationKeys
)
returnA -< Just (so, pgScalars))
|) (M.fromList $ OMap.toList sources)
>-> (\infos -> M.catMaybes infos >- returnA)

-- allow list
@ -274,9 +315,11 @@ buildSchemaCacheRule env = proc (metadata, invalidationKeys) -> do
& HS.fromList

-- custom types
let pgScalars = mconcat $ map snd $ M.elems sourcesOutput
sourcesCache = M.map fst sourcesOutput
maybeResolvedCustomTypes <-
(| withRecordInconsistency
(bindErrorA -< resolveCustomTypes tableCache customTypes pgScalars)
(bindErrorA -< resolveCustomTypes sourcesCache customTypes pgScalars)
|) (MetadataObject MOCustomTypes $ toJSON customTypes)

-- -- actions
@ -296,17 +339,17 @@ buildSchemaCacheRule env = proc (metadata, invalidationKeys) -> do
cronTriggersMap <- buildCronTriggers -< ((), OMap.elems cronTriggers)

returnA -< BuildOutputs
{ _boTables = tableCache
{ _boSources = M.map fst sourcesOutput
, _boActions = actionCache
, _boFunctions = functionCache
, _boRemoteSchemas = remoteSchemaCache
, _boAllowlist = allowList
, _boCustomTypes = annotatedCustomTypes
, _boCronTriggers = cronTriggersMap
}

mkEventTriggerMetadataObject (table, eventTriggerConf) =
let objectId = MOTableObj table $ MTOTrigger $ etcName eventTriggerConf
mkEventTriggerMetadataObject (source, _, table, eventTriggerConf) =
let objectId = MOSourceObjId source $
SMOTableObj table $ MTOTrigger $ etcName eventTriggerConf
definition = object ["table" .= table, "configuration" .= eventTriggerConf]
in MetadataObject objectId definition

@ -345,7 +388,7 @@ buildSchemaCacheRule env = proc (metadata, invalidationKeys) -> do

buildRemoteSchemaPermissions
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
, Inc.ArrowCache m arr, MonadTx m)
, Inc.ArrowCache m arr, MonadError QErr m)
=> (RemoteSchemaCtx, [AddRemoteSchemaPermissions]) `arr` (M.HashMap RoleName IntrospectionResult)
buildRemoteSchemaPermissions = buildInfoMap _arspRole mkRemoteSchemaPermissionMetadataObject buildRemoteSchemaPermission
where
@ -368,40 +411,42 @@ buildSchemaCacheRule env = proc (metadata, invalidationKeys) -> do

buildTableEventTriggers
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
, Inc.ArrowCache m arr, MonadTx m, MonadReader BuildReason m, HasSQLGenCtx m )
=> (TableCoreInfo 'Postgres, [EventTriggerConf]) `arr` EventTriggerInfoMap
buildTableEventTriggers = proc (tableInfo, eventTriggerConfs) ->
buildInfoMap (etcName . snd) mkEventTriggerMetadataObject buildEventTrigger
-< (tableInfo, map (_tciName tableInfo,) eventTriggerConfs)
, Inc.ArrowCache m arr, MonadIO m, MonadError QErr m, MonadBaseControl IO m
, MonadReader BuildReason m, HasSQLGenCtx m)
=> (SourceName, SourceConfig 'Postgres, TableCoreInfo 'Postgres, [EventTriggerConf]) `arr` EventTriggerInfoMap
buildTableEventTriggers = proc (source, sourceConfig, tableInfo, eventTriggerConfs) ->
buildInfoMap (etcName . (^. _4)) mkEventTriggerMetadataObject buildEventTrigger
-< (tableInfo, map (source, sourceConfig, _tciName tableInfo,) eventTriggerConfs)
where
buildEventTrigger = proc (tableInfo, (table, eventTriggerConf)) -> do
buildEventTrigger = proc (tableInfo, (source, sourceConfig, table, eventTriggerConf)) -> do
let triggerName = etcName eventTriggerConf
metadataObject = mkEventTriggerMetadataObject (table, eventTriggerConf)
schemaObjectId = SOTableObj table $ TOTrigger triggerName
metadataObject = mkEventTriggerMetadataObject (source, sourceConfig, table, eventTriggerConf)
schemaObjectId = SOSourceObj source $
SOITableObj table $ TOTrigger triggerName
addTriggerContext e = "in event trigger " <> triggerName <<> ": " <> e
(| withRecordInconsistency (
(| modifyErrA (do
(info, dependencies) <- bindErrorA -< mkEventTriggerInfo env table eventTriggerConf
(info, dependencies) <- bindErrorA -< mkEventTriggerInfo env source table eventTriggerConf
let tableColumns = M.mapMaybe (^? _FIColumn) (_tciFieldInfoMap tableInfo)
recreateViewIfNeeded -< (table, tableColumns, triggerName, etcDefinition eventTriggerConf)
recreateTriggerIfNeeded -< (table, M.elems tableColumns, triggerName, etcDefinition eventTriggerConf, sourceConfig)
recordDependencies -< (metadataObject, schemaObjectId, dependencies)
returnA -< info)
|) (addTableContext table . addTriggerContext))
|) metadataObject

recreateViewIfNeeded = Inc.cache $
arrM \(tableName, tableColumns, triggerName, triggerDefinition) -> do
recreateTriggerIfNeeded = Inc.cache $
arrM \(tableName, tableColumns, triggerName, triggerDefinition, sourceConfig) -> do
buildReason <- ask
when (buildReason == CatalogUpdate) $ do
liftTx $ delTriggerQ triggerName -- executes DROP IF EXISTS.. sql
mkAllTriggersQ triggerName tableName (M.elems tableColumns) triggerDefinition
when (buildReason == CatalogUpdate) $
liftEitherM $ runPgSourceWriteTx sourceConfig $
createPostgresTableEventTrigger tableName tableColumns triggerName triggerDefinition

buildCronTriggers
:: ( ArrowChoice arr
, Inc.ArrowDistribute arr
, ArrowWriter (Seq CollectedInfo) arr
, Inc.ArrowCache m arr
, MonadTx m)
, MonadError QErr m)
=> ((),[CronTriggerMetadata])
`arr` HashMap TriggerName CronTriggerInfo
buildCronTriggers = buildInfoMap ctName mkCronTriggerMetadataObject buildCronTrigger
@ -457,73 +502,77 @@ buildSchemaCacheRule env = proc (metadata, invalidationKeys) -> do
-- result. If it did, it checks to ensure the changes do not violate any integrity constraints, and
-- if not, incorporates them into the schema cache.
withMetadataCheck
:: (MonadTx m, CacheRWM m, HasSQLGenCtx m, MetadataM m)
=> Bool -> m a -> m a
withMetadataCheck cascade action = do
sc <- askSchemaCache
let preActionTables = scTables sc
preActionFunctions = scFunctions sc
-- Drop event triggers so no interference is caused to the sql query
forM_ (M.elems preActionTables) $ \tableInfo -> do
let eventTriggers = _tiEventTriggerInfoMap tableInfo
forM_ (M.keys eventTriggers) (liftTx . delTriggerQ)
:: (MonadIO m, MonadBaseControl IO m, MonadError QErr m, CacheRWM m, HasSQLGenCtx m, MetadataM m)
=> SourceName -> Bool -> Q.TxAccess -> LazyTxT QErr m a -> m a
withMetadataCheck source cascade txAccess action = do
SourceInfo _ preActionTables preActionFunctions sourceConfig <- askPGSourceCache source

-- Get the metadata before the sql query, everything, need to filter this
(preActionTableMeta, preActionFunctionMeta) <- fetchMeta preActionTables preActionFunctions
(actionResult, metadataUpdater) <-
liftEitherM $ runExceptT $ runLazyTx (_pscExecCtx sourceConfig) txAccess $ do
-- Drop event triggers so no interference is caused to the sql query
forM_ (M.elems preActionTables) $ \tableInfo -> do
let eventTriggers = _tiEventTriggerInfoMap tableInfo
forM_ (M.keys eventTriggers) (liftTx . delTriggerQ)

-- Run the action
actionResult <- action
-- Get the metadata before the sql query, everything, need to filter this
(preActionTableMeta, preActionFunctionMeta) <- fetchMeta preActionTables preActionFunctions

-- Get the metadata after the sql query
(postActionTableMeta, postActionFunctionMeta) <- fetchMeta preActionTables preActionFunctions
-- Run the action
actionResult <- action
-- Get the metadata after the sql query
(postActionTableMeta, postActionFunctionMeta) <- fetchMeta preActionTables preActionFunctions

let preActionTableMeta' = filter (flip M.member preActionTables . tmTable) preActionTableMeta
schemaDiff = getSchemaDiff preActionTableMeta' postActionTableMeta
FunctionDiff droppedFuncs alteredFuncs = getFuncDiff preActionFunctionMeta postActionFunctionMeta
overloadedFuncs = getOverloadedFuncs (M.keys preActionFunctions) postActionFunctionMeta
let preActionTableMeta' = filter (flip M.member preActionTables . tmTable) preActionTableMeta
schemaDiff = getSchemaDiff preActionTableMeta' postActionTableMeta
FunctionDiff droppedFuncs alteredFuncs = getFuncDiff preActionFunctionMeta postActionFunctionMeta
overloadedFuncs = getOverloadedFuncs (M.keys preActionFunctions) postActionFunctionMeta

-- Do not allow overloading functions
unless (null overloadedFuncs) $
throw400 NotSupported $ "the following tracked function(s) cannot be overloaded: "
<> commaSeparated overloadedFuncs
-- Do not allow overloading functions
unless (null overloadedFuncs) $
throw400 NotSupported $ "the following tracked function(s) cannot be overloaded: "
<> commaSeparated overloadedFuncs

indirectDeps <- getSchemaChangeDeps schemaDiff
indirectDeps <- getSchemaChangeDeps source schemaDiff

-- Report back with an error if cascade is not set
when (indirectDeps /= [] && not cascade) $ reportDepsExt indirectDeps []
-- Report back with an error if cascade is not set
when (indirectDeps /= [] && not cascade) $ reportDepsExt indirectDeps []

metadataUpdater <- execWriterT $ do
-- Purge all the indirect dependents from state
mapM_ (purgeDependentObject >=> tell) indirectDeps
metadataUpdater <- execWriterT $ do
-- Purge all the indirect dependents from state
mapM_ (purgeDependentObject >=> tell) indirectDeps

-- Purge all dropped functions
let purgedFuncs = flip mapMaybe indirectDeps $ \case
SOFunction qf -> Just qf
_ -> Nothing
-- Purge all dropped functions
let purgedFuncs = flip mapMaybe indirectDeps $ \case
SOSourceObj _ (SOIFunction qf) -> Just qf
_ -> Nothing

forM_ (droppedFuncs \\ purgedFuncs) $ tell . dropFunctionInMetadata
forM_ (droppedFuncs \\ purgedFuncs) $ tell . dropFunctionInMetadata source

-- Process altered functions
forM_ alteredFuncs $ \(qf, newTy) -> do
when (newTy == FTVOLATILE) $
throw400 NotSupported $
"type of function " <> qf <<> " is altered to \"VOLATILE\" which is not supported now"
-- Process altered functions
forM_ alteredFuncs $ \(qf, newTy) -> do
when (newTy == FTVOLATILE) $
throw400 NotSupported $
"type of function " <> qf <<> " is altered to \"VOLATILE\" which is not supported now"

-- update the metadata with the changes
processSchemaChanges preActionTables schemaDiff
-- update the metadata with the changes
processSchemaChanges preActionTables schemaDiff

pure (actionResult, metadataUpdater)

-- Build schema cache with updated metadata
withNewInconsistentObjsCheck $ buildSchemaCache metadataUpdater

postActionSchemaCache <- askSchemaCache

-- Recreate event triggers in hdb_catalog
let postActionTables = scTables postActionSchemaCache
forM_ (M.elems postActionTables) $ \(TableInfo coreInfo _ eventTriggers) -> do
let table = _tciName coreInfo
columns = getCols $ _tciFieldInfoMap coreInfo
forM_ (M.toList eventTriggers) $ \(triggerName, eti) -> do
let opsDefinition = etiOpsDef eti
mkAllTriggersQ triggerName table columns opsDefinition
let postActionTables = maybe mempty _pcTables $ M.lookup source $ scPostgres postActionSchemaCache
liftEitherM $ runPgSourceWriteTx sourceConfig $
forM_ (M.elems postActionTables) $ \(TableInfo coreInfo _ eventTriggers) -> do
let table = _tciName coreInfo
columns = getCols $ _tciFieldInfoMap coreInfo
forM_ (M.toList eventTriggers) $ \(triggerName, eti) -> do
let opsDefinition = etiOpsDef eti
mkAllTriggersQ triggerName table columns opsDefinition

pure actionResult
where
@ -536,13 +585,13 @@ withMetadataCheck cascade action = do
processSchemaChanges preActionTables schemaDiff = do
-- Purge the dropped tables
forM_ droppedTables $
\tn -> tell $ MetadataModifier $ metaTables %~ OMap.delete tn
\tn -> tell $ MetadataModifier $ metaSources.ix source.smTables %~ OMap.delete tn

for_ alteredTables $ \(oldQtn, tableDiff) -> do
ti <- onNothing
(M.lookup oldQtn preActionTables)
(throw500 $ "old table metadata not found in cache : " <>> oldQtn)
processTableChanges (_tiCoreInfo ti) tableDiff
processTableChanges source (_tiCoreInfo ti) tableDiff
where
SchemaDiff droppedTables alteredTables = schemaDiff

@ -29,20 +29,26 @@ import Hasura.RQL.Types
data InvalidationKeys = InvalidationKeys
{ _ikMetadata :: !Inc.InvalidationKey
, _ikRemoteSchemas :: !(HashMap RemoteSchemaName Inc.InvalidationKey)
, _ikSources :: !(HashMap SourceName Inc.InvalidationKey)
} deriving (Show, Eq, Generic)
instance Inc.Cacheable InvalidationKeys
instance Inc.Select InvalidationKeys
$(makeLenses ''InvalidationKeys)

initialInvalidationKeys :: InvalidationKeys
initialInvalidationKeys = InvalidationKeys Inc.initialInvalidationKey mempty
initialInvalidationKeys = InvalidationKeys Inc.initialInvalidationKey mempty mempty

invalidateKeys :: CacheInvalidations -> InvalidationKeys -> InvalidationKeys
invalidateKeys CacheInvalidations{..} InvalidationKeys{..} = InvalidationKeys
{ _ikMetadata = if ciMetadata then Inc.invalidate _ikMetadata else _ikMetadata
, _ikRemoteSchemas = foldl' (flip invalidateRemoteSchema) _ikRemoteSchemas ciRemoteSchemas }
, _ikRemoteSchemas = foldl' (flip invalidate) _ikRemoteSchemas ciRemoteSchemas
, _ikSources = foldl' (flip invalidate) _ikSources ciSources
}
where
invalidateRemoteSchema = M.alter $ Just . maybe Inc.initialInvalidationKey Inc.invalidate
invalidate
:: (Eq a, Hashable a)
=> a -> HashMap a Inc.InvalidationKey -> HashMap a Inc.InvalidationKey
invalidate = M.alter $ Just . maybe Inc.initialInvalidationKey Inc.invalidate

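Aside: a runnable reduction of the generalised invalidate helper above, with Int standing in for Inc.InvalidationKey. M.alter either seeds a fresh key for a name it has never seen or bumps the existing one, which is why a single function can now serve both the per-remote-schema map and the new per-source map.

import qualified Data.HashMap.Strict as M

invalidate :: String -> M.HashMap String Int -> M.HashMap String Int
invalidate = M.alter $ Just . maybe 0 (+ 1)

main :: IO ()
main = print $ invalidate "default" $ invalidate "default" mempty
-- fromList [("default",1)]
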
data TableBuildInput
= TableBuildInput
@ -95,9 +101,8 @@ mkTableInputs TableMetadata{..} =
-- 'MonadWriter' side channel.
data BuildOutputs
= BuildOutputs
{ _boTables :: !(TableCache 'Postgres)
{ _boSources :: !(SourceCache 'Postgres)
, _boActions :: !ActionCache
, _boFunctions :: !FunctionCache
, _boRemoteSchemas :: !(HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject))
-- ^ We preserve the 'MetadataObject' from the original catalog metadata in the output so we can
-- reuse it later if we need to mark the remote schema inconsistent during GraphQL schema
@ -111,19 +116,19 @@ $(makeLenses ''BuildOutputs)
-- | Parameters required for schema cache build
data CacheBuildParams
= CacheBuildParams
{ _cbpManager :: !HTTP.Manager
, _cbpSqlGenCtx :: !SQLGenCtx
{ _cbpManager :: !HTTP.Manager
, _cbpSqlGenCtx :: !SQLGenCtx
, _cbpRemoteSchemaPermsCtx :: !RemoteSchemaPermsCtx
, _cbpSourceResolver :: !SourceResolver
}

-- | The monad in which @'RebuildableSchemaCache' is being run
newtype CacheBuild a
= CacheBuild {unCacheBuild :: ReaderT CacheBuildParams (LazyTxT QErr IO) a}
= CacheBuild {unCacheBuild :: ReaderT CacheBuildParams (ExceptT QErr IO) a}
deriving ( Functor, Applicative, Monad
, MonadError QErr
, MonadReader CacheBuildParams
, MonadIO
, MonadTx
, MonadBase IO
, MonadBaseControl IO
, MonadUnique
@ -138,21 +143,34 @@ instance HasSQLGenCtx CacheBuild where
instance HasRemoteSchemaPermsCtx CacheBuild where
askRemoteSchemaPermsCtx = asks _cbpRemoteSchemaPermsCtx

instance MonadResolveSource CacheBuild where
getSourceResolver = asks _cbpSourceResolver


runCacheBuild
:: ( MonadIO m
, MonadError QErr m
)
=> CacheBuildParams -> CacheBuild a -> m a
runCacheBuild params (CacheBuild m) = do
liftEitherM $ liftIO $ runExceptT (runReaderT m params)

runCacheBuildM
:: ( MonadIO m
, MonadError QErr m
, HasHttpManager m
, HasSQLGenCtx m
, HasRemoteSchemaPermsCtx m
, MonadTx m
, MonadResolveSource m
)
=> CacheBuild a -> m a
runCacheBuild (CacheBuild m) = do
httpManager <- askHttpManager
sqlGenCtx <- askSQLGenCtx
remoteSchemaPermsCtx <- askRemoteSchemaPermsCtx
let params = CacheBuildParams httpManager sqlGenCtx remoteSchemaPermsCtx
liftTx $ lazyTxToQTx (runReaderT m params)
runCacheBuildM m = do
params <- CacheBuildParams
<$> askHttpManager
<*> askSQLGenCtx
<*> askRemoteSchemaPermsCtx
<*> getSourceResolver
runCacheBuild params m

data RebuildableSchemaCache
= RebuildableSchemaCache

@ -83,9 +83,37 @@ pruneDanglingDependents cache = fmap (M.filter (not . null)) . traverse do
where
resolveDependency :: SchemaDependency -> Either Text ()
resolveDependency (SchemaDependency objectId _) = case objectId of
SOTable tableName -> void $ resolveTable tableName
SOFunction functionName -> unless (functionName `M.member` _boFunctions cache) $
Left $ "function " <> functionName <<> " is not tracked"
SOSource source -> void $ M.lookup source (_boSources cache)
`onNothing` Left ("no such source exists: " <>> source)
SOSourceObj source sourceObjId -> case sourceObjId of
SOITable tableName -> void $ resolveTable source tableName
SOIFunction functionName -> void $
(M.lookup source (_boSources cache) >>= M.lookup functionName . _pcFunctions)
`onNothing` Left ("function " <> functionName <<> " is not tracked")
SOITableObj tableName tableObjectId -> do
tableInfo <- resolveTable source tableName
case tableObjectId of
TOCol columnName ->
void $ resolveField tableInfo (fromCol @'Postgres columnName) _FIColumn "column"
TORel relName ->
void $ resolveField tableInfo (fromRel relName) _FIRelationship "relationship"
TOComputedField fieldName ->
void $ resolveField tableInfo (fromComputedField fieldName) _FIComputedField "computed field"
TORemoteRel fieldName ->
void $ resolveField tableInfo (fromRemoteRelationship fieldName) _FIRemoteRelationship "remote relationship"
TOForeignKey constraintName -> do
let foreignKeys = _tciForeignKeys $ _tiCoreInfo tableInfo
unless (isJust $ find ((== constraintName) . _cName . _fkConstraint) foreignKeys) $
Left $ "no foreign key constraint named " <> constraintName <<> " is "
<> "defined for table " <>> tableName
TOPerm roleName permType -> withPermType permType \accessor -> do
let permLens = permAccToLens accessor
unless (has (tiRolePermInfoMap.ix roleName.permLens._Just) tableInfo) $
Left $ "no " <> permTypeToCode permType <> " permission defined on table "
<> tableName <<> " for role " <>> roleName
TOTrigger triggerName ->
unless (M.member triggerName (_tiEventTriggerInfoMap tableInfo)) $ Left $
"no event trigger named " <> triggerName <<> " is defined for table " <>> tableName
SORemoteSchema remoteSchemaName -> unless (remoteSchemaName `M.member` _boRemoteSchemas cache) $
Left $ "remote schema " <> remoteSchemaName <<> " is not found"
SORemoteSchemaPermission remoteSchemaName roleName -> do
@ -95,33 +123,10 @@ pruneDanglingDependents cache = fmap (M.filter (not . null)) . traverse do
unless (roleName `M.member` _rscPermissions (fst remoteSchema)) $
Left $ "no permission defined on remote schema " <> remoteSchemaName
<<> " for role " <>> roleName
SOTableObj tableName tableObjectId -> do
tableInfo <- resolveTable tableName
case tableObjectId of
TOCol columnName ->
void $ resolveField tableInfo (fromCol @'Postgres columnName) _FIColumn "column"
TORel relName ->
void $ resolveField tableInfo (fromRel relName) _FIRelationship "relationship"
TOComputedField fieldName ->
void $ resolveField tableInfo (fromComputedField fieldName) _FIComputedField "computed field"
TORemoteRel fieldName ->
void $ resolveField tableInfo (fromRemoteRelationship fieldName) _FIRemoteRelationship "remote relationship"
TOForeignKey constraintName -> do
let foreignKeys = _tciForeignKeys $ _tiCoreInfo tableInfo
unless (isJust $ find ((== constraintName) . _cName . _fkConstraint) foreignKeys) $
Left $ "no foreign key constraint named " <> constraintName <<> " is "
<> "defined for table " <>> tableName
TOPerm roleName permType -> withPermType permType \accessor -> do
let permLens = permAccToLens accessor
unless (has (tiRolePermInfoMap.ix roleName.permLens._Just) tableInfo) $
Left $ "no " <> permTypeToCode permType <> " permission defined on table "
<> tableName <<> " for role " <>> roleName
TOTrigger triggerName ->
unless (M.member triggerName (_tiEventTriggerInfoMap tableInfo)) $ Left $
"no event trigger named " <> triggerName <<> " is defined for table " <>> tableName

resolveTable tableName = M.lookup tableName (_boTables cache) `onNothing`
Left ("table " <> tableName <<> " is not tracked")
resolveTable source tableName =
(M.lookup source (_boSources cache) >>= M.lookup tableName . _pcTables)
`onNothing` Left ("table " <> tableName <<> " is not tracked")

resolveField :: TableInfo 'Postgres -> FieldName -> Getting (First a) (FieldInfo 'Postgres) a -> Text -> Either Text a
resolveField tableInfo fieldName fieldType fieldTypeName = do
@ -132,20 +137,23 @@ pruneDanglingDependents cache = fmap (M.filter (not . null)) . traverse do
(fieldInfo ^? fieldType) `onNothing` Left
("field " <> fieldName <<> "of table " <> tableName <<> " is not a " <> fieldTypeName)

deleteMetadataObject :: MetadataObjId -> BuildOutputs -> BuildOutputs
deleteMetadataObject
:: MetadataObjId -> BuildOutputs -> BuildOutputs
deleteMetadataObject objectId = case objectId of
MOTable name -> boTables %~ M.delete name
MOFunction name -> boFunctions %~ M.delete name
MORemoteSchema name -> boRemoteSchemas %~ M.delete name
MOSource name -> boSources %~ M.delete name
MOSourceObjId source sourceObjId -> boSources.ix source %~ case sourceObjId of
SMOTable name -> pcTables %~ M.delete name
SMOFunction name -> pcFunctions %~ M.delete name
SMOTableObj tableName tableObjectId -> pcTables.ix tableName %~ case tableObjectId of
MTORel name _ -> tiCoreInfo.tciFieldInfoMap %~ M.delete (fromRel name)
MTOComputedField name -> tiCoreInfo.tciFieldInfoMap %~ M.delete (fromComputedField name)
MTORemoteRelationship name -> tiCoreInfo.tciFieldInfoMap %~ M.delete (fromRemoteRelationship name)
MTOPerm roleName permType -> withPermType permType \accessor ->
tiRolePermInfoMap.ix roleName.permAccToLens accessor .~ Nothing
MTOTrigger name -> tiEventTriggerInfoMap %~ M.delete name
MORemoteSchema name -> boRemoteSchemas %~ M.delete name
MORemoteSchemaPermissions name role -> boRemoteSchemas.ix name._1.rscPermissions %~ M.delete role
MOCronTrigger name -> boCronTriggers %~ M.delete name
MOTableObj tableName tableObjectId -> boTables.ix tableName %~ case tableObjectId of
MTORel name _ -> tiCoreInfo.tciFieldInfoMap %~ M.delete (fromRel name)
MTOComputedField name -> tiCoreInfo.tciFieldInfoMap %~ M.delete (fromComputedField name)
MTORemoteRelationship name -> tiCoreInfo.tciFieldInfoMap %~ M.delete (fromRemoteRelationship name)
MTOPerm roleName permType -> withPermType permType \accessor ->
tiRolePermInfoMap.ix roleName.permAccToLens accessor .~ Nothing
MTOTrigger name -> tiEventTriggerInfoMap %~ M.delete name
MOCronTrigger name -> boCronTriggers %~ M.delete name
MOCustomTypes -> boCustomTypes %~ const emptyAnnotatedCustomTypes
MOAction name -> boActions %~ M.delete name
MOActionPermission name role -> boActions.ix name.aiPermissions %~ M.delete role

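Aside: deleteMetadataObject above shows the other side of the nesting: removing a source-scoped object is now a lens update inside the source entry (boSources.ix source %~ ...) instead of a top-level delete. A standalone toy version of that pattern (requires the lens and unordered-containers packages):

{-# LANGUAGE TemplateHaskell #-}
import Control.Lens
import qualified Data.HashMap.Strict as M

newtype SourceInfo = SourceInfo { _pcTables :: M.HashMap String () } deriving Show
makeLenses ''SourceInfo

-- Mirrors `boSources.ix source %~ (pcTables %~ M.delete name)` from the hunk.
deleteTable :: String -> String -> M.HashMap String SourceInfo -> M.HashMap String SourceInfo
deleteTable source table = ix source . pcTables %~ M.delete table

main :: IO ()
main = print $ deleteTable "default" "author" $
  M.fromList [("default", SourceInfo (M.fromList [("author", ())]))]
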
@ -12,7 +12,7 @@ import qualified Data.Sequence as Seq
import qualified Language.GraphQL.Draft.Syntax as G

import Control.Arrow.Extended
import Control.Lens ((^.), _3)
import Control.Lens ((^.), _3, _4)
import Data.Aeson
import Data.Text.Extended

@ -28,13 +28,15 @@ import Hasura.RQL.Types
addNonColumnFields
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
, ArrowKleisli m arr, MonadError QErr m )
=> ( HashMap (TableName 'Postgres) (TableCoreInfoG 'Postgres (ColumnInfo 'Postgres) (ColumnInfo 'Postgres))
=> ( SourceName
, HashMap (TableName 'Postgres) (TableCoreInfoG 'Postgres (ColumnInfo 'Postgres) (ColumnInfo 'Postgres))
, FieldInfoMap (ColumnInfo 'Postgres)
, RemoteSchemaMap
, PostgresFunctionsMetadata
, NonColumnTableInputs
) `arr` FieldInfoMap (FieldInfo 'Postgres)
addNonColumnFields = proc ( rawTableInfo
addNonColumnFields = proc ( source
, rawTableInfo
, columns
, remoteSchemaMap
, pgFunctions
@ -42,33 +44,33 @@ addNonColumnFields = proc ( rawTableInfo
) -> do
objectRelationshipInfos
<- buildInfoMapPreservingMetadata
(_rdName . snd)
(_rdName . (^. _3))
(mkRelationshipMetadataObject ObjRel)
buildObjectRelationship
-< (_tciForeignKeys <$> rawTableInfo, map (_nctiTable,) _nctiObjectRelationships)
-< (_tciForeignKeys <$> rawTableInfo, map (source, _nctiTable,) _nctiObjectRelationships)

arrayRelationshipInfos
<- buildInfoMapPreservingMetadata
(_rdName . snd)
(_rdName . (^. _3))
(mkRelationshipMetadataObject ArrRel)
buildArrayRelationship
-< (_tciForeignKeys <$> rawTableInfo, map (_nctiTable,) _nctiArrayRelationships)
-< (_tciForeignKeys <$> rawTableInfo, map (source, _nctiTable,) _nctiArrayRelationships)

let relationshipInfos = objectRelationshipInfos <> arrayRelationshipInfos

computedFieldInfos
<- buildInfoMapPreservingMetadata
(_cfmName . (^. _3))
(\(_, t, c) -> mkComputedFieldMetadataObject t c)
(_cfmName . (^. _4))
(\(s, _, t, c) -> mkComputedFieldMetadataObject (s, t, c))
buildComputedField
-< (HS.fromList $ M.keys rawTableInfo, map (pgFunctions, _nctiTable,) _nctiComputedFields)
-< (HS.fromList $ M.keys rawTableInfo, map (source, pgFunctions, _nctiTable,) _nctiComputedFields)

rawRemoteRelationshipInfos
<- buildInfoMapPreservingMetadata
(_rrmName . snd)
(_rrmName . (^. _3))
mkRemoteRelationshipMetadataObject
buildRemoteRelationship
-< ((M.elems columns, remoteSchemaMap), map (_nctiTable,) _nctiRemoteRelationships)
-< ((M.elems columns, remoteSchemaMap), map (source, _nctiTable,) _nctiRemoteRelationships)

let relationshipFields = mapKeys fromRel relationshipInfos
computedFieldFields = mapKeys fromComputedField computedFieldInfos
@ -125,53 +127,57 @@ addNonColumnFields = proc ( rawTableInfo

mkRelationshipMetadataObject
|
||||
:: (ToJSON a)
|
||||
=> RelType -> (TableName 'Postgres, RelDef a) -> MetadataObject
|
||||
mkRelationshipMetadataObject relType (table, relDef) =
|
||||
let objectId = MOTableObj table $ MTORel (_rdName relDef) relType
|
||||
in MetadataObject objectId $ toJSON $ WithTable table relDef
|
||||
=> RelType -> (SourceName, TableName 'Postgres, RelDef a) -> MetadataObject
|
||||
mkRelationshipMetadataObject relType (source, table, relDef) =
|
||||
let objectId = MOSourceObjId source $
|
||||
SMOTableObj table $ MTORel (_rdName relDef) relType
|
||||
in MetadataObject objectId $ toJSON $ WithTable source table relDef
|
||||
|
||||
buildObjectRelationship
  :: ( ArrowChoice arr
     , ArrowWriter (Seq CollectedInfo) arr
     )
  => ( HashMap (TableName 'Postgres) (HashSet (ForeignKey 'Postgres))
     , ( TableName 'Postgres
     , ( SourceName
       , TableName 'Postgres
       , ObjRelDef
       )
     ) `arr` Maybe (RelInfo 'Postgres)
buildObjectRelationship = proc (fkeysMap, (table, relDef)) -> do
buildObjectRelationship = proc (fkeysMap, (source, table, relDef)) -> do
  let buildRelInfo def = do
        fkeys <- getTableInfo table fkeysMap
        objRelP2Setup table fkeys def
  buildRelationship -< (table, buildRelInfo, ObjRel, relDef)
        objRelP2Setup source table fkeys def
  buildRelationship -< (source, table, buildRelInfo, ObjRel, relDef)

buildArrayRelationship
  :: ( ArrowChoice arr
     , ArrowWriter (Seq CollectedInfo) arr
     )
  => ( HashMap (TableName 'Postgres) (HashSet (ForeignKey 'Postgres))
     , ( TableName 'Postgres
     , ( SourceName
       , TableName 'Postgres
       , ArrRelDef
       )
     ) `arr` Maybe (RelInfo 'Postgres)
buildArrayRelationship = proc (fkeysMap, (table, relDef)) -> do
  let buildRelInfo def = arrRelP2Setup fkeysMap table def
  buildRelationship -< (table, buildRelInfo, ArrRel, relDef)
buildArrayRelationship = proc (fkeysMap, (source, table, relDef)) -> do
  let buildRelInfo def = arrRelP2Setup fkeysMap source table def
  buildRelationship -< (source, table, buildRelInfo, ArrRel, relDef)

buildRelationship
  :: ( ArrowChoice arr
     , ArrowWriter (Seq CollectedInfo) arr
     , ToJSON a
     )
  => ( TableName 'Postgres
  => ( SourceName
     , TableName 'Postgres
     , (RelDef a -> Either QErr (RelInfo 'Postgres, [SchemaDependency]))
     , RelType
     , RelDef a
     ) `arr` Maybe (RelInfo 'Postgres)
buildRelationship = proc (table, buildRelInfo, relType, relDef) -> do
buildRelationship = proc (source, table, buildRelInfo, relType, relDef) -> do
  let relName = _rdName relDef
      metadataObject = mkRelationshipMetadataObject relType (table, relDef)
      schemaObject = SOTableObj table $ TORel relName
      metadataObject = mkRelationshipMetadataObject relType (source, table, relDef)
      schemaObject = SOSourceObj source $ SOITableObj table $ TORel relName
      addRelationshipContext e = "in relationship " <> relName <<> ": " <> e
  (| withRecordInconsistency (
     (| modifyErrA (do
@ -182,19 +188,19 @@ buildRelationship = proc (table, buildRelInfo, relType, relDef) -> do
     |) metadataObject

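Mechanically, most builders in this file change the same way: the proc pattern and the tuple fed through -< gain a leading source component. A minimal Arrows sketch of that widening, with illustrative names, runnable at the plain function arrow:

{-# LANGUAGE Arrows #-}
import Control.Arrow

type SourceName = String
type TableName  = String

-- A toy builder arrow whose input tuple carries the source, in the
-- same way buildRelationship and friends now do.
buildQualifiedName :: Arrow arr => arr (SourceName, TableName) String
buildQualifiedName = proc (source, table) ->
  returnA -< source ++ "." ++ table

main :: IO ()
main = putStrLn (buildQualifiedName ("default", "author"))
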
mkComputedFieldMetadataObject
  :: TableName 'Postgres -> ComputedFieldMetadata -> MetadataObject
mkComputedFieldMetadataObject table ComputedFieldMetadata{..} =
  let objectId = MOTableObj table $ MTOComputedField _cfmName
      definition = AddComputedField table _cfmName _cfmDefinition _cfmComment
  :: (SourceName, TableName 'Postgres, ComputedFieldMetadata) -> MetadataObject
mkComputedFieldMetadataObject (source, table, ComputedFieldMetadata{..}) =
  let objectId = MOSourceObjId source $ SMOTableObj table $ MTOComputedField _cfmName
      definition = AddComputedField source table _cfmName _cfmDefinition _cfmComment
  in MetadataObject objectId (toJSON definition)

buildComputedField
  :: ( ArrowChoice arr, ArrowWriter (Seq CollectedInfo) arr
     , ArrowKleisli m arr, MonadError QErr m )
  => ( HashSet (TableName 'Postgres)
     , (PostgresFunctionsMetadata, TableName 'Postgres, ComputedFieldMetadata)
     , (SourceName, PostgresFunctionsMetadata, TableName 'Postgres, ComputedFieldMetadata)
     ) `arr` Maybe (ComputedFieldInfo 'Postgres)
buildComputedField = proc (trackedTableNames, (pgFunctions, table, cf@ComputedFieldMetadata{..})) -> do
buildComputedField = proc (trackedTableNames, (source, pgFunctions, table, cf@ComputedFieldMetadata{..})) -> do
  let addComputedFieldContext e = "in computed field " <> _cfmName <<> ": " <> e
      function = _cfdFunction _cfmDefinition
      funcDefs = fromMaybe [] $ M.lookup function pgFunctions
@ -203,30 +209,32 @@ buildComputedField = proc (trackedTableNames, (pgFunctions, table, cf@ComputedFi
        rawfi <- bindErrorA -< handleMultipleFunctions (_cfdFunction _cfmDefinition) funcDefs
        bindErrorA -< addComputedFieldP2Setup trackedTableNames table _cfmName _cfmDefinition rawfi _cfmComment)
     |) (addTableContext table . addComputedFieldContext))
   |) (mkComputedFieldMetadataObject table cf)
   |) (mkComputedFieldMetadataObject (source, table, cf))

mkRemoteRelationshipMetadataObject
  :: (TableName 'Postgres, RemoteRelationshipMetadata) -> MetadataObject
mkRemoteRelationshipMetadataObject (table, RemoteRelationshipMetadata{..}) =
  let objectId = MOTableObj table $ MTORemoteRelationship _rrmName
  :: (SourceName, TableName 'Postgres, RemoteRelationshipMetadata) -> MetadataObject
mkRemoteRelationshipMetadataObject (source, table, RemoteRelationshipMetadata{..}) =
  let objectId = MOSourceObjId source $
                   SMOTableObj table $ MTORemoteRelationship _rrmName
      RemoteRelationshipDef{..} = _rrmDefinition
  in MetadataObject objectId $ toJSON $
     RemoteRelationship _rrmName table _rrdHasuraFields _rrdRemoteSchema _rrdRemoteField
     RemoteRelationship _rrmName source table _rrdHasuraFields _rrdRemoteSchema _rrdRemoteField

buildRemoteRelationship
  :: ( ArrowChoice arr, ArrowWriter (Seq CollectedInfo) arr
     , ArrowKleisli m arr, MonadError QErr m )
  => ( ([ColumnInfo 'Postgres], RemoteSchemaMap)
     , (TableName 'Postgres, RemoteRelationshipMetadata)
     , (SourceName, TableName 'Postgres, RemoteRelationshipMetadata)
     ) `arr` Maybe (RemoteFieldInfo 'Postgres)
buildRemoteRelationship = proc ( (pgColumns, remoteSchemaMap)
                               , (table, rrm@RemoteRelationshipMetadata{..})
                               , (source, table, rrm@RemoteRelationshipMetadata{..})
                               ) -> do
  let metadataObject = mkRemoteRelationshipMetadataObject (table, rrm)
      schemaObj = SOTableObj table $ TORemoteRel _rrmName
  let metadataObject = mkRemoteRelationshipMetadataObject (source, table, rrm)
      schemaObj = SOSourceObj source $
                    SOITableObj table $ TORemoteRel _rrmName
      addRemoteRelationshipContext e = "in remote relationship" <> _rrmName <<> ": " <> e
      RemoteRelationshipDef{..} = _rrmDefinition
      remoteRelationship = RemoteRelationship _rrmName table _rrdHasuraFields
      remoteRelationship = RemoteRelationship _rrmName source table _rrdHasuraFields
                             _rrdRemoteSchema _rrdRemoteField
  (| withRecordInconsistency (
     (| modifyErrA (do

@ -27,19 +27,20 @@ import Hasura.Session
buildTablePermissions
  :: ( ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
     , MonadError QErr m, ArrowWriter (Seq CollectedInfo) arr)
  => ( Inc.Dependency (TableCoreCache 'Postgres)
  => ( SourceName
     , Inc.Dependency (TableCoreCache 'Postgres)
     , FieldInfoMap (FieldInfo 'Postgres)
     , TablePermissionInputs
     ) `arr` (RolePermInfoMap 'Postgres)
buildTablePermissions = Inc.cache proc (tableCache, tableFields, tablePermissions) -> do
buildTablePermissions = Inc.cache proc (source, tableCache, tableFields, tablePermissions) -> do
  let alignedPermissions = alignPermissions tablePermissions
      table = _tpiTable tablePermissions

  (| Inc.keyed (\_ (insertPermission, selectPermission, updatePermission, deletePermission) -> do
       insert <- buildPermission -< (tableCache, table, tableFields, listToMaybe insertPermission)
       select <- buildPermission -< (tableCache, table, tableFields, listToMaybe selectPermission)
       update <- buildPermission -< (tableCache, table, tableFields, listToMaybe updatePermission)
       delete <- buildPermission -< (tableCache, table, tableFields, listToMaybe deletePermission)
       insert <- buildPermission -< (tableCache, source, table, tableFields, listToMaybe insertPermission)
       select <- buildPermission -< (tableCache, source, table, tableFields, listToMaybe selectPermission)
       update <- buildPermission -< (tableCache, source, table, tableFields, listToMaybe updatePermission)
       delete <- buildPermission -< (tableCache, source, table, tableFields, listToMaybe deletePermission)
       returnA -< RolePermInfo insert select update delete)
  |) alignedPermissions
  where
@ -56,11 +57,12 @@ buildTablePermissions = Inc.cache proc (tableCache, tableFields, tablePermission

mkPermissionMetadataObject
  :: forall a. (IsPerm a)
  => QualifiedTable -> PermDef a -> MetadataObject
mkPermissionMetadataObject table permDef =
  => SourceName -> QualifiedTable -> PermDef a -> MetadataObject
mkPermissionMetadataObject source table permDef =
  let permType = permAccToType (permAccessor :: PermAccessor 'Postgres (PermInfo a))
      objectId = MOTableObj table $ MTOPerm (_pdRole permDef) permType
      definition = toJSON $ WithTable table permDef
      objectId = MOSourceObjId source $
                   SMOTableObj table $ MTOPerm (_pdRole permDef) permType
      definition = toJSON $ WithTable source table permDef
  in MetadataObject objectId definition

mkRemoteSchemaPermissionMetadataObject
@ -73,13 +75,13 @@ mkRemoteSchemaPermissionMetadataObject (AddRemoteSchemaPermissions rsName roleNa
withPermission
  :: forall a b c s arr. (ArrowChoice arr, ArrowWriter (Seq CollectedInfo) arr, IsPerm c)
  => WriterA (Seq SchemaDependency) (ErrorA QErr arr) (a, s) b
  -> arr (a, ((QualifiedTable, PermDef c), s)) (Maybe b)
withPermission f = proc (e, ((table, permission), s)) -> do
  let metadataObject = mkPermissionMetadataObject table permission
  -> arr (a, ((SourceName, QualifiedTable, PermDef c), s)) (Maybe b)
withPermission f = proc (e, ((source, table, permission), s)) -> do
  let metadataObject = mkPermissionMetadataObject source table permission
      permType = permAccToType (permAccessor :: PermAccessor 'Postgres (PermInfo c))
      roleName = _pdRole permission
      schemaObject = SOTableObj table $
                       TOPerm roleName permType
      schemaObject = SOSourceObj source $
                       SOITableObj table $ TOPerm roleName permType
      addPermContext err = "in permission for role " <> roleName <<> ": " <> err
  (| withRecordInconsistency (
     (| withRecordDependencies (
@ -95,19 +97,20 @@ buildPermission
     , Inc.Cacheable a
     )
  => ( Inc.Dependency (TableCoreCache 'Postgres)
     , SourceName
     , QualifiedTable
     , FieldInfoMap (FieldInfo 'Postgres)
     , Maybe (PermDef a)
     ) `arr` Maybe (PermInfo a)
buildPermission = Inc.cache proc (tableCache, table, tableFields, maybePermission) -> do
buildPermission = Inc.cache proc (tableCache, source, table, tableFields, maybePermission) -> do
  (| traverseA ( \permission ->
     (| withPermission (do
          bindErrorA -< when (_pdRole permission == adminRoleName) $
            throw400 ConstraintViolation "cannot define permission for admin role"
          (info, dependencies) <- liftEitherA <<< Inc.bindDepend -< runExceptT $
            runTableCoreCacheRT (buildPermInfo table tableFields permission) (tableCache)
            runTableCoreCacheRT (buildPermInfo source table tableFields permission) (source, tableCache)
          tellA -< Seq.fromList dependencies
          returnA -< info)
     |) (table, permission))
     |) (source, table, permission))
  |) maybePermission
  >-> (\info -> join info >- returnA)

@ -16,15 +16,15 @@ import qualified Database.PG.Query as Q
purgeDependentObject
  :: (MonadError QErr m) => SchemaObjId -> m MetadataModifier
purgeDependentObject = \case
  SOTableObj tn tableObj -> pure $ MetadataModifier $
    metaTables.ix tn %~ case tableObj of
  SOSourceObj source (SOITableObj tn tableObj) -> pure $ MetadataModifier $
    tableMetadataSetter source tn %~ case tableObj of
      TOPerm rn pt        -> dropPermissionInMetadata rn pt
      TORel rn            -> dropRelationshipInMetadata rn
      TOTrigger trn       -> dropEventTriggerInMetadata trn
      TOComputedField ccn -> dropComputedFieldInMetadata ccn
      TORemoteRel rrn     -> dropRemoteRelationshipInMetadata rrn
      _                   -> id
  SOFunction qf -> pure $ dropFunctionInMetadata qf
  SOSourceObj source (SOIFunction qf) -> pure $ dropFunctionInMetadata source qf
  schemaObjId ->
    throw500 $ "unexpected dependent object: " <> reportSchemaObj schemaObjId

@ -31,7 +31,7 @@ import Data.Aeson.Casing
import Data.Aeson.TH
import Data.List.Extended (duplicates)

import Hasura.Backends.Postgres.SQL.Types
import Hasura.Backends.Postgres.SQL.Types hiding (TableName)
import Hasura.RQL.DDL.Schema.Common
import Hasura.RQL.Types hiding (ConstraintName, fmFunction,
                                tmComputedFields, tmTable)
@ -174,21 +174,21 @@ getTableDiff oldtm newtm =

getTableChangeDeps
  :: (QErrM m, CacheRM m)
  => QualifiedTable -> TableDiff 'Postgres -> m [SchemaObjId]
getTableChangeDeps tn tableDiff = do
  => SourceName -> QualifiedTable -> TableDiff 'Postgres -> m [SchemaObjId]
getTableChangeDeps source tn tableDiff = do
  sc <- askSchemaCache
  -- for all the dropped columns
  droppedColDeps <- fmap concat $ forM droppedCols $ \droppedCol -> do
    let objId = SOTableObj tn $ TOCol droppedCol
    let objId = SOSourceObj source $ SOITableObj tn $ TOCol droppedCol
    return $ getDependentObjs sc objId
  -- for all dropped constraints
  droppedConsDeps <- fmap concat $ forM droppedFKeyConstraints $ \droppedCons -> do
    let objId = SOTableObj tn $ TOForeignKey droppedCons
    let objId = SOSourceObj source $ SOITableObj tn $ TOForeignKey droppedCons
    return $ getDependentObjs sc objId
  return $ droppedConsDeps <> droppedColDeps <> droppedComputedFieldDeps
  where
    TableDiff _ droppedCols _ _ droppedFKeyConstraints computedFieldDiff _ _ = tableDiff
    droppedComputedFieldDeps = map (SOTableObj tn . TOComputedField) $ _cfdDropped computedFieldDiff
    droppedComputedFieldDeps = map (SOSourceObj source . SOITableObj tn . TOComputedField) $ _cfdDropped computedFieldDiff

data SchemaDiff (b :: BackendType)
  = SchemaDiff
@ -207,21 +207,22 @@ getSchemaDiff oldMeta newMeta =

getSchemaChangeDeps
  :: (QErrM m, CacheRM m)
  => SchemaDiff 'Postgres -> m [SchemaObjId]
getSchemaChangeDeps schemaDiff = do
  => SourceName -> SchemaDiff 'Postgres -> m [SchemaObjId]
getSchemaChangeDeps source schemaDiff = do
  -- Get schema cache
  sc <- askSchemaCache
  let tableIds = map SOTable droppedTables
  let tableIds = map (SOSourceObj source . SOITable) droppedTables
  -- Get the dependents of the dropped tables
  let tableDropDeps = concatMap (getDependentObjs sc) tableIds
  tableModDeps <- concat <$> traverse (uncurry getTableChangeDeps) alteredTables
  tableModDeps <- concat <$> traverse (uncurry (getTableChangeDeps source)) alteredTables
  return $ filter (not . isDirectDep) $
    HS.toList $ HS.fromList $ tableDropDeps <> tableModDeps
  where
    SchemaDiff droppedTables alteredTables = schemaDiff

    isDirectDep (SOTableObj tn _) = tn `HS.member` HS.fromList droppedTables
    isDirectDep _                 = False
    isDirectDep (SOSourceObj s (SOITableObj tn _)) =
      s == source && tn `HS.member` HS.fromList droppedTables
    isDirectDep _ = False

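The same source-aware pattern now governs dependency analysis: a dependency counts as direct only when both the source and the table match. A self-contained restatement of the isDirectDep check over simplified stand-in types (lists stand in for the HashSets used above):

type SourceName = String
type TableName  = String

data TableObjId = TOPerm String | TORel String | TOTrigger String
  deriving Show

data SourceObjId = SOITable TableName | SOITableObj TableName TableObjId
  deriving Show

data SchemaObjId = SOSourceObj SourceName SourceObjId
  deriving Show

-- A dependency is "direct" only if it lives in the same source and
-- belongs to one of the dropped tables.
isDirectDep :: SourceName -> [TableName] -> SchemaObjId -> Bool
isDirectDep source dropped (SOSourceObj s (SOITableObj tn _)) =
  s == source && tn `elem` dropped
isDirectDep _ _ _ = False

main :: IO ()
main = print $ isDirectDep "default" ["author"]
  (SOSourceObj "default" (SOITableObj "author" (TORel "articles")))
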
data FunctionDiff
  = FunctionDiff

@ -12,6 +12,7 @@ module Hasura.RQL.DDL.Schema.Enum (
  -- * Loading table info
  , resolveEnumReferences
  , fetchAndValidateEnumValues
  , fetchEnumValuesFromDb
  ) where

import Hasura.Prelude
@ -23,20 +24,23 @@ import qualified Data.Sequence.NonEmpty as NESeq
import qualified Database.PG.Query as Q
import qualified Language.GraphQL.Draft.Syntax as G

import Control.Monad.Trans.Control (MonadBaseControl)
import Control.Monad.Validate
import Data.List (delete)
import Data.Text.Extended

import qualified Hasura.Backends.Postgres.SQL.DML as S (Extractor(..), SQLExp(SENull), mkExtr, mkSelect, selFrom, mkSimpleFromExp, selExtr)
import qualified Hasura.Backends.Postgres.SQL.DML as S (Extractor (..), SQLExp (SENull), mkExtr,
                                                        mkSelect, mkSimpleFromExp, selExtr,
                                                        selFrom)

import Hasura.Backends.Postgres.Connection (MonadTx(..), defaultTxErrorHandler)
import Hasura.Backends.Postgres.SQL.Types (PGScalarType(PGText))
import Hasura.Backends.Postgres.Connection
import Hasura.Backends.Postgres.SQL.Types hiding (TableName)
import Hasura.RQL.Types.Column
import Hasura.RQL.Types.Common
import Hasura.RQL.Types.Error
import Hasura.Server.Utils (makeReasonMessage)
import Hasura.SQL.Backend
import Hasura.SQL.Types
import Hasura.Server.Utils (makeReasonMessage)


-- | Given a map of enum tables, computes all enum references implied by the given set of foreign
@ -62,7 +66,8 @@ resolveEnumReferences enumTables =
  pure (localColumn, EnumReference (_fkForeignTable foreignKey) enumValues)

data EnumTableIntegrityError (b :: BackendType)
  = EnumTableMissingPrimaryKey
  = EnumTablePostgresError !Text
  | EnumTableMissingPrimaryKey
  | EnumTableMultiColumnPrimaryKey ![Column b]
  | EnumTableNonTextualPrimaryKey !(RawColumnInfo b)
  | EnumTableNoEnumValues
@ -71,19 +76,30 @@ data EnumTableIntegrityError (b :: BackendType)
  | EnumTableTooManyColumns ![Column b]

fetchAndValidateEnumValues
  :: (MonadTx m)
  => TableName 'Postgres
  :: (MonadIO m, MonadBaseControl IO m)
  => SourceConfig 'Postgres
  -> TableName 'Postgres
  -> Maybe (PrimaryKey (RawColumnInfo 'Postgres))
  -> [RawColumnInfo 'Postgres]
  -> m EnumValues
fetchAndValidateEnumValues tableName maybePrimaryKey columnInfos =
  -> m (Either QErr EnumValues)
fetchAndValidateEnumValues pgSourceConfig tableName maybePrimaryKey columnInfos = runExceptT $
  either (throw400 ConstraintViolation . showErrors) pure =<< runValidateT fetchAndValidate
  where
    fetchAndValidate :: (MonadTx m, MonadValidate [EnumTableIntegrityError 'Postgres] m) => m EnumValues
    fetchAndValidate
      :: (MonadIO m, MonadBaseControl IO m, MonadValidate [EnumTableIntegrityError 'Postgres] m)
      => m EnumValues
    fetchAndValidate = do
      primaryKeyColumn <- tolerate validatePrimaryKey
      maybeCommentColumn <- validateColumns primaryKeyColumn
      maybe (refute mempty) (fetchEnumValues maybeCommentColumn) primaryKeyColumn
      maybePrimaryKeyColumn <- tolerate validatePrimaryKey
      maybeCommentColumn <- validateColumns maybePrimaryKeyColumn
      case maybePrimaryKeyColumn of
        Nothing -> refute mempty
        Just primaryKeyColumn -> do
          result <- runPgSourceReadTx pgSourceConfig $ runValidateT $
                    fetchEnumValuesFromDb tableName primaryKeyColumn maybeCommentColumn
          case result of
            Left e -> (refute . pure . EnumTablePostgresError . qeError) e
            Right (Left vErrs) -> refute vErrs
            Right (Right r) -> pure r
      where
        validatePrimaryKey = case maybePrimaryKey of
          Nothing -> refute [EnumTableMissingPrimaryKey]
@ -102,31 +118,6 @@ fetchAndValidateEnumValues tableName maybePrimaryKey columnInfos =
          _ -> dispute [EnumTableNonTextualCommentColumn column] $> Nothing
        columns -> dispute [EnumTableTooManyColumns $ map prciName columns] $> Nothing

    fetchEnumValues maybeCommentColumn primaryKeyColumn = do
      let nullExtr = S.Extractor S.SENull Nothing
          commentExtr = maybe nullExtr (S.mkExtr . prciName) maybeCommentColumn
          -- FIXME: postgres-specific sql generation
          query = Q.fromBuilder $ toSQL S.mkSelect
            { S.selFrom = Just $ S.mkSimpleFromExp tableName
            , S.selExtr = [S.mkExtr (prciName primaryKeyColumn), commentExtr] }
      rawEnumValues <- liftTx $ Q.withQE defaultTxErrorHandler query () True
      when (null rawEnumValues) $ dispute [EnumTableNoEnumValues]
      let enumValues = flip map rawEnumValues $
            \(enumValueText, comment) ->
              case mkValidEnumValueName enumValueText of
                Nothing -> Left enumValueText
                Just enumValue -> Right (EnumValue enumValue, EnumValueInfo comment)
          badNames = lefts enumValues
          validEnums = rights enumValues
      case NE.nonEmpty badNames of
        Just someBadNames -> refute [EnumTableInvalidEnumValueNames someBadNames]
        Nothing -> pure $ M.fromList validEnums

    -- https://graphql.github.io/graphql-spec/June2018/#EnumValue
    mkValidEnumValueName name =
      if name `elem` ["true", "false", "null"] then Nothing
      else G.mkName name

    showErrors :: [EnumTableIntegrityError 'Postgres] -> Text
    showErrors allErrors =
      "the table " <> tableName <<> " cannot be used as an enum " <> reasonsMessage
@ -135,6 +126,7 @@ fetchAndValidateEnumValues tableName maybePrimaryKey columnInfos =

    showOne :: EnumTableIntegrityError 'Postgres -> Text
    showOne = \case
      EnumTablePostgresError err -> "postgres error: " <> err
      EnumTableMissingPrimaryKey -> "the table must have a primary key"
      EnumTableMultiColumnPrimaryKey cols ->
        "the table’s primary key must not span multiple columns ("
@ -159,3 +151,33 @@ fetchAndValidateEnumValues tableName maybePrimaryKey columnInfos =
    typeMismatch description colInfo expected =
      "the table’s " <> description <> " (" <> prciName colInfo <<> ") must have type "
      <> expected <<> ", not type " <>> prciType colInfo

fetchEnumValuesFromDb
  :: (MonadTx m, MonadValidate [EnumTableIntegrityError 'Postgres] m)
  => TableName 'Postgres
  -> RawColumnInfo 'Postgres
  -> Maybe (RawColumnInfo 'Postgres)
  -> m EnumValues
fetchEnumValuesFromDb tableName primaryKeyColumn maybeCommentColumn = do
  let nullExtr = S.Extractor S.SENull Nothing
      commentExtr = maybe nullExtr (S.mkExtr . prciName) maybeCommentColumn
      query = Q.fromBuilder $ toSQL S.mkSelect
        { S.selFrom = Just $ S.mkSimpleFromExp tableName
        , S.selExtr = [S.mkExtr (prciName primaryKeyColumn), commentExtr] }
  rawEnumValues <- liftTx $ Q.withQE defaultTxErrorHandler query () True
  when (null rawEnumValues) $ dispute [EnumTableNoEnumValues]
  let enumValues = flip map rawEnumValues $
        \(enumValueText, comment) ->
          case mkValidEnumValueName enumValueText of
            Nothing -> Left enumValueText
            Just enumValue -> Right (EnumValue enumValue, EnumValueInfo comment)
      badNames = lefts enumValues
      validEnums = rights enumValues
  case NE.nonEmpty badNames of
    Just someBadNames -> refute [EnumTableInvalidEnumValueNames someBadNames]
    Nothing -> pure $ M.fromList validEnums
  where
    -- https://graphql.github.io/graphql-spec/June2018/#EnumValue
    mkValidEnumValueName name =
      if name `elem` ["true", "false", "null"] then Nothing
      else G.mkName name

@ -7,7 +7,6 @@ module Hasura.RQL.DDL.Schema.Function where
import Hasura.Prelude

import qualified Control.Monad.Validate as MV
import qualified Data.HashMap.Strict as M
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.Sequence as Seq
import qualified Data.Text as T
@ -15,6 +14,8 @@ import qualified Database.PG.Query as Q

import Control.Lens hiding ((.=))
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
import Data.Text.Extended

import qualified Language.GraphQL.Draft.Syntax as G
@ -65,12 +66,13 @@ data FunctionIntegrityError

mkFunctionInfo
  :: (QErrM m)
  => QualifiedFunction
  => SourceName
  -> QualifiedFunction
  -> SystemDefined
  -> FunctionConfig
  -> RawFunctionInfo
  -> m (FunctionInfo, SchemaDependency)
mkFunctionInfo qf systemDefined FunctionConfig{..} rawFuncInfo =
mkFunctionInfo source qf systemDefined FunctionConfig{..} rawFuncInfo =
  either (throw400 NotSupported . showErrors) pure
    =<< MV.runValidateT validateFunction
  where
@ -114,7 +116,7 @@ mkFunctionInfo qf systemDefined FunctionConfig{..} rawFuncInfo =
      let retTable = typeToTable returnType

      pure ( FunctionInfo qf systemDefined funVol exposeAs inputArguments retTable descM
           , SchemaDependency (SOTable retTable) DRTable
           , SchemaDependency (SOSourceObj source $ SOITable retTable) DRTable
           )

    validateFunctionArgNames = do
@ -166,22 +168,23 @@ newtype TrackFunction
-- Validate function tracking operation. Fails if function is already being
-- tracked, or if a table with the same name is being tracked.
trackFunctionP1
  :: (CacheRM m, QErrM m) => QualifiedFunction -> m ()
trackFunctionP1 qf = do
  :: (CacheRM m, QErrM m) => SourceName -> QualifiedFunction -> m ()
trackFunctionP1 sourceName qf = do
  rawSchemaCache <- askSchemaCache
  when (M.member qf $ scFunctions rawSchemaCache) $
  when (isJust $ getPGFunctionInfo sourceName qf $ scPostgres rawSchemaCache) $
    throw400 AlreadyTracked $ "function already tracked : " <>> qf
  let qt = fmap (TableName . getFunctionTxt) qf
  when (M.member qt $ scTables rawSchemaCache) $
  when (isJust $ getPGTableInfo sourceName qt $ scPostgres rawSchemaCache) $
    throw400 NotSupported $ "table with name " <> qf <<> " already exists"

trackFunctionP2
  :: (MonadError QErr m, CacheRWM m, MetadataM m)
  => QualifiedFunction -> FunctionConfig -> m EncJSON
trackFunctionP2 qf config = do
  buildSchemaCacheFor (MOFunction qf)
  => SourceName -> QualifiedFunction -> FunctionConfig -> m EncJSON
trackFunctionP2 sourceName qf config = do
  buildSchemaCacheFor (MOSourceObjId sourceName $ SMOFunction qf)
    $ MetadataModifier
    $ metaFunctions %~ OMap.insert qf (FunctionMetadata qf config)
    $ metaSources.ix sourceName.smFunctions
      %~ OMap.insert qf (FunctionMetadata qf config)
  pure successMsg

handleMultipleFunctions :: (QErrM m) => QualifiedFunction -> [a] -> m a
@ -206,38 +209,49 @@ fetchRawFunctionInfo qf@(QualifiedObject sn fn) =
  |] (sn, fn) True

runTrackFunc
  :: (MonadTx m, CacheRWM m, MetadataM m)
  :: (MonadError QErr m, CacheRWM m, MetadataM m)
  => TrackFunction -> m EncJSON
runTrackFunc (TrackFunction qf) = do
  trackFunctionP1 qf
  trackFunctionP2 qf emptyFunctionConfig
  -- v1 track_function lacks a means to take extra arguments
  trackFunctionP1 defaultSource qf
  trackFunctionP2 defaultSource qf emptyFunctionConfig

runTrackFunctionV2
  :: (QErrM m, CacheRWM m, MetadataM m)
  => TrackFunctionV2 -> m EncJSON
runTrackFunctionV2 (TrackFunctionV2 qf config) = do
  trackFunctionP1 qf
  trackFunctionP2 qf config
runTrackFunctionV2 (TrackFunctionV2 source qf config) = do
  trackFunctionP1 source qf
  trackFunctionP2 source qf config

-- | JSON API payload for 'untrack_function':
--
-- https://hasura.io/docs/1.0/graphql/core/api-reference/schema-metadata-api/custom-functions.html#untrack-function
newtype UnTrackFunction
data UnTrackFunction
  = UnTrackFunction
  { utfName :: QualifiedFunction }
  deriving (Show, Eq, FromJSON, ToJSON)
  { _utfFunction :: !QualifiedFunction
  , _utfSource   :: !SourceName
  } deriving (Show, Eq)
$(deriveToJSON (aesonDrop 4 snakeCase) ''UnTrackFunction)

instance FromJSON UnTrackFunction where
  parseJSON v = withSource <|> withoutSource
    where
      withoutSource = UnTrackFunction <$> parseJSON v <*> pure defaultSource
      withSource = flip (withObject "Object") v \o ->
                   UnTrackFunction <$> o .: "table"
                                   <*> o .:? "source" .!= defaultSource

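This parse-with-fallback idiom repeats for every payload type in the patch: first try the object form carrying an optional "source" key, then fall back to the bare pre-multi-source form. A standalone aeson sketch of the same idiom (the type and field names here are illustrative, not Hasura's):

{-# LANGUAGE OverloadedStrings #-}
import Control.Applicative ((<|>))
import Data.Aeson

data Untrack = Untrack
  { uFunction :: String
  , uSource   :: String
  } deriving Show

instance FromJSON Untrack where
  parseJSON v = withSource <|> withoutSource
    where
      -- New-style payload: an object with an optional "source" key.
      withSource = withObject "Untrack" (\o ->
        Untrack <$> o .: "function"
                <*> o .:? "source" .!= "default") v
      -- Old-style payload: just the bare name.
      withoutSource = Untrack <$> parseJSON v <*> pure "default"

main :: IO ()
main = do
  print (decode "\"public.search_articles\"" :: Maybe Untrack)
  print (decode "{\"function\":\"public.search_articles\",\"source\":\"db2\"}"
          :: Maybe Untrack)

Both payloads decode to the same record, with the source defaulting when absent, which is what keeps pre-multi-source API calls working unchanged.
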
runUntrackFunc
  :: (CacheRWM m, MonadError QErr m, MetadataM m)
  => UnTrackFunction -> m EncJSON
runUntrackFunc (UnTrackFunction qf) = do
  void $ askFunctionInfo qf
runUntrackFunc (UnTrackFunction qf source) = do
  void $ askFunctionInfo source qf
  -- Delete function from metadata
  withNewInconsistentObjsCheck
    $ buildSchemaCache
    $ dropFunctionInMetadata qf
    $ dropFunctionInMetadata defaultSource qf
  pure successMsg

dropFunctionInMetadata :: QualifiedFunction -> MetadataModifier
dropFunctionInMetadata function = MetadataModifier $
  metaFunctions %~ OMap.delete function
dropFunctionInMetadata :: SourceName -> QualifiedFunction -> MetadataModifier
dropFunctionInMetadata source function = MetadataModifier $
  metaSources.ix source.smFunctions %~ OMap.delete function

@ -24,8 +24,9 @@ import Hasura.RQL.DDL.ComputedField
import Hasura.RQL.DDL.Permission
import Hasura.RQL.Types

saveMetadataToHdbTables :: (MonadTx m, HasSystemDefined m) => Metadata -> m ()
saveMetadataToHdbTables (Metadata tables functions schemas collections
saveMetadataToHdbTables
  :: (MonadTx m, HasSystemDefined m) => MetadataNoSources -> m ()
saveMetadataToHdbTables (MetadataNoSources tables functions schemas collections
                         allowlist customTypes actions cronTriggers) = do

  withPathK "tables" $ do
@ -46,7 +47,7 @@ saveMetadataToHdbTables (Metadata tables functions schemas collections
      indexedForM_ _tmComputedFields $
        \(ComputedFieldMetadata name definition comment) ->
          addComputedFieldToCatalog $
            AddComputedField _tmTable name definition comment
            AddComputedField defaultSource _tmTable name definition comment

      -- Remote Relationships
      withPathK "remote_relationships" $
@ -54,7 +55,7 @@ saveMetadataToHdbTables (Metadata tables functions schemas collections
          \(RemoteRelationshipMetadata name def) -> do
            let RemoteRelationshipDef rs hf rf = def
            addRemoteRelationshipToCatalog $
              RemoteRelationship name _tmTable hf rs rf
              RemoteRelationship name defaultSource _tmTable hf rs rf

      -- Permissions
      withPathK "insert_permissions" $ processPerms _tmTable _tmInsertPermissions
@ -167,7 +168,7 @@ addComputedFieldToCatalog q =
  |] (schemaName, tableName, computedField, Q.AltJ definition, comment) True
  where
    QualifiedObject schemaName tableName = table
    AddComputedField table computedField definition comment = q
    AddComputedField _ table computedField definition comment = q

addRemoteRelationshipToCatalog :: MonadTx m => RemoteRelationship -> m ()
addRemoteRelationshipToCatalog remoteRelationship = liftTx $
@ -278,7 +279,7 @@ addCronTriggerToCatalog CronTriggerMetadata {..} = liftTx $ do
  let scheduleTimes = generateScheduleTimes currentTime 100 ctSchedule -- generate next 100 events
  insertScheduledEventTx $ SESCron $ map (CronEventSeed ctName) scheduleTimes

fetchMetadataFromHdbTables :: MonadTx m => m Metadata
fetchMetadataFromHdbTables :: MonadTx m => m MetadataNoSources
fetchMetadataFromHdbTables = liftTx do
  tables <- Q.catchE defaultTxErrorHandler fetchTables
  let tableMetaMap = OMap.fromList . flip map tables $
@ -340,7 +341,7 @@ fetchMetadataFromHdbTables = liftTx do
  -- fetch actions
  actions <- oMapFromL _amName <$> fetchActions

  Metadata fullTableMetaMap functions remoteSchemas collections
  MetadataNoSources fullTableMetaMap functions remoteSchemas collections
    allowlist customTypes actions <$> fetchCronTriggers

  where

@ -55,25 +55,25 @@ renameTableInMetadata
  , CacheRM m
  , MonadWriter MetadataModifier m
  )
  => QualifiedTable -> QualifiedTable -> m ()
renameTableInMetadata newQT oldQT = do
  => SourceName -> QualifiedTable -> QualifiedTable -> m ()
renameTableInMetadata source newQT oldQT = do
  sc <- askSchemaCache
  let allDeps = getDependentObjs sc $ SOTable oldQT
  let allDeps = getDependentObjs sc $ SOSourceObj source $ SOITable oldQT

  -- update all dependent schema objects
  forM_ allDeps $ \case
    (SOTableObj refQT (TORel rn)) ->
      updateRelDefs refQT rn (oldQT, newQT)
    (SOTableObj refQT (TOPerm rn pt)) ->
      updatePermFlds refQT rn pt $ RTable (oldQT, newQT)
    (SOSourceObj _ (SOITableObj refQT (TORel rn))) ->
      updateRelDefs source refQT rn (oldQT, newQT)
    (SOSourceObj _ (SOITableObj refQT (TOPerm rn pt))) ->
      updatePermFlds source refQT rn pt $ RTable (oldQT, newQT)
    -- A trigger's definition is not dependent on the table directly
    (SOTableObj _ (TOTrigger _)) -> pure ()
    (SOSourceObj _ (SOITableObj _ (TOTrigger _))) -> pure ()
    -- A remote relationship's definition is not dependent on the table directly
    (SOTableObj _ (TORemoteRel _)) -> pure ()
    (SOSourceObj _ (SOITableObj _ (TORemoteRel _))) -> pure ()

    d -> otherDeps errMsg d
  -- Update table name in metadata
  tell $ MetadataModifier $ metaTables %~ \tables ->
  tell $ MetadataModifier $ metaSources.ix source.smTables %~ \tables ->
    flip (maybe tables) (OMap.lookup oldQT tables) $
      \tableMeta -> OMap.delete oldQT $ OMap.insert newQT tableMeta{_tmTable = newQT} tables
  where
@ -84,27 +84,28 @@ renameColumnInMetadata
  , CacheRM m
  , MonadWriter MetadataModifier m
  )
  => PGCol -> PGCol -> QualifiedTable -> FieldInfoMap (FieldInfo 'Postgres) -> m ()
renameColumnInMetadata oCol nCol qt fieldInfo = do
  => PGCol -> PGCol -> SourceName -> QualifiedTable -> FieldInfoMap (FieldInfo 'Postgres) -> m ()
renameColumnInMetadata oCol nCol source qt fieldInfo = do
  sc <- askSchemaCache
  -- Check if any relation exists with new column name
  assertFldNotExists
  -- Fetch dependent objects
  let depObjs = getDependentObjs sc $ SOTableObj qt $ TOCol oCol
  let depObjs = getDependentObjs sc $ SOSourceObj source $
                SOITableObj qt $ TOCol oCol
      renameFld = RFCol $ RenameItem qt oCol nCol
  -- Update dependent objects
  forM_ depObjs $ \case
    (SOTableObj refQT (TOPerm role pt)) ->
      updatePermFlds refQT role pt $ RField renameFld
    (SOTableObj refQT (TORel rn)) ->
      updateColInRel refQT rn $ RenameItem qt oCol nCol
    (SOTableObj refQT (TOTrigger triggerName)) ->
      updateColInEventTriggerDef refQT triggerName $ RenameItem qt oCol nCol
    (SOTableObj _ (TORemoteRel remoteRelName)) ->
      updateColInRemoteRelationship remoteRelName $ RenameItem qt oCol nCol
    (SOSourceObj _ (SOITableObj refQT (TOPerm role pt))) ->
      updatePermFlds source refQT role pt $ RField renameFld
    (SOSourceObj _ (SOITableObj refQT (TORel rn))) ->
      updateColInRel source refQT rn $ RenameItem qt oCol nCol
    (SOSourceObj _ (SOITableObj refQT (TOTrigger triggerName))) ->
      updateColInEventTriggerDef source refQT triggerName $ RenameItem qt oCol nCol
    (SOSourceObj _ (SOITableObj _ (TORemoteRel remoteRelName))) ->
      updateColInRemoteRelationship source remoteRelName $ RenameItem qt oCol nCol
    d -> otherDeps errMsg d
  -- Update custom column names
  possiblyUpdateCustomColumnNames qt oCol nCol
  possiblyUpdateCustomColumnNames source qt oCol nCol
  where
    errMsg = "cannot rename column " <> oCol <<> " to " <>> nCol
    assertFldNotExists =
@ -120,17 +121,18 @@ renameRelationshipInMetadata
  , CacheRM m
  , MonadWriter MetadataModifier m
  )
  => QualifiedTable -> RelName -> RelType -> RelName -> m ()
renameRelationshipInMetadata qt oldRN relType newRN = do
  => SourceName -> QualifiedTable -> RelName -> RelType -> RelName -> m ()
renameRelationshipInMetadata source qt oldRN relType newRN = do
  sc <- askSchemaCache
  let depObjs = getDependentObjs sc $ SOTableObj qt $ TORel oldRN
  let depObjs = getDependentObjs sc $ SOSourceObj source $
                SOITableObj qt $ TORel oldRN
      renameFld = RFRel $ RenameItem qt oldRN newRN

  forM_ depObjs $ \case
    (SOTableObj refQT (TOPerm role pt)) ->
      updatePermFlds refQT role pt $ RField renameFld
    (SOSourceObj _ (SOITableObj refQT (TOPerm role pt))) ->
      updatePermFlds source refQT role pt $ RField renameFld
    d -> otherDeps errMsg d
  tell $ MetadataModifier $ metaTables.ix qt %~ case relType of
  tell $ MetadataModifier $ tableMetadataSetter source qt %~ case relType of
    ObjRel -> tmObjectRelationships %~ rewriteRelationships
    ArrRel -> tmArrayRelationships %~ rewriteRelationships
  where
@ -147,11 +149,11 @@ updateRelDefs
  , CacheRM m
  , MonadWriter MetadataModifier m
  )
  => QualifiedTable -> RelName -> RenameTable -> m ()
updateRelDefs qt rn renameTable = do
  fim <- askFieldInfoMap qt
  => SourceName -> QualifiedTable -> RelName -> RenameTable -> m ()
updateRelDefs source qt rn renameTable = do
  fim <- askFieldInfoMap source qt
  ri <- askRelType fim rn ""
  tell $ MetadataModifier $ metaTables.ix qt %~ case riType ri of
  tell $ MetadataModifier $ tableMetadataSetter source qt %~ case riType ri of
    ObjRel -> tmObjectRelationships.ix rn %~ updateObjRelDef renameTable
    ArrRel -> tmArrayRelationships.ix rn %~ updateArrRelDef renameTable
  where
@ -181,13 +183,13 @@ updatePermFlds
  , CacheRM m
  , MonadWriter MetadataModifier m
  )
  => QualifiedTable -> RoleName -> PermType -> Rename -> m ()
updatePermFlds refQT rn pt rename = do
  tables <- scTables <$> askSchemaCache
  => SourceName -> QualifiedTable -> RoleName -> PermType -> Rename -> m ()
updatePermFlds source refQT rn pt rename = do
  tables <- getSourceTables source
  let withTables :: Reader (TableCache 'Postgres) a -> a
      withTables = flip runReader tables
  tell $ MetadataModifier $
    metaTables.ix refQT %~ case pt of
    tableMetadataSetter source refQT %~ case pt of
      PTInsert ->
        tmInsertPermissions.ix rn.pdPermission %~ \insPerm ->
          withTables $ updateInsPermFlds refQT rename insPerm
@ -338,13 +340,13 @@ updateColExp qt rf (ColExp fld val) =
-- rename columns in relationship definitions
updateColInRel
  :: (CacheRM m, MonadWriter MetadataModifier m)
  => QualifiedTable -> RelName -> RenameCol -> m ()
updateColInRel fromQT rn rnCol = do
  tables <- scTables <$> askSchemaCache
  => SourceName -> QualifiedTable -> RelName -> RenameCol -> m ()
updateColInRel source fromQT rn rnCol = do
  tables <- getSourceTables source
  let maybeRelInfo =
        tables ^? ix fromQT.tiCoreInfo.tciFieldInfoMap.ix (fromRel rn)._FIRelationship
  forM_ maybeRelInfo $ \relInfo ->
    tell $ MetadataModifier $ metaTables.ix fromQT %~
    tell $ MetadataModifier $ tableMetadataSetter source fromQT %~
      case riType relInfo of
        ObjRel -> tmObjectRelationships.ix rn.rdUsing %~
          updateColInObjRel fromQT (riRTable relInfo) rnCol
@ -355,12 +357,12 @@ updateColInRemoteRelationship
  :: ( MonadError QErr m
     , MonadWriter MetadataModifier m
     )
  => RemoteRelationshipName -> RenameCol -> m ()
updateColInRemoteRelationship remoteRelationshipName renameCol = do
  => SourceName -> RemoteRelationshipName -> RenameCol -> m ()
updateColInRemoteRelationship source remoteRelationshipName renameCol = do
  oldColName <- parseGraphQLName $ getPGColTxt oldCol
  newColName <- parseGraphQLName $ getPGColTxt newCol
  tell $ MetadataModifier $
    metaTables.ix qt.tmRemoteRelationships.ix remoteRelationshipName.rrmDefinition %~
    tableMetadataSetter source qt.tmRemoteRelationships.ix remoteRelationshipName.rrmDefinition %~
      (rrdHasuraFields %~ modifyHasuraFields) .
      (rrdRemoteField %~ modifyFieldCalls oldColName newColName)
  where
@ -392,10 +394,10 @@ updateColInRemoteRelationship remoteRelationshipName renameCol = do
-- rename columns in relationship definitions
updateColInEventTriggerDef
  :: (MonadWriter MetadataModifier m)
  => QualifiedTable -> TriggerName -> RenameCol -> m ()
updateColInEventTriggerDef table trigName rnCol =
  => SourceName -> QualifiedTable -> TriggerName -> RenameCol -> m ()
updateColInEventTriggerDef source table trigName rnCol =
  tell $ MetadataModifier $
    metaTables.ix table.tmEventTriggers.ix trigName %~ rewriteEventTriggerConf
    tableMetadataSetter source table.tmEventTriggers.ix trigName %~ rewriteEventTriggerConf
  where
    rewriteSubsCols = \case
      SubCStar -> SubCStar
@ -460,10 +462,14 @@ updateColMap fromQT toQT rnCol =

possiblyUpdateCustomColumnNames
  :: MonadWriter MetadataModifier m
  => QualifiedTable -> PGCol -> PGCol -> m ()
possiblyUpdateCustomColumnNames qt oCol nCol = do
  => SourceName -> QualifiedTable -> PGCol -> PGCol -> m ()
possiblyUpdateCustomColumnNames source qt oCol nCol = do
  let updateCustomColumns customColumns =
        M.fromList $ flip map (M.toList customColumns) $
          \(dbCol, val) -> (, val) $ if dbCol == oCol then nCol else dbCol
  tell $ MetadataModifier $
    metaTables.ix qt.tmConfiguration.tcCustomColumnNames %~ updateCustomColumns
    tableMetadataSetter source qt.tmConfiguration.tcCustomColumnNames %~ updateCustomColumns

getSourceTables :: CacheRM m => SourceName -> m (TableCache 'Postgres)
getSourceTables source =
  (maybe mempty _pcTables . M.lookup source . scPostgres) <$> askSchemaCache

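All of these rename helpers communicate through MonadWriter MetadataModifier, which behaves as an endomorphism over the metadata tree: each tell contributes one rewrite and the Writer monoid composes them in order. A minimal model of that design (Modifier is a stand-in for the real newtype, and the list stands in for the metadata):

import Control.Monad.Writer (Writer, execWriter, tell)

-- Stand-in for MetadataModifier: a composable metadata rewrite.
newtype Modifier a = Modifier { runModifier :: a -> a }

instance Semigroup (Modifier a) where
  Modifier f <> Modifier g = Modifier (g . f)  -- earlier tells run first

instance Monoid (Modifier a) where
  mempty = Modifier id

type Metadata = [(String, Int)]  -- toy stand-in: table name, payload

renameTable :: String -> String -> Modifier Metadata
renameTable old new =
  Modifier (map (\(k, v) -> (if k == old then new else k, v)))

collectRenames :: Writer (Modifier Metadata) ()
collectRenames = do
  tell (renameTable "author" "writer")
  tell (renameTable "article" "post")

main :: IO ()
main = print (runModifier (execWriter collectRenames)
               [("author", 1), ("article", 2)])
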
95
server/src-lib/Hasura/RQL/DDL/Schema/Source.hs
Normal file
@ -0,0 +1,95 @@
module Hasura.RQL.DDL.Schema.Source where

import Control.Monad.Trans.Control (MonadBaseControl)
import Hasura.Backends.Postgres.Connection
import Hasura.Prelude
import Hasura.RQL.DDL.Schema.Common
import Hasura.RQL.Types

import qualified Data.Environment as Env
import qualified Database.PG.Query as Q

mkPgSourceResolver :: Q.PGLogger -> SourceResolver
mkPgSourceResolver pgLogger config = runExceptT do
  env <- lift Env.getEnvironment
  let PostgresSourceConnInfo urlConf connSettings = _scConnectionInfo config
      PostgresPoolSettings maxConns idleTimeout retries = connSettings
  urlText <- resolveUrlConf env urlConf
  let connInfo = Q.ConnInfo retries $ Q.CDDatabaseURI $ txtToBs urlText
      connParams = Q.defaultConnParams{ Q.cpIdleTime = idleTimeout
                                      , Q.cpConns = maxConns
                                      }
  pgPool <- liftIO $ Q.initPGPool connInfo connParams pgLogger
  let pgExecCtx = mkPGExecCtx Q.ReadCommitted pgPool
  pure $ PGSourceConfig pgExecCtx connInfo Nothing

resolveSource
  :: (MonadIO m, MonadBaseControl IO m, MonadResolveSource m)
  => SourceConfiguration -> m (Either QErr ResolvedPGSource)
resolveSource config = runExceptT do
  sourceResolver <- getSourceResolver
  sourceConfig <- liftEitherM $ liftIO $ sourceResolver config

  (tablesMeta, functionsMeta, pgScalars) <- runLazyTx (_pscExecCtx sourceConfig) Q.ReadWrite $ do
    initSource
    tablesMeta <- fetchTableMetadata
    functionsMeta <- fetchFunctionMetadata
    pgScalars <- fetchPgScalars
    pure (tablesMeta, functionsMeta, pgScalars)
  pure $ ResolvedPGSource sourceConfig tablesMeta functionsMeta pgScalars

initSource :: MonadTx m => m ()
initSource = do
  hdbCatalogExist <- doesSchemaExist "hdb_catalog"
  eventLogTableExist <- doesTableExist "hdb_catalog" "event_log"
  sourceVersionTableExist <- doesTableExist "hdb_catalog" "hdb_source_catalog_version"
  -- Fresh database
  if | not hdbCatalogExist -> liftTx do
       Q.unitQE defaultTxErrorHandler "CREATE SCHEMA hdb_catalog" () False
       enablePgcryptoExtension
       initPgSourceCatalog
     -- Only 'hdb_catalog' schema defined
     | not sourceVersionTableExist && not eventLogTableExist ->
       liftTx initPgSourceCatalog
     -- Source was initialised by a server predating multi-source support
     | not sourceVersionTableExist && eventLogTableExist ->
       liftTx createVersionTable
     | otherwise -> migrateSourceCatalog
  where
    initPgSourceCatalog = do
      () <- Q.multiQE defaultTxErrorHandler $(Q.sqlFromFile "src-rsr/init_pg_source.sql")
      setSourceCatalogVersion

    createVersionTable = do
      () <- Q.multiQE defaultTxErrorHandler
        [Q.sql|
           CREATE TABLE hdb_catalog.hdb_source_catalog_version(
             version TEXT NOT NULL,
             upgraded_on TIMESTAMPTZ NOT NULL
           );

           CREATE UNIQUE INDEX hdb_source_catalog_version_one_row
           ON hdb_catalog.hdb_source_catalog_version((version IS NOT NULL));
        |]
      setSourceCatalogVersion

    migrateSourceCatalog = do
      version <- getSourceCatalogVersion
      case version of
        "1" -> pure ()
        _   -> throw500 $ "unexpected source catalog version: " <> version

currentSourceCatalogVersion :: Text
currentSourceCatalogVersion = "1"

setSourceCatalogVersion :: MonadTx m => m ()
setSourceCatalogVersion = liftTx $ Q.unitQE defaultTxErrorHandler [Q.sql|
  INSERT INTO hdb_catalog.hdb_source_catalog_version(version, upgraded_on)
  VALUES ($1, NOW())
  ON CONFLICT ((version IS NOT NULL))
  DO UPDATE SET version = $1, upgraded_on = NOW()
  |] (Identity currentSourceCatalogVersion) False

getSourceCatalogVersion :: MonadTx m => m Text
getSourceCatalogVersion = liftTx $ runIdentity . Q.getRow <$> Q.withQE defaultTxErrorHandler
  [Q.sql| SELECT version FROM hdb_catalog.hdb_source_catalog_version |] () False
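The three probes at the top of initSource induce four startup cases, and the unique index over (version IS NOT NULL) forces the version table to hold a single row, so the upsert in setSourceCatalogVersion always targets that one row. A pure sketch of the decision table (the constructor names are descriptive labels, not the engine's types):

data InitAction
  = CreateSchemaAndSourceCatalog   -- fresh database
  | CreateSourceCatalogOnly        -- hdb_catalog exists, nothing else
  | CreateVersionTableOnly         -- initialised before multi-source support
  | MigrateSourceCatalog           -- already on a source catalog version
  deriving Show

decide :: Bool -> Bool -> Bool -> InitAction
decide hdbCatalogExists eventLogExists versionTableExists
  | not hdbCatalogExists                         = CreateSchemaAndSourceCatalog
  | not versionTableExists && not eventLogExists = CreateSourceCatalogOnly
  | not versionTableExists && eventLogExists     = CreateVersionTableOnly
  | otherwise                                    = MigrateSourceCatalog

main :: IO ()
main = mapM_ (\(a, b, c) -> print (decide a b c))
  [ (False, False, False)   -- fresh database
  , (True,  False, False)   -- only the schema exists
  , (True,  True,  False)   -- pre-multi-source server initialised it
  , (True,  True,  True )   -- current layout, just migrate
  ]
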
@ -1,5 +1,5 @@
{-# LANGUAGE Arrows #-}
{-# LANGUAGE AllowAmbiguousTypes #-}
{-# LANGUAGE Arrows #-}

-- | Description: Create/delete SQL tables to/from Hasura metadata.
module Hasura.RQL.DDL.Schema.Table
@ -35,6 +35,7 @@ import qualified Language.GraphQL.Draft.Syntax as G

import Control.Arrow.Extended
import Control.Lens.Extended hiding ((.=))
import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
@ -42,7 +43,8 @@ import Data.Text.Extended

import qualified Hasura.Incremental as Inc

import Hasura.Backends.Postgres.SQL.Types (QualifiedTable, snakeCaseQualifiedObject, FunctionName(..))
import Hasura.Backends.Postgres.SQL.Types (FunctionName (..), QualifiedTable,
                                           snakeCaseQualifiedObject)
import Hasura.EncJSON
import Hasura.GraphQL.Context
import Hasura.GraphQL.Schema.Common (textToName)
@ -58,7 +60,8 @@ import Hasura.Server.Utils

data TrackTable
  = TrackTable
  { tName :: !QualifiedTable
  { tSource :: !SourceName
  , tName   :: !QualifiedTable
  , tIsEnum :: !Bool
  } deriving (Show, Eq)

@ -66,39 +69,60 @@ instance FromJSON TrackTable where
  parseJSON v = withOptions <|> withoutOptions
    where
      withOptions = flip (withObject "TrackTable") v $ \o -> TrackTable
        <$> o .: "table"
        <$> o .:? "source" .!= defaultSource
        <*> o .: "table"
        <*> o .:? "is_enum" .!= False
      withoutOptions = TrackTable <$> parseJSON v <*> pure False
      withoutOptions = TrackTable defaultSource <$> parseJSON v <*> pure False

instance ToJSON TrackTable where
  toJSON (TrackTable name isEnum)
    | isEnum = object [ "table" .= name, "is_enum" .= isEnum ]
  toJSON (TrackTable source name isEnum)
    | isEnum = object [ "source" .= source, "table" .= name, "is_enum" .= isEnum ]
    | otherwise = toJSON name

data SetTableIsEnum
  = SetTableIsEnum
  { stieTable :: !QualifiedTable
  { stieSource :: !SourceName
  , stieTable  :: !QualifiedTable
  , stieIsEnum :: !Bool
  } deriving (Show, Eq)
$(deriveJSON (aesonDrop 4 snakeCase) ''SetTableIsEnum)
$(deriveToJSON (aesonDrop 4 snakeCase) ''SetTableIsEnum)

instance FromJSON SetTableIsEnum where
  parseJSON = withObject "Object" $ \o ->
    SetTableIsEnum
      <$> o .:? "source" .!= defaultSource
      <*> o .: "table"
      <*> o .: "is_enum"

data UntrackTable =
  UntrackTable
  { utTable :: !QualifiedTable
  , utCascade :: !(Maybe Bool)
  { utSource  :: !SourceName
  , utTable   :: !QualifiedTable
  , utCascade :: !Bool
  } deriving (Show, Eq)
$(deriveJSON (aesonDrop 2 snakeCase){omitNothingFields=True} ''UntrackTable)
$(deriveToJSON (aesonDrop 2 snakeCase){omitNothingFields=True} ''UntrackTable)

instance FromJSON UntrackTable where
  parseJSON = withObject "Object" $ \o ->
    UntrackTable
      <$> o .:? "source" .!= defaultSource
      <*> o .: "table"
      <*> o .:? "cascade" .!= False

isTableTracked :: SchemaCache -> SourceName -> QualifiedTable -> Bool
isTableTracked sc source tableName =
  isJust $ getPGTableInfo source tableName $ scPostgres sc

-- | Track table/view, Phase 1:
-- Validate table tracking operation. Fails if table is already being tracked,
-- or if a function with the same name is being tracked.
trackExistingTableOrViewP1 :: (QErrM m, CacheRWM m) => QualifiedTable -> m ()
trackExistingTableOrViewP1 qt = do
trackExistingTableOrViewP1 :: (QErrM m, CacheRWM m) => SourceName -> QualifiedTable -> m ()
trackExistingTableOrViewP1 source qt = do
  rawSchemaCache <- askSchemaCache
  when (Map.member qt $ scTables rawSchemaCache) $
  when (isTableTracked rawSchemaCache source qt) $
    throw400 AlreadyTracked $ "view/table already tracked : " <>> qt
  let qf = fmap (FunctionName . toTxt) qt
  when (Map.member qf $ scFunctions rawSchemaCache) $
  when (isJust $ getPGFunctionInfo source qf $ scPostgres rawSchemaCache) $
    throw400 NotSupported $ "function with name " <> qt <<> " already exists"

-- | Check whether a given name would conflict with the current schema by doing
@ -152,9 +176,9 @@ checkConflictingNode sc tnGQL = do
    _ -> pure ()

trackExistingTableOrViewP2
  :: (MonadTx m, CacheRWM m, MetadataM m)
  => QualifiedTable -> Bool -> TableConfig -> m EncJSON
trackExistingTableOrViewP2 tableName isEnum config = do
  :: (MonadError QErr m, CacheRWM m, MetadataM m)
  => SourceName -> QualifiedTable -> Bool -> TableConfig -> m EncJSON
trackExistingTableOrViewP2 source tableName isEnum config = do
  sc <- askSchemaCache
  {-
  The next line does more than what it says on the tin. Removing the following
@ -166,16 +190,16 @@ trackExistingTableOrViewP2 tableName isEnum config = do
  -}
  checkConflictingNode sc $ snakeCaseQualifiedObject tableName
  let metadata = mkTableMeta tableName isEnum config
  buildSchemaCacheFor (MOTable tableName)
  buildSchemaCacheFor (MOSourceObjId source $ SMOTable tableName)
    $ MetadataModifier
    $ metaTables %~ OMap.insert tableName metadata
    $ metaSources.ix source.smTables %~ OMap.insert tableName metadata
  pure successMsg

runTrackTableQ
  :: (MonadTx m, CacheRWM m, MetadataM m) => TrackTable -> m EncJSON
runTrackTableQ (TrackTable qt isEnum) = do
  trackExistingTableOrViewP1 qt
  trackExistingTableOrViewP2 qt isEnum emptyTableConfig
  :: (MonadError QErr m, CacheRWM m, MetadataM m) => TrackTable -> m EncJSON
runTrackTableQ (TrackTable source qt isEnum) = do
  trackExistingTableOrViewP1 source qt
  trackExistingTableOrViewP2 source qt isEnum emptyTableConfig

data TrackTableV2
  = TrackTableV2
@ -185,29 +209,38 @@ data TrackTableV2
$(deriveJSON (aesonDrop 4 snakeCase) ''TrackTableV2)

runTrackTableV2Q
  :: (MonadTx m, CacheRWM m, MetadataM m) => TrackTableV2 -> m EncJSON
runTrackTableV2Q (TrackTableV2 (TrackTable qt isEnum) config) = do
  trackExistingTableOrViewP1 qt
  trackExistingTableOrViewP2 qt isEnum config
  :: (MonadError QErr m, CacheRWM m, MetadataM m) => TrackTableV2 -> m EncJSON
runTrackTableV2Q (TrackTableV2 (TrackTable source qt isEnum) config) = do
  trackExistingTableOrViewP1 source qt
  trackExistingTableOrViewP2 source qt isEnum config

runSetExistingTableIsEnumQ :: (MonadTx m, CacheRWM m, MetadataM m) => SetTableIsEnum -> m EncJSON
runSetExistingTableIsEnumQ (SetTableIsEnum tableName isEnum) = do
  void $ askTabInfo tableName -- assert that table is tracked
  buildSchemaCacheFor (MOTable tableName)
runSetExistingTableIsEnumQ :: (MonadError QErr m, CacheRWM m, MetadataM m) => SetTableIsEnum -> m EncJSON
runSetExistingTableIsEnumQ (SetTableIsEnum source tableName isEnum) = do
  void $ askTabInfo source tableName -- assert that table is tracked
  buildSchemaCacheFor (MOSourceObjId source $ SMOTable tableName)
    $ MetadataModifier
    $ metaTables.ix tableName.tmIsEnum .~ isEnum
    $ tableMetadataSetter source tableName.tmIsEnum .~ isEnum
  return successMsg

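tableMetadataSetter source tableName, used here and throughout the patch, composes to a traversal that focuses one table's metadata inside one source. In spirit (a sketch with Data.Map standing in for the real metadata structures):

import           Control.Lens
import qualified Data.Map as M

-- Toy stand-in: source name -> table name -> is_enum flag.
type Metadata = M.Map String (M.Map String Bool)

tableSetter :: String -> String -> Traversal' Metadata Bool
tableSetter source table = ix source . ix table

main :: IO ()
main = do
  let md = M.fromList [("default", M.fromList [("author", False)])]
  -- Flip the flag for default.author; a missing source or table is a no-op.
  print (md & tableSetter "default" "author" .~ True)
  print (md & tableSetter "db2" "author" .~ True)
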
data SetTableCustomization
|
||||
= SetTableCustomization
|
||||
{ _stcTable :: !QualifiedTable
|
||||
{ _stcSource :: !SourceName
|
||||
, _stcTable :: !QualifiedTable
|
||||
, _stcConfiguration :: !TableConfig
|
||||
} deriving (Show, Eq)
|
||||
$(deriveJSON (aesonDrop 4 snakeCase) ''SetTableCustomization)
|
||||
$(deriveToJSON (aesonDrop 4 snakeCase) ''SetTableCustomization)
|
||||
|
||||
instance FromJSON SetTableCustomization where
|
||||
parseJSON = withObject "Object" $ \o ->
|
||||
SetTableCustomization
|
||||
<$> o .:? "source" .!= defaultSource
|
||||
<*> o .: "table"
|
||||
<*> o .: "configuration"
|
||||
|
||||
data SetTableCustomFields
|
||||
= SetTableCustomFields
|
||||
{ _stcfTable :: !QualifiedTable
|
||||
{ _stcfSource :: !SourceName
|
||||
, _stcfTable :: !QualifiedTable
|
||||
, _stcfCustomRootFields :: !TableCustomRootFields
|
||||
, _stcfCustomColumnNames :: !CustomColumnNames
|
||||
} deriving (Show, Eq)
|
||||
@ -216,34 +249,35 @@ $(deriveToJSON (aesonDrop 5 snakeCase) ''SetTableCustomFields)
|
||||
instance FromJSON SetTableCustomFields where
|
||||
parseJSON = withObject "SetTableCustomFields" $ \o ->
|
||||
SetTableCustomFields
|
||||
<$> o .: "table"
|
||||
<$> o .:? "source" .!= defaultSource
|
||||
<*> o .: "table"
|
||||
<*> o .:? "custom_root_fields" .!= emptyCustomRootFields
|
||||
<*> o .:? "custom_column_names" .!= Map.empty
|
||||
|
||||
runSetTableCustomFieldsQV2
|
||||
:: (QErrM m, CacheRWM m, MetadataM m) => SetTableCustomFields -> m EncJSON
|
||||
runSetTableCustomFieldsQV2 (SetTableCustomFields tableName rootFields columnNames) = do
|
||||
void $ askTabInfo tableName -- assert that table is tracked
|
||||
runSetTableCustomFieldsQV2 (SetTableCustomFields source tableName rootFields columnNames) = do
|
||||
void $ askTabInfo source tableName -- assert that table is tracked
|
||||
let tableConfig = TableConfig rootFields columnNames Nothing
|
||||
buildSchemaCacheFor (MOTable tableName)
|
||||
buildSchemaCacheFor (MOSourceObjId source $ SMOTable tableName)
|
||||
$ MetadataModifier
|
||||
$ metaTables.ix tableName.tmConfiguration .~ tableConfig
|
||||
$ tableMetadataSetter source tableName.tmConfiguration .~ tableConfig
|
||||
return successMsg
|
||||
|
||||
runSetTableCustomization
|
||||
:: (QErrM m, CacheRWM m, MetadataM m) => SetTableCustomization -> m EncJSON
|
||||
runSetTableCustomization (SetTableCustomization table config) = do
|
||||
void $ askTabInfo table
|
||||
buildSchemaCacheFor (MOTable table)
|
||||
runSetTableCustomization (SetTableCustomization source table config) = do
|
||||
void $ askTabInfo source table
|
||||
buildSchemaCacheFor (MOSourceObjId source $ SMOTable table)
|
||||
$ MetadataModifier
|
||||
$ metaTables.ix table.tmConfiguration .~ config
|
||||
$ tableMetadataSetter source table.tmConfiguration .~ config
|
||||
return successMsg
|
||||
|
||||
unTrackExistingTableOrViewP1
|
||||
:: (CacheRM m, QErrM m) => UntrackTable -> m ()
|
||||
unTrackExistingTableOrViewP1 (UntrackTable vn _) = do
|
||||
unTrackExistingTableOrViewP1 (UntrackTable source vn _) = do
|
||||
rawSchemaCache <- askSchemaCache
|
||||
case Map.lookup vn (scTables rawSchemaCache) of
|
||||
case getPGTableInfo source vn $ scPostgres rawSchemaCache of
|
||||
Just ti ->
|
||||
-- Check if table/view is system defined
|
||||
when (isSystemDefined $ _tciSystemDefined $ _tiCoreInfo ti) $ throw400 NotSupported $
|
||||
@ -254,29 +288,30 @@ unTrackExistingTableOrViewP1 (UntrackTable vn _) = do
|
||||
unTrackExistingTableOrViewP2
|
||||
:: (CacheRWM m, QErrM m, MetadataM m)
|
||||
=> UntrackTable -> m EncJSON
|
||||
unTrackExistingTableOrViewP2 (UntrackTable qtn cascade) = withNewInconsistentObjsCheck do
|
||||
unTrackExistingTableOrViewP2 (UntrackTable source qtn cascade) = withNewInconsistentObjsCheck do
|
||||
sc <- askSchemaCache
|
||||
|
||||
-- Get relational, query template and function dependants
|
||||
let allDeps = getDependentObjs sc (SOTable qtn)
|
||||
let allDeps = getDependentObjs sc (SOSourceObj source $ SOITable qtn)
|
||||
indirectDeps = filter (not . isDirectDep) allDeps
|
||||
  -- Report back with an error if cascade is not set
  when (indirectDeps /= [] && not (or cascade)) $ reportDepsExt indirectDeps []
  when (indirectDeps /= [] && not cascade) $ reportDepsExt indirectDeps []
  -- Purge all the dependents from state
  metadataModifier <- execWriterT do
    mapM_ (purgeDependentObject >=> tell) indirectDeps
    tell $ dropTableInMetadata qtn
    tell $ dropTableInMetadata source qtn
  -- delete the table and its direct dependencies
  buildSchemaCache metadataModifier
  pure successMsg
  where
    isDirectDep = \case
      (SOTableObj dtn _) -> qtn == dtn
      SOSourceObj s (SOITableObj dtn _) ->
        s == source && qtn == dtn
      _ -> False

dropTableInMetadata :: QualifiedTable -> MetadataModifier
dropTableInMetadata table =
  MetadataModifier $ metaTables %~ OMap.delete table
dropTableInMetadata :: SourceName -> QualifiedTable -> MetadataModifier
dropTableInMetadata source table =
  MetadataModifier $ metaSources.ix source.smTables %~ OMap.delete table

runUntrackTableQ
  :: (CacheRWM m, QErrM m, MetadataM m)
@ -290,8 +325,8 @@ processTableChanges
     , CacheRM m
     , MonadWriter MetadataModifier m
     )
  => TableCoreInfo 'Postgres -> TableDiff 'Postgres -> m ()
processTableChanges ti tableDiff = do
  => SourceName -> TableCoreInfo 'Postgres -> TableDiff 'Postgres -> m ()
processTableChanges source ti tableDiff = do
  -- If table rename occurs then don't replace constraints and
  -- process dropped/added columns, because schema reload happens eventually
  sc <- askSchemaCache
@ -305,7 +340,7 @@ processTableChanges ti tableDiff = do
      checkConflictingNode sc tnGQL
      procAlteredCols sc tn
      -- update new table in metadata
      renameTableInMetadata newTN tn
      renameTableInMetadata source newTN tn

    -- Process computed field diff
    processComputedFieldDiff tn
@ -320,16 +355,16 @@ processTableChanges ti tableDiff = do
          modifiedCustomColumnNames = foldl' (flip Map.delete) customColumnNames droppedCols
      when (modifiedCustomColumnNames /= customColumnNames) $
        tell $ MetadataModifier $
          metaTables.ix tn.tmConfiguration .~ (TableConfig customFields modifiedCustomColumnNames customName)
          tableMetadataSetter source tn.tmConfiguration .~ (TableConfig customFields modifiedCustomColumnNames customName)

    procAlteredCols sc tn = for_ alteredCols $
      \( RawColumnInfo oldName _ oldType _ _
       , RawColumnInfo newName _ newType _ _ ) -> do
        if | oldName /= newName ->
             renameColumnInMetadata oldName newName tn (_tciFieldInfoMap ti)
             renameColumnInMetadata oldName newName source tn (_tciFieldInfoMap ti)

           | oldType /= newType -> do
             let colId = SOTableObj tn $ TOCol oldName
             let colId = SOSourceObj source $ SOITableObj tn $ TOCol oldName
                 typeDepObjs = getDependentObjsWith (== DROnType) sc colId

             unless (null typeDepObjs) $ throw400 DependencyError $
@ -359,32 +394,40 @@ processTableChanges ti tableDiff = do
buildTableCache
  :: forall arr m
   . ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
     , Inc.ArrowCache m arr, MonadTx m
     , Inc.ArrowCache m arr, MonadIO m, MonadBaseControl IO m
     )
  => ( DBTablesMetadata 'Postgres
  => ( SourceName
     , SourceConfig 'Postgres
     , DBTablesMetadata 'Postgres
     , [TableBuildInput]
     , Inc.Dependency Inc.InvalidationKey
     ) `arr` Map.HashMap (TableName 'Postgres) (TableCoreInfoG 'Postgres (ColumnInfo 'Postgres) (ColumnInfo 'Postgres))
buildTableCache = Inc.cache proc (pgTables, tableBuildInputs, reloadMetadataInvalidationKey) -> do
buildTableCache = Inc.cache proc (source, pgSourceConfig, pgTables, tableBuildInputs, reloadMetadataInvalidationKey) -> do
  rawTableInfos <-
    (| Inc.keyed (| withTable (\tables -> do
         table <- noDuplicateTables -< tables
         let maybeInfo = Map.lookup (_tbiName table) pgTables
         buildRawTableInfo -< (table, maybeInfo, reloadMetadataInvalidationKey)
         buildRawTableInfo -< (table, maybeInfo, pgSourceConfig, reloadMetadataInvalidationKey)
         )
       |)
    |) (Map.groupOnNE _tbiName tableBuildInputs)
  let rawTableCache = Map.catMaybes rawTableInfos
    |) (withSourceInKey source $ Map.groupOnNE _tbiName tableBuildInputs)
  let rawTableCache = removeSourceInKey $ Map.catMaybes rawTableInfos
      enumTables = flip Map.mapMaybe rawTableCache \rawTableInfo ->
        (,) <$> _tciPrimaryKey rawTableInfo <*> _tciEnumValues rawTableInfo
  tableInfos <-
    (| Inc.keyed (| withTable (\table -> processTableInfo -< (enumTables, table)) |)
    |) rawTableCache
  returnA -< Map.catMaybes tableInfos
    |) (withSourceInKey source rawTableCache)
  returnA -< removeSourceInKey (Map.catMaybes tableInfos)
  where
    withTable :: ErrorA QErr arr (e, s) a -> arr (e, ((TableName 'Postgres), s)) (Maybe a)
    withSourceInKey :: (Eq k, Hashable k) => SourceName -> HashMap k v -> HashMap (SourceName, k) v
    withSourceInKey source = mapKeys (source,)

    removeSourceInKey :: (Eq k, Hashable k) => HashMap (SourceName, k) v -> HashMap k v
    removeSourceInKey = mapKeys snd

    withTable :: ErrorA QErr arr (e, s) a -> arr (e, ((SourceName, TableName 'Postgres), s)) (Maybe a)
    withTable f = withRecordInconsistency f <<<
      second (first $ arr \name -> MetadataObject (MOTable name) (toJSON name))
      second (first $ arr \(source, name) -> MetadataObject (MOSourceObjId source $ SMOTable name) (toJSON name))

    noDuplicateTables = proc tables -> case tables of
      table :| [] -> returnA -< table
@ -395,9 +438,10 @@ buildTableCache = Inc.cache proc (pgTables, tableBuildInputs, reloadMetadataInva
      :: ErrorA QErr arr
         ( TableBuildInput
         , Maybe (DBTableMetadata 'Postgres)
         , SourceConfig 'Postgres
         , Inc.Dependency Inc.InvalidationKey
         ) (TableCoreInfoG 'Postgres (RawColumnInfo 'Postgres) (Column 'Postgres))
    buildRawTableInfo = Inc.cache proc (tableBuildInput, maybeInfo, reloadMetadataInvalidationKey) -> do
    buildRawTableInfo = Inc.cache proc (tableBuildInput, maybeInfo, pgSourceConfig, reloadMetadataInvalidationKey) -> do
      let TableBuildInput name isEnum config = tableBuildInput
      metadataTable <-
        (| onNothingA (throwA -<
@ -413,7 +457,8 @@ buildTableCache = Inc.cache proc (pgTables, tableBuildInputs, reloadMetadataInva
          -- We want to make sure we reload enum values whenever someone explicitly calls
          -- `reload_metadata`.
          Inc.dependOn -< reloadMetadataInvalidationKey
          bindErrorA -< Just <$> fetchAndValidateEnumValues name rawPrimaryKey columns
          eitherEnums <- bindA -< fetchAndValidateEnumValues pgSourceConfig name rawPrimaryKey columns
          liftEitherA -< Just <$> eitherEnums
        else returnA -< Nothing

      returnA -< TableCoreInfo

@ -11,10 +11,12 @@ import Hasura.Prelude
import qualified Data.ByteString.Builder as BB
import qualified Data.Sequence as DS

import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson

import qualified Database.PG.Query as Q
import qualified Hasura.Backends.Postgres.SQL.DML as S
import qualified Hasura.Tracing as Tracing

import Hasura.Backends.Postgres.SQL.Types
import Hasura.Backends.Postgres.Translate.BoolExp
@ -23,6 +25,7 @@ import Hasura.RQL.DML.Internal
import Hasura.RQL.DML.Types
import Hasura.RQL.IR.BoolExp
import Hasura.RQL.Types
import Hasura.RQL.Types.Run
import Hasura.SQL.Types


@ -65,13 +68,13 @@ mkSQLCount (CountQueryP1 tn (permFltr, mWc) mDistCols) =
-- SELECT count(*) FROM (SELECT DISTINCT c1, .. cn FROM .. WHERE ..) r;
-- SELECT count(*) FROM (SELECT * FROM .. WHERE ..) r;
validateCountQWith
  :: (UserInfoM m, QErrM m, CacheRM m)
  :: (UserInfoM m, QErrM m, TableInfoRM 'Postgres m)
  => SessVarBldr 'Postgres m
  -> (ColumnType 'Postgres -> Value -> m S.SQLExp)
  -> CountQuery
  -> m CountQueryP1
validateCountQWith sessVarBldr prepValBldr (CountQuery qt mDistCols mWhere) = do
  tableInfo <- askTabInfo qt
  tableInfo <- askTabInfoSource qt

  -- Check if select is allowed
  selPerm <- modifyErr (<> selNecessaryMsg) $
@ -105,9 +108,11 @@ validateCountQWith sessVarBldr prepValBldr (CountQuery qt mDistCols mWhere) = do

validateCountQ
  :: (QErrM m, UserInfoM m, CacheRM m)
  => CountQuery -> m (CountQueryP1, DS.Seq Q.PrepArg)
validateCountQ =
  runDMLP1T . validateCountQWith sessVarFromCurrentSetting binRHSBuilder
  => SourceName -> CountQuery -> m (CountQueryP1, DS.Seq Q.PrepArg)
validateCountQ source query = do
  tableCache <- askTableCache source
  flip runTableCacheRT (source, tableCache) $ runDMLP1T $
    validateCountQWith sessVarFromCurrentSetting binRHSBuilder query

countQToTx
  :: (QErrM m, MonadTx m)
@ -122,7 +127,11 @@ countQToTx (u, p) = do
    BB.byteString "{\"count\":" <> BB.intDec c <> BB.char7 '}'

runCount
  :: (QErrM m, UserInfoM m, CacheRM m, MonadTx m)
  => CountQuery -> m EncJSON
runCount q =
  validateCountQ q >>= countQToTx
  :: ( QErrM m, UserInfoM m, CacheRM m
     , MonadIO m, MonadBaseControl IO m
     , Tracing.MonadTrace m
     )
  => SourceName -> CountQuery -> m EncJSON
runCount source q = do
  sourceConfig <- _pcConfiguration <$> askPGSourceCache source
  validateCountQ source q >>= liftEitherM . runExceptT . runQueryLazyTx (_pscExecCtx sourceConfig) Q.ReadOnly . countQToTx

@ -14,6 +14,7 @@ import qualified Data.Environment as Env
import qualified Data.Sequence as DS
import qualified Database.PG.Query as Q

import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson

import qualified Hasura.Backends.Postgres.SQL.DML as S
@ -27,18 +28,19 @@ import Hasura.RQL.DML.Internal
import Hasura.RQL.DML.Types
import Hasura.RQL.IR.Delete
import Hasura.RQL.Types
import Hasura.RQL.Types.Run
import Hasura.Server.Version (HasVersion)


validateDeleteQWith
  :: (UserInfoM m, QErrM m, CacheRM m)
  :: (UserInfoM m, QErrM m, TableInfoRM 'Postgres m)
  => SessVarBldr 'Postgres m
  -> (ColumnType 'Postgres -> Value -> m S.SQLExp)
  -> DeleteQuery
  -> m (AnnDel 'Postgres)
validateDeleteQWith sessVarBldr prepValBldr
  (DeleteQuery tableName rqlBE mRetCols) = do
  tableInfo <- askTabInfo tableName
  tableInfo <- askTabInfoSource tableName
  let coreInfo = _tiCoreInfo tableInfo

  -- If the table is a view then check if it is deletable
@ -81,18 +83,23 @@ validateDeleteQWith sessVarBldr prepValBldr

validateDeleteQ
  :: (QErrM m, UserInfoM m, CacheRM m)
  => DeleteQuery -> m (AnnDel 'Postgres, DS.Seq Q.PrepArg)
validateDeleteQ =
  runDMLP1T . validateDeleteQWith sessVarFromCurrentSetting binRHSBuilder
  => SourceName -> DeleteQuery -> m (AnnDel 'Postgres, DS.Seq Q.PrepArg)
validateDeleteQ source query = do
  tableCache <- askTableCache source
  flip runTableCacheRT (source, tableCache) $ runDMLP1T $
    validateDeleteQWith sessVarFromCurrentSetting binRHSBuilder query

runDelete
  :: ( HasVersion, QErrM m, UserInfoM m, CacheRM m
     , MonadTx m, HasSQLGenCtx m, MonadIO m
     , Tracing.MonadTrace m
     , HasSQLGenCtx m, MonadIO m
     , MonadBaseControl IO m, Tracing.MonadTrace m
     )
  => Env.Environment
  -> SourceName
  -> DeleteQuery
  -> m EncJSON
runDelete env q = do
runDelete env source q = do
  sourceConfig <- _pcConfiguration <$> askPGSourceCache source
  strfyNum <- stringifyNum <$> askSQLGenCtx
  validateDeleteQ q >>= execDeleteQuery env strfyNum Nothing
  validateDeleteQ source q >>= liftEitherM . runExceptT .
    runQueryLazyTx (_pscExecCtx sourceConfig) Q.ReadWrite . execDeleteQuery env strfyNum Nothing

@ -9,6 +9,7 @@ import qualified Data.HashSet as HS
import qualified Data.Sequence as DS
import qualified Database.PG.Query as Q

import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson.Types
import Data.Text.Extended

@ -23,6 +24,7 @@ import Hasura.RQL.DML.Internal
import Hasura.RQL.DML.Types
import Hasura.RQL.IR.Insert
import Hasura.RQL.Types
import Hasura.RQL.Types.Run
import Hasura.Server.Version (HasVersion)
import Hasura.Session

@ -126,7 +128,7 @@ buildConflictClause sessVarBldr tableInfo inpCols (OnConflict mTCol mTCons act)


convInsertQuery
  :: (UserInfoM m, QErrM m, CacheRM m)
  :: (UserInfoM m, QErrM m, TableInfoRM 'Postgres m)
  => (Value -> m [InsObj])
  -> SessVarBldr 'Postgres m
  -> (ColumnType 'Postgres -> Value -> m S.SQLExp)
@ -137,7 +139,7 @@ convInsertQuery objsParser sessVarBldr prepFn (InsertQuery tableName val oC mRet
  insObjs <- objsParser val

  -- Get the current table information
  tableInfo <- askTabInfo tableName
  tableInfo <- askTabInfoSource tableName
  let coreInfo = _tiCoreInfo tableInfo

  -- If the table is a view then check if it is insertable
@ -195,24 +197,27 @@ convInsertQuery objsParser sessVarBldr prepFn (InsertQuery tableName val oC mRet

convInsQ
  :: (QErrM m, UserInfoM m, CacheRM m)
  => InsertQuery
  => SourceName -> InsertQuery
  -> m (InsertQueryP1 'Postgres, DS.Seq Q.PrepArg)
convInsQ =
  runDMLP1T .
    convInsertQuery (withPathK "objects" . decodeInsObjs)
    sessVarFromCurrentSetting
    binRHSBuilder
convInsQ source query = do
  tableCache <- askTableCache source
  flip runTableCacheRT (source, tableCache) $ runDMLP1T $
    convInsertQuery (withPathK "objects" . decodeInsObjs)
    sessVarFromCurrentSetting binRHSBuilder query

runInsert
  :: ( HasVersion, QErrM m, UserInfoM m
     , CacheRM m, MonadTx m, HasSQLGenCtx m, MonadIO m
     , Tracing.MonadTrace m
     , CacheRM m, HasSQLGenCtx m, MonadIO m
     , MonadBaseControl IO m, Tracing.MonadTrace m
     )
  => Env.Environment -> InsertQuery -> m EncJSON
runInsert env q = do
  res <- convInsQ q
  => Env.Environment -> SourceName -> InsertQuery -> m EncJSON
runInsert env source q = do
  sourceConfig <- _pcConfiguration <$> askPGSourceCache source
  res <- convInsQ source q
  strfyNum <- stringifyNum <$> askSQLGenCtx
  execInsertQuery env strfyNum Nothing res
  liftEitherM $ runExceptT $
    runQueryLazyTx (_pscExecCtx sourceConfig) Q.ReadWrite $
      execInsertQuery env strfyNum Nothing res

decodeInsObjs :: (UserInfoM m, QErrM m) => Value -> m [InsObj]
decodeInsObjs v = do

@ -18,20 +18,20 @@ import Data.Text.Extended
import qualified Hasura.Backends.Postgres.SQL.DML as S

import Hasura.Backends.Postgres.SQL.Error
import Hasura.Backends.Postgres.SQL.Types
import Hasura.Backends.Postgres.SQL.Types hiding (TableName)
import Hasura.Backends.Postgres.SQL.Value
import Hasura.Backends.Postgres.Translate.BoolExp
import Hasura.Backends.Postgres.Translate.Column
import Hasura.RQL.Types
import Hasura.Session
import Hasura.SQL.Types


newtype DMLP1T m a
  = DMLP1T { unDMLP1T :: StateT (DS.Seq Q.PrepArg) m a }
  deriving ( Functor, Applicative, Monad, MonadTrans
           , MonadState (DS.Seq Q.PrepArg), MonadError e
           , TableCoreInfoRM b, CacheRM, UserInfoM, HasSQLGenCtx
           , SourceM, TableCoreInfoRM b, TableInfoRM b, CacheRM, UserInfoM, HasSQLGenCtx
           )

runDMLP1T :: DMLP1T m a -> m (a, DS.Seq Q.PrepArg)
@ -151,18 +151,18 @@ binRHSBuilder colType val = do
  return $ toPrepParam (DS.length preparedArgs + 1) (unsafePGColumnToBackend colType)

fetchRelTabInfo
  :: (QErrM m, CacheRM m)
  => QualifiedTable
  -> m (TableInfo 'Postgres)
  :: (QErrM m, TableInfoRM 'Postgres m)
  => TableName 'Postgres -> m (TableInfo 'Postgres)
fetchRelTabInfo refTabName =
  -- Internal error
  modifyErrAndSet500 ("foreign " <> ) $ askTabInfo refTabName
  modifyErrAndSet500 ("foreign " <> ) $
    askTabInfoSource refTabName

type SessVarBldr b m = SessionVarType b -> SessionVariable -> m (SQLExpression b)

fetchRelDet
  :: (UserInfoM m, QErrM m, CacheRM m)
  => RelName -> QualifiedTable
  :: (UserInfoM m, QErrM m, TableInfoRM 'Postgres m)
  => RelName -> TableName 'Postgres
  -> m (FieldInfoMap (FieldInfo 'Postgres), SelPermInfo 'Postgres)
fetchRelDet relName refTabName = do
  roleName <- askCurRole
@ -183,7 +183,7 @@ fetchRelDet relName refTabName = do
    ]

checkOnColExp
  :: (UserInfoM m, QErrM m, CacheRM m)
  :: (UserInfoM m, QErrM m, TableInfoRM 'Postgres m)
  => SelPermInfo 'Postgres
  -> SessVarBldr 'Postgres m
  -> AnnBoolExpFldSQL 'Postgres
@ -235,7 +235,7 @@ currentSession :: S.SQLExp
currentSession = S.SEUnsafe "current_setting('hasura.user')::json"

checkSelPerm
  :: (UserInfoM m, QErrM m, CacheRM m)
  :: (UserInfoM m, QErrM m, TableInfoRM 'Postgres m)
  => SelPermInfo 'Postgres
  -> SessVarBldr 'Postgres m
  -> AnnBoolExpSQL 'Postgres
@ -244,7 +244,7 @@ checkSelPerm spi sessVarBldr =
  traverse (checkOnColExp spi sessVarBldr)

convBoolExp
  :: (UserInfoM m, QErrM m, CacheRM m)
  :: (UserInfoM m, QErrM m, TableInfoRM 'Postgres m)
  => FieldInfoMap (FieldInfo 'Postgres)
  -> SelPermInfo 'Postgres
  -> BoolExp 'Postgres

@ -12,12 +12,14 @@ import qualified Data.List.NonEmpty as NE
import qualified Data.Sequence as DS
import qualified Database.PG.Query as Q

import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson.Types
import Data.Text.Extended

import qualified Hasura.Backends.Postgres.SQL.DML as S
import qualified Hasura.Tracing as Tracing

import Hasura.Backends.Postgres.SQL.Types
import Hasura.Backends.Postgres.SQL.Types hiding (TableName)
import Hasura.Backends.Postgres.Translate.Select
import Hasura.EncJSON
import Hasura.RQL.DML.Internal
@ -25,6 +27,7 @@ import Hasura.RQL.DML.Types
import Hasura.RQL.IR.OrderBy
import Hasura.RQL.IR.Select
import Hasura.RQL.Types
import Hasura.RQL.Types.Run
import Hasura.SQL.Types


@ -60,7 +63,7 @@ instance FromJSON (ExtCol 'Postgres) where
    , "object (relationship)"
    ]

convSelCol :: (UserInfoM m, QErrM m, CacheRM m)
convSelCol :: (UserInfoM m, QErrM m, TableInfoRM 'Postgres m)
           => FieldInfoMap (FieldInfo 'Postgres)
           -> SelPermInfo 'Postgres
           -> SelCol 'Postgres
@ -80,7 +83,7 @@ convSelCol fieldInfoMap spi (SCStar wildcard) =
  convWildcard fieldInfoMap spi wildcard

convWildcard
  :: (UserInfoM m, QErrM m, CacheRM m)
  :: (UserInfoM m, QErrM m, TableInfoRM 'Postgres m)
  => FieldInfoMap (FieldInfo 'Postgres)
  -> SelPermInfo 'Postgres
  -> Wildcard
@ -109,7 +112,7 @@ convWildcard fieldInfoMap selPermInfo wildcard =

    relExtCols wc = mapM (mkRelCol wc) relColInfos

resolveStar :: (UserInfoM m, QErrM m, CacheRM m)
resolveStar :: (UserInfoM m, QErrM m, TableInfoRM 'Postgres m)
            => FieldInfoMap (FieldInfo 'Postgres)
            -> SelPermInfo 'Postgres
            -> SelectQ 'Postgres
@ -135,7 +138,7 @@ resolveStar fim spi (SelectG selCols mWh mOb mLt mOf) = do
    equals _ _ = False

convOrderByElem
  :: (UserInfoM m, QErrM m, CacheRM m)
  :: (UserInfoM m, QErrM m, TableInfoRM 'Postgres m)
  => SessVarBldr 'Postgres m
  -> (FieldInfoMap (FieldInfo 'Postgres), SelPermInfo 'Postgres)
  -> OrderByCol
@ -189,8 +192,8 @@ convOrderByElem sessVarBldr (flds, spi) = \case
      throw400 UnexpectedPayload (mconcat [ fldName <<> " is a remote field" ])

convSelectQ
  :: (UserInfoM m, QErrM m, CacheRM m, HasSQLGenCtx m)
  => QualifiedTable
  :: (UserInfoM m, QErrM m, TableInfoRM 'Postgres m, HasSQLGenCtx m)
  => TableName 'Postgres
  -> FieldInfoMap (FieldInfo 'Postgres) -- Table information of current table
  -> SelPermInfo 'Postgres -- Additional select permission info
  -> SelectQExt 'Postgres -- Given Select Query
@ -255,7 +258,7 @@ convExtSimple fieldInfoMap selPermInfo pgCol = do
    relWhenPGErr = "relationships have to be expanded"

convExtRel
  :: (UserInfoM m, QErrM m, CacheRM m, HasSQLGenCtx m)
  :: (UserInfoM m, QErrM m, TableInfoRM 'Postgres m, HasSQLGenCtx m)
  => FieldInfoMap (FieldInfo 'Postgres)
  -> RelName
  -> Maybe RelName
@ -293,13 +296,13 @@ convExtRel fieldInfoMap relName mAlias selQ sessVarBldr prepValBldr = do
    ]

convSelectQuery
  :: (UserInfoM m, QErrM m, CacheRM m, HasSQLGenCtx m)
  :: (UserInfoM m, QErrM m, TableInfoRM 'Postgres m, HasSQLGenCtx m)
  => SessVarBldr 'Postgres m
  -> (ColumnType 'Postgres -> Value -> m S.SQLExp)
  -> SelectQuery
  -> m (AnnSimpleSel 'Postgres)
convSelectQuery sessVarBldr prepArgBuilder (DMLQuery qt selQ) = do
  tabInfo <- withPathK "table" $ askTabInfo qt
  tabInfo <- withPathK "table" $ askTabInfoSource qt
  selPermInfo <- askSelPermInfo tabInfo
  let fieldInfo = _tciFieldInfoMap $ _tiCoreInfo tabInfo
  extSelQ <- resolveStar fieldInfo selPermInfo selQ
@ -315,16 +318,24 @@ selectP2 jsonAggSelect (sel, p) =

phaseOne
  :: (QErrM m, UserInfoM m, CacheRM m, HasSQLGenCtx m)
  => SelectQuery -> m (AnnSimpleSel 'Postgres, DS.Seq Q.PrepArg)
phaseOne =
  runDMLP1T . convSelectQuery sessVarFromCurrentSetting binRHSBuilder
  => SourceName -> SelectQuery -> m (AnnSimpleSel 'Postgres, DS.Seq Q.PrepArg)
phaseOne sourceName query = do
  tableCache <- askTableCache sourceName
  flip runTableCacheRT (sourceName, tableCache) $ runDMLP1T $
    convSelectQuery sessVarFromCurrentSetting binRHSBuilder query

phaseTwo :: (MonadTx m) => (AnnSimpleSel 'Postgres, DS.Seq Q.PrepArg) -> m EncJSON
phaseTwo =
  liftTx . selectP2 JASMultipleRows

runSelect
  :: (QErrM m, UserInfoM m, CacheRM m, HasSQLGenCtx m, MonadTx m)
  => SelectQuery -> m EncJSON
runSelect q =
  phaseOne q >>= phaseTwo
  :: ( QErrM m, UserInfoM m, CacheRM m
     , HasSQLGenCtx m, MonadIO m, MonadBaseControl IO m
     , Tracing.MonadTrace m
     )
  => SourceName -> SelectQuery -> m EncJSON
runSelect source q = do
  sourceConfig <- _pcConfiguration <$> askPGSourceCache source
  p1Result <- phaseOne source q
  liftEitherM $ runExceptT $
    runQueryLazyTx (_pscExecCtx sourceConfig) Q.ReadWrite $ phaseTwo p1Result

@ -9,6 +9,7 @@ import qualified Data.HashMap.Strict as M
import qualified Data.Sequence as DS
import qualified Database.PG.Query as Q

import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson.Types
import Data.Text.Extended

@ -25,6 +26,7 @@ import Hasura.RQL.DML.Types
import Hasura.RQL.IR.BoolExp
import Hasura.RQL.IR.Update
import Hasura.RQL.Types
import Hasura.RQL.Types.Run
import Hasura.Server.Version (HasVersion)
import Hasura.Session

@ -91,14 +93,14 @@ convOp fieldInfoMap preSetCols updPerm objs conv =
    <> " for role " <> roleName <<> "; its value is predefined in permission"

validateUpdateQueryWith
  :: (UserInfoM m, QErrM m, CacheRM m)
  :: (UserInfoM m, QErrM m, TableInfoRM 'Postgres m)
  => SessVarBldr 'Postgres m
  -> (ColumnType 'Postgres -> Value -> m S.SQLExp)
  -> UpdateQuery
  -> m (AnnUpd 'Postgres)
validateUpdateQueryWith sessVarBldr prepValBldr uq = do
  let tableName = uqTable uq
  tableInfo <- withPathK "table" $ askTabInfo tableName
  tableInfo <- withPathK "table" $ askTabInfoSource tableName
  let coreInfo = _tiCoreInfo tableInfo

  -- If it is a view then check if it is updatable
@ -175,16 +177,20 @@ validateUpdateQueryWith sessVarBldr prepValBldr uq = do

validateUpdateQuery
  :: (QErrM m, UserInfoM m, CacheRM m)
  => UpdateQuery -> m (AnnUpd 'Postgres, DS.Seq Q.PrepArg)
validateUpdateQuery =
  runDMLP1T . validateUpdateQueryWith sessVarFromCurrentSetting binRHSBuilder
  => SourceName -> UpdateQuery -> m (AnnUpd 'Postgres, DS.Seq Q.PrepArg)
validateUpdateQuery source query = do
  tableCache <- askTableCache source
  flip runTableCacheRT (source, tableCache) $ runDMLP1T $
    validateUpdateQueryWith sessVarFromCurrentSetting binRHSBuilder query

runUpdate
  :: ( HasVersion, QErrM m, UserInfoM m, CacheRM m
     , MonadTx m, HasSQLGenCtx m, MonadIO m
     , HasSQLGenCtx m, MonadIO m, MonadBaseControl IO m
     , Tracing.MonadTrace m
     )
  => Env.Environment -> UpdateQuery -> m EncJSON
runUpdate env q = do
  => Env.Environment -> SourceName -> UpdateQuery -> m EncJSON
runUpdate env source q = do
  sourceConfig <- _pcConfiguration <$> askPGSourceCache source
  strfyNum <- stringifyNum <$> askSQLGenCtx
  validateUpdateQuery q >>= execUpdateQuery env strfyNum Nothing
  validateUpdateQuery source q >>= liftEitherM . runExceptT .
    runQueryLazyTx (_pscExecCtx sourceConfig) Q.ReadWrite . execUpdateQuery env strfyNum Nothing

@ -18,11 +18,15 @@ module Hasura.RQL.Types
  , QCtx(..)
  , HasQCtx(..)
  , mkAdminQCtx
  , askPGSourceCache
  , askTableCache
  , askTabInfo
  , isTableTracked
  , getTableInfo
  , askTabInfoSource
  , askTableCoreInfo
  , askTableCoreInfoSource
  , getTableInfo
  , askFieldInfoMap
  , askFieldInfoMapSource
  , askPGType
  , assertPGCol
  , askRelType
@ -45,13 +49,14 @@ import Hasura.Prelude
import Data.Aeson
import qualified Data.HashMap.Strict as M
import qualified Data.Text as T
import qualified Database.PG.Query as Q
import qualified Network.HTTP.Client as HTTP

import Control.Monad.Unique
import Data.Text.Extended

import Hasura.Backends.Postgres.Connection as R
import Hasura.Backends.Postgres.SQL.Types
import Hasura.Backends.Postgres.SQL.Types hiding (TableName)
import Hasura.RQL.IR.BoolExp as R
import Hasura.RQL.Types.Action as R
import Hasura.RQL.Types.Column as R
@ -70,6 +75,8 @@ import Hasura.RQL.Types.RemoteSchema as R
import Hasura.RQL.Types.ScheduledTrigger as R
import Hasura.RQL.Types.SchemaCache as R
import Hasura.RQL.Types.SchemaCache.Build as R
import Hasura.RQL.Types.SchemaCacheTypes as R
import Hasura.RQL.Types.Source as R
import Hasura.RQL.Types.Table as R
import Hasura.SQL.Backend as R

@ -97,41 +104,58 @@ class (Monad m) => UserInfoM m where

instance (UserInfoM m) => UserInfoM (ReaderT r m) where
  askUserInfo = lift askUserInfo
instance (UserInfoM m) => UserInfoM (ExceptT r m) where
  askUserInfo = lift askUserInfo
instance (UserInfoM m) => UserInfoM (StateT s m) where
  askUserInfo = lift askUserInfo
instance (UserInfoM m) => UserInfoM (TraceT m) where
  askUserInfo = lift askUserInfo
instance (UserInfoM m) => UserInfoM (MetadataT m) where
  askUserInfo = lift askUserInfo
instance (UserInfoM m) => UserInfoM (TableCacheRT b m) where
  askUserInfo = lift askUserInfo

askPGSourceCache
  :: (CacheRM m, MonadError QErr m)
  => SourceName -> m (SourceInfo 'Postgres)
askPGSourceCache source = do
  pgSources <- scPostgres <$> askSchemaCache
  onNothing (M.lookup source pgSources) $
throw400 NotExists $ "source with name " <> source <<> " not exists"
|
||||

askTabInfo
  :: (QErrM m, CacheRM m)
  => QualifiedTable -> m (TableInfo 'Postgres)
askTabInfo tabName = do
  => SourceName -> QualifiedTable -> m (TableInfo 'Postgres)
askTabInfo sourceName tabName = do
  rawSchemaCache <- askSchemaCache
  liftMaybe (err400 NotExists errMsg) $ M.lookup tabName $ scTables rawSchemaCache
  liftMaybe (err400 NotExists errMsg) $ do
    sourceCache <- M.lookup sourceName $ scPostgres rawSchemaCache
    M.lookup tabName $ _pcTables sourceCache
  where
    errMsg = "table " <> tabName <<> " does not exist"
    errMsg = "table " <> tabName <<> " does not exist " <> "in source: "
             <> sourceNameToText sourceName

isTableTracked :: SchemaCache -> QualifiedTable -> Bool
isTableTracked sc qt =
  isJust $ M.lookup qt $ scTables sc
askTabInfoSource
  :: (QErrM m, TableInfoRM 'Postgres m)
  => QualifiedTable -> m (TableInfo 'Postgres)
askTabInfoSource tableName = do
  lookupTableInfo tableName >>= (`onNothing` throwTableDoesNotExist tableName)

askTabInfoFromTrigger
  :: (QErrM m, CacheRM m)
  => TriggerName -> m (TableInfo 'Postgres)
askTabInfoFromTrigger trn = do
  => SourceName -> TriggerName -> m (TableInfo 'Postgres)
askTabInfoFromTrigger sourceName trn = do
  sc <- askSchemaCache
  let tabInfos = M.elems $ scTables sc
  let tabInfos = M.elems $ maybe mempty _pcTables $ M.lookup sourceName $ scPostgres sc
  liftMaybe (err400 NotExists errMsg) $ find (isJust.M.lookup trn._tiEventTriggerInfoMap) tabInfos
  where
    errMsg = "event trigger " <> triggerNameToTxt trn <<> " does not exist"

askEventTriggerInfo
  :: (QErrM m, CacheRM m)
  => TriggerName -> m EventTriggerInfo
askEventTriggerInfo trn = do
  ti <- askTabInfoFromTrigger trn
  => SourceName -> TriggerName -> m EventTriggerInfo
askEventTriggerInfo sourceName trn = do
  ti <- askTabInfoFromTrigger sourceName trn
  let etim = _tiEventTriggerInfoMap ti
  liftMaybe (err400 NotExists errMsg) $ M.lookup trn etim
  where
@ -152,6 +176,8 @@ instance (HasHttpManager m) => HasHttpManager (TraceT m) where
  askHttpManager = lift askHttpManager
instance (HasHttpManager m) => HasHttpManager (MetadataT m) where
  askHttpManager = lift askHttpManager
instance (HasHttpManager m) => HasHttpManager (LazyTxT QErr m) where
  askHttpManager = lift askHttpManager


data RemoteSchemaPermsCtx
@ -192,6 +218,9 @@ instance (HasRemoteSchemaPermsCtx m)
instance (HasRemoteSchemaPermsCtx m)
  => HasRemoteSchemaPermsCtx (MetadataT m) where
  askRemoteSchemaPermsCtx = lift askRemoteSchemaPermsCtx
instance (HasRemoteSchemaPermsCtx m)
  => HasRemoteSchemaPermsCtx (LazyTxT QErr m) where
  askRemoteSchemaPermsCtx = lift askRemoteSchemaPermsCtx

class (Monad m) => HasSQLGenCtx m where
  askSQLGenCtx :: m SQLGenCtx
@ -208,6 +237,12 @@ instance (HasSQLGenCtx m) => HasSQLGenCtx (TraceT m) where
  askSQLGenCtx = lift askSQLGenCtx
instance (HasSQLGenCtx m) => HasSQLGenCtx (MetadataT m) where
  askSQLGenCtx = lift askSQLGenCtx
instance (HasSQLGenCtx m) => HasSQLGenCtx (Q.TxET QErr m) where
  askSQLGenCtx = lift askSQLGenCtx
instance (HasSQLGenCtx m) => HasSQLGenCtx (LazyTxT QErr m) where
  askSQLGenCtx = lift askSQLGenCtx
instance (HasSQLGenCtx m) => HasSQLGenCtx (TableCacheRT b m) where
  askSQLGenCtx = lift askSQLGenCtx

class (Monad m) => HasSystemDefined m where
  askSystemDefined :: m SystemDefined
@ -224,7 +259,7 @@ instance (HasSystemDefined m) => HasSystemDefined (TraceT m) where
newtype HasSystemDefinedT m a
  = HasSystemDefinedT { unHasSystemDefinedT :: ReaderT SystemDefined m a }
  deriving ( Functor, Applicative, Monad, MonadTrans, MonadIO, MonadUnique, MonadError e, MonadTx
           , HasHttpManager, HasSQLGenCtx, TableCoreInfoRM b, CacheRM, UserInfoM, HasRemoteSchemaPermsCtx)
           , HasHttpManager, HasSQLGenCtx, SourceM, TableCoreInfoRM b, CacheRM, UserInfoM, HasRemoteSchemaPermsCtx)

runHasSystemDefinedT :: SystemDefined -> HasSystemDefinedT m a -> m a
runHasSystemDefinedT systemDefined = flip runReaderT systemDefined . unHasSystemDefinedT
@ -242,12 +277,41 @@ getTableInfo :: (QErrM m) => QualifiedTable -> HashMap QualifiedTable a -> m a
getTableInfo tableName infoMap =
  M.lookup tableName infoMap `onNothing` throwTableDoesNotExist tableName

askTableCoreInfo :: (QErrM m, TableCoreInfoRM 'Postgres m) => QualifiedTable -> m (TableCoreInfo 'Postgres)
askTableCoreInfo tableName =
askTableCache
  :: (QErrM m, CacheRM m) => SourceName -> m (TableCache 'Postgres)
askTableCache sourceName = do
  schemaCache <- askSchemaCache
  case M.lookup sourceName (scPostgres schemaCache) of
    Just tableCache -> pure $ _pcTables tableCache
    Nothing -> throw400 NotExists $ "source " <> sourceName <<> " does not exist"

askTableCoreInfo
  :: (QErrM m, CacheRM m) => SourceName -> TableName 'Postgres -> m (TableCoreInfo 'Postgres)
askTableCoreInfo sourceName tableName =
  _tiCoreInfo <$> askTabInfo sourceName tableName

-- | Ask for a table's core info without an explicit @'SourceName' argument.
-- The source name is implicitly inferred from @'SourceM' via @'TableCoreInfoRM'.
-- This is useful in RQL DML queries, which are executed against one particular source database.
askTableCoreInfoSource
  :: (QErrM m, TableCoreInfoRM 'Postgres m) => QualifiedTable -> m (TableCoreInfo 'Postgres)
askTableCoreInfoSource tableName =
  lookupTableCoreInfo tableName >>= (`onNothing` throwTableDoesNotExist tableName)
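
A sketch of the calling pattern these source-implicit lookups are designed for, assembled from the DML changes above (illustrative, not code from this commit): resolve the source's table cache once, then run the lookups inside runTableCacheRT, which fixes the source for the whole computation.

-- Hedged sketch; mirrors validateCountQ/validateDeleteQ above.
getCoreInfo
  :: (QErrM m, CacheRM m)
  => SourceName -> QualifiedTable -> m (TableCoreInfo 'Postgres)
getCoreInfo source table = do
  tableCache <- askTableCache source
  flip runTableCacheRT (source, tableCache) $ askTableCoreInfoSource table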

askFieldInfoMap :: (QErrM m, TableCoreInfoRM 'Postgres m) => QualifiedTable -> m (FieldInfoMap (FieldInfo 'Postgres))
askFieldInfoMap = fmap _tciFieldInfoMap . askTableCoreInfo
askFieldInfoMap
  :: (QErrM m, CacheRM m)
  => SourceName -> TableName 'Postgres -> m (FieldInfoMap (FieldInfo 'Postgres))
askFieldInfoMap sourceName tableName =
  _tciFieldInfoMap . _tiCoreInfo <$> askTabInfo sourceName tableName

-- | Ask for a table's fields info without an explicit @'SourceName' argument.
-- The source name is implicitly inferred from @'SourceM' via @'TableCoreInfoRM'.
-- This is useful in RQL DML queries, which are executed against one particular source database.
askFieldInfoMapSource
  :: (QErrM m, TableCoreInfoRM 'Postgres m)
  => QualifiedTable -> m (FieldInfoMap (FieldInfo 'Postgres))
askFieldInfoMapSource tableName =
  _tciFieldInfoMap <$> askTableCoreInfoSource tableName

askPGType
  :: (MonadError QErr m)

@ -45,6 +45,8 @@ module Hasura.RQL.Types.Action
  , amPermissions
  , ActionPermissionMetadata(..)

  , ActionSourceInfo(..)
  , getActionSourceInfo
  , AnnActionExecution(..)
  , AnnActionMutationAsync(..)
  , ActionExecContext(..)
@ -194,7 +196,7 @@ type ActionOutputFields = Map.HashMap G.Name G.GType

getActionOutputFields :: AnnotatedObjectType backend -> ActionOutputFields
getActionOutputFields =
  Map.fromList . map ( (unObjectFieldName . _ofdName) &&& (fst . _ofdType)) . toList . _otdFields
  Map.fromList . map ( (unObjectFieldName . _ofdName) &&& (fst . _ofdType)) . toList . _otdFields . _aotDefinition

data ActionInfo (b :: BackendType)
  = ActionInfo
@ -275,6 +277,13 @@ instance J.FromJSON ActionMetadata where

----------------- Resolve Types ----------------

data ActionSourceInfo b
  = ASINoSource -- ^ No relationships defined on the action output object
  | ASISource !(SourceConfig b) -- ^ All relationships refer to tables in one source

getActionSourceInfo :: AnnotatedObjectType b -> ActionSourceInfo b
getActionSourceInfo = maybe ASINoSource ASISource . _aotSource

data AnnActionExecution (b :: BackendType) v
  = AnnActionExecution
  { _aaeName :: !ActionName
@ -289,6 +298,7 @@ data AnnActionExecution (b :: BackendType) v
  , _aaeForwardClientHeaders :: !Bool
  , _aaeStrfyNum :: !Bool
  , _aaeTimeOut :: !Timeout
  , _aaeSource :: !(ActionSourceInfo b)
  }

data AnnActionMutationAsync
@ -314,6 +324,7 @@ data AnnActionAsyncQuery (b :: BackendType) v
  , _aaaqFields :: !(AsyncActionQueryFieldsG b v)
  , _aaaqDefinitionList :: ![(Column b, ScalarType b)]
  , _aaaqStringifyNum :: !Bool
  , _aaaqSource :: !(ActionSourceInfo b)
  }

data ActionExecContext

@ -56,43 +56,47 @@ module Hasura.RQL.Types.Common
  , UrlConf(..)
  , resolveUrlConf
  , getEnv

  , SourceName(..)
  , defaultSource
  , sourceNameToText
  ) where

import Hasura.Prelude

import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as HM
import qualified Data.Text as T
import qualified Database.PG.Query as Q
import qualified Language.GraphQL.Draft.Syntax as G
import qualified Language.Haskell.TH.Syntax as TH
import qualified PostgreSQL.Binary.Decoding as PD
import qualified Test.QuickCheck as QC

import Control.Lens (makeLenses)
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
import Data.Bifunctor (bimap)
import Data.Kind (Type)
import Data.Scientific (toBoundedInteger)
import Data.Text.Extended
import Data.Text.NonEmpty
import Data.Typeable
import Data.URL.Template

import qualified Hasura.Backends.Postgres.Execute.Types as PG
import qualified Hasura.Backends.Postgres.SQL.DML as PG
import qualified Hasura.Backends.Postgres.SQL.Types as PG
import qualified Hasura.Backends.Postgres.SQL.Value as PG

import Hasura.EncJSON
import Hasura.Incremental (Cacheable)
import Hasura.RQL.DDL.Headers ()
import Hasura.RQL.Types.Error
import Hasura.SQL.Backend
import Hasura.SQL.Types


type Representable a = (Show a, Eq a, Hashable a, Cacheable a, NFData a)

-- | Mapping from abstract types to concrete backend representation
@ -162,6 +166,7 @@ class
  type XAILIKE b :: Type
  type XANILIKE b :: Type
  type XComputedFieldInfo b :: Type
  type SourceConfig b :: Type
  isComparableType :: ScalarType b -> Bool
  isNumType :: ScalarType b -> Bool

@ -182,6 +187,7 @@ instance Backend 'Postgres where
  type XAILIKE 'Postgres = ()
  type XANILIKE 'Postgres = ()
  type XComputedFieldInfo 'Postgres = ()
  type SourceConfig 'Postgres = PG.PGSourceConfig
  isComparableType = PG.isComparableType
  isNumType = PG.isNumType

@ -286,21 +292,57 @@ fromRel = FieldName . relNameToTxt
class ToAesonPairs a where
  toAesonPairs :: (KeyValue v) => a -> [v]

data SourceName
  = SNDefault
  | SNName !NonEmptyText
  deriving (Show, Eq, Ord, Generic)

instance FromJSON SourceName where
  parseJSON = withText "String" $ \case
    "default" -> pure SNDefault
    t -> SNName <$> parseJSON (String t)

sourceNameToText :: SourceName -> Text
sourceNameToText = \case
  SNDefault -> "default"
  SNName t -> unNonEmptyText t

instance ToJSON SourceName where
  toJSON = String . sourceNameToText

instance ToTxt SourceName where
  toTxt = sourceNameToText

instance ToJSONKey SourceName
instance Hashable SourceName
instance NFData SourceName
instance Cacheable SourceName

instance Arbitrary SourceName where
  arbitrary = SNName <$> arbitrary

defaultSource :: SourceName
defaultSource = SNDefault
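
Illustrative only (not in the diff): per the instances above, the literal string "default" decodes to SNDefault, any other non-empty string to SNName, and encoding reverses this.

-- Hypothetical GHCi checks:
-- >>> decode "\"default\"" :: Maybe SourceName   -- Just SNDefault
-- >>> decode "\"mydb\"" :: Maybe SourceName      -- roughly Just (SNName "mydb")
-- >>> encode SNDefault                           -- "\"default\""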

data WithTable a
  = WithTable
  { wtName :: !PG.QualifiedTable
  , wtInfo :: !a
  { wtSource :: !SourceName
  , wtName :: !PG.QualifiedTable
  , wtInfo :: !a
  } deriving (Show, Eq)

instance (FromJSON a) => FromJSON (WithTable a) where
  parseJSON v@(Object o) =
    WithTable <$> o .: "table" <*> parseJSON v
    WithTable
      <$> o .:? "source" .!= defaultSource
      <*> o .: "table"
      <*> parseJSON v
  parseJSON _ =
    fail "expecting an Object with key 'table'"

instance (ToAesonPairs a) => ToJSON (WithTable a) where
  toJSON (WithTable tn rel) =
    object $ ("table" .= tn):toAesonPairs rel
  toJSON (WithTable sourceName tn rel) =
    object $ ("source" .= sourceName):("table" .= tn):toAesonPairs rel

type ColumnValues a = HM.HashMap PG.PGCol a

@ -26,7 +26,7 @@ module Hasura.RQL.Types.CustomTypes
  , NonObjectTypeMap
  , AnnotatedObjectFieldType(..)
  , fieldTypeToScalarType
  , AnnotatedObjectType
  , AnnotatedObjectType(..)
  , AnnotatedObjects
  , AnnotatedCustomTypes(..)
  , emptyAnnotatedCustomTypes
@ -50,7 +50,8 @@ import Hasura.Backends.Postgres.SQL.Types
import Hasura.Incremental (Cacheable)
import Hasura.Prelude
import Hasura.RQL.Types.Column
import Hasura.RQL.Types.Common (RelType, ScalarType)
import Hasura.RQL.Types.Common (RelType, ScalarType, SourceConfig, SourceName,
                                defaultSource)
import Hasura.RQL.Types.Table
import Hasura.SQL.Backend

@ -128,13 +129,22 @@ data TypeRelationship t f
  = TypeRelationship
  { _trName :: !RelationshipName
  , _trType :: !RelType
  , _trSource :: !SourceName
  , _trRemoteTable :: !t
  , _trFieldMapping :: !(Map.HashMap ObjectFieldName f)
  } deriving (Show, Eq, Generic)
instance (NFData t, NFData f) => NFData (TypeRelationship t f)
instance (Cacheable t, Cacheable f) => Cacheable (TypeRelationship t f)
$(makeLenses ''TypeRelationship)
$(J.deriveJSON (J.aesonDrop 3 J.snakeCase) ''TypeRelationship)
$(J.deriveToJSON (J.aesonDrop 3 J.snakeCase) ''TypeRelationship)

instance (J.FromJSON t, J.FromJSON f) => J.FromJSON (TypeRelationship t f) where
  parseJSON = J.withObject "Object" $ \o ->
    TypeRelationship <$> o J..: "name"
                     <*> o J..: "type"
                     <*> o J..:? "source" J..!= defaultSource
                     <*> o J..: "remote_table"
                     <*> o J..: "field_mapping"

newtype ObjectTypeName
  = ObjectTypeName { unObjectTypeName :: G.Name }
@ -274,8 +284,13 @@ fieldTypeToScalarType = \case
  | _stdName == boolScalar -> PGBoolean
  | otherwise -> PGJSON

type AnnotatedObjectType b =
  ObjectTypeDefinition (G.GType, AnnotatedObjectFieldType) (TableInfo b) (ColumnInfo b)
data AnnotatedObjectType b
  = AnnotatedObjectType
  { _aotDefinition :: !(ObjectTypeDefinition (G.GType, AnnotatedObjectFieldType) (TableInfo b) (ColumnInfo b))
  , _aotSource :: !(Maybe (SourceConfig b))
  } deriving (Generic)
instance J.ToJSON (AnnotatedObjectType 'Postgres) where
  toJSON = J.toJSON . _aotDefinition

type AnnotatedObjects b = Map.HashMap G.Name (AnnotatedObjectType b)

@ -40,7 +40,7 @@ import Data.Text.NonEmpty
import Hasura.Backends.Postgres.SQL.Types
import Hasura.Incremental (Cacheable)
import Hasura.RQL.DDL.Headers
import Hasura.RQL.Types.Common (InputWebhook)
import Hasura.RQL.Types.Common (InputWebhook, SourceName, defaultSource)


-- This change helps us create functions for the event triggers
@ -146,7 +146,8 @@ $(deriveToJSON (aesonDrop 3 snakeCase){omitNothingFields=True} ''WebhookConfInfo

data CreateEventTriggerQuery
  = CreateEventTriggerQuery
  { cetqName :: !TriggerName
  { cetqSource :: !SourceName
  , cetqName :: !TriggerName
  , cetqTable :: !QualifiedTable
  , cetqInsert :: !(Maybe SubscribeOpSpec)
  , cetqUpdate :: !(Maybe SubscribeOpSpec)
@ -161,6 +162,7 @@ data CreateEventTriggerQuery

instance FromJSON CreateEventTriggerQuery where
  parseJSON (Object o) = do
    sourceName <- o .:? "source" .!= defaultSource
    name <- o .: "name"
    table <- o .: "table"
    insert <- o .:? "insert"
@ -187,7 +189,7 @@ instance FromJSON CreateEventTriggerQuery where
      (Just _, Just _) -> fail "only one of webhook or webhook_from_env should be given"
      _ -> fail "must provide webhook or webhook_from_env"
    mapM_ checkEmptyCols [insert, update, delete]
    return $ CreateEventTriggerQuery name table insert update delete (Just enableManual) retryConf webhook webhookFromEnv headers replace
    return $ CreateEventTriggerQuery sourceName name table insert update delete (Just enableManual) retryConf webhook webhookFromEnv headers replace
    where
      checkEmptyCols spec
        = case spec of
@ -210,10 +212,19 @@ instance NFData TriggerOpsDef
instance Cacheable TriggerOpsDef
$(deriveJSON (aesonDrop 2 snakeCase){omitNothingFields=True} ''TriggerOpsDef)

newtype DeleteEventTriggerQuery = DeleteEventTriggerQuery { detqName :: TriggerName }
  deriving (Show, Eq)
data DeleteEventTriggerQuery
  = DeleteEventTriggerQuery
  { detqSource :: !SourceName
  , detqName :: !TriggerName
  } deriving (Show, Eq)

$(deriveJSON (aesonDrop 4 snakeCase){omitNothingFields=True} ''DeleteEventTriggerQuery)
instance FromJSON DeleteEventTriggerQuery where
  parseJSON = withObject "Object" $ \o ->
    DeleteEventTriggerQuery
      <$> o .:? "source" .!= defaultSource
      <*> o .: "name"

$(deriveToJSON (aesonDrop 4 snakeCase){omitNothingFields=True} ''DeleteEventTriggerQuery)

data EventTriggerConf
  = EventTriggerConf
@ -227,17 +238,32 @@ data EventTriggerConf
instance Cacheable EventTriggerConf
$(deriveJSON (aesonDrop 3 snakeCase){omitNothingFields=True} ''EventTriggerConf)

newtype RedeliverEventQuery
data RedeliverEventQuery
  = RedeliverEventQuery
  { rdeqEventId :: EventId
  { rdeqEventId :: !EventId
  , rdeqSource :: !SourceName
  } deriving (Show, Eq)

$(deriveJSON (aesonDrop 4 snakeCase){omitNothingFields=True} ''RedeliverEventQuery)
$(deriveToJSON (aesonDrop 4 snakeCase){omitNothingFields=True} ''RedeliverEventQuery)

instance FromJSON RedeliverEventQuery where
  parseJSON = withObject "Object" $ \o ->
    RedeliverEventQuery
      <$> o .: "event_id"
      <*> o .:? "source" .!= defaultSource

data InvokeEventTriggerQuery
  = InvokeEventTriggerQuery
  { ietqName :: !TriggerName
  , ietqSource :: !SourceName
  , ietqPayload :: !Value
  } deriving (Show, Eq)

$(deriveJSON (aesonDrop 4 snakeCase){omitNothingFields=True} ''InvokeEventTriggerQuery)
$(deriveToJSON (aesonDrop 4 snakeCase){omitNothingFields=True} ''InvokeEventTriggerQuery)

instance FromJSON InvokeEventTriggerQuery where
  parseJSON = withObject "Object" $ \o ->
    InvokeEventTriggerQuery
      <$> o .: "name"
      <*> o .:? "source" .!= defaultSource
      <*> o .: "payload"
|
||||
|
@ -130,7 +130,8 @@ emptyFunctionConfig = FunctionConfig Nothing Nothing
|
||||
-- https://hasura.io/docs/1.0/graphql/core/api-reference/schema-metadata-api/custom-functions.html#track-function-v2
|
||||
data TrackFunctionV2
|
||||
= TrackFunctionV2
|
||||
{ _tfv2Function :: !QualifiedFunction
|
||||
{ _tfv2Source :: !SourceName
|
||||
, _tfv2Function :: !QualifiedFunction
|
||||
, _tfv2Configuration :: !FunctionConfig
|
||||
} deriving (Show, Eq, Generic)
|
||||
$(deriveToJSON (aesonDrop 5 snakeCase) ''TrackFunctionV2)
|
||||
@ -138,7 +139,8 @@ $(deriveToJSON (aesonDrop 5 snakeCase) ''TrackFunctionV2)
|
||||
instance FromJSON TrackFunctionV2 where
|
||||
parseJSON = withObject "Object" $ \o ->
|
||||
TrackFunctionV2
|
||||
<$> o .: "function"
|
||||
<$> o .:? "source" .!= defaultSource
|
||||
<*> o .: "function"
|
||||
<*> o .:? "configuration" .!= emptyFunctionConfig
|
||||
|
||||
-- | Raw SQL function metadata from postgres
|
||||
|
@ -34,6 +34,7 @@ import Hasura.RQL.Types.Relationship
|
||||
import Hasura.RQL.Types.RemoteRelationship
|
||||
import Hasura.RQL.Types.RemoteSchema
|
||||
import Hasura.RQL.Types.ScheduledTrigger
|
||||
import Hasura.RQL.Types.Source
|
||||
import Hasura.RQL.Types.Table
|
||||
import Hasura.Session
|
||||
import Hasura.SQL.Backend
|
||||
@ -48,13 +49,19 @@ data TableMetadataObjId
|
||||
deriving (Show, Eq, Generic)
|
||||
instance Hashable TableMetadataObjId
|
||||
|
||||
data SourceMetadataObjId
|
||||
= SMOTable !QualifiedTable
|
||||
| SMOFunction !QualifiedFunction
|
||||
| SMOTableObj !QualifiedTable !TableMetadataObjId
|
||||
deriving (Show, Eq, Generic)
|
||||
instance Hashable SourceMetadataObjId
|
||||
|
||||
data MetadataObjId
|
||||
= MOTable !QualifiedTable
|
||||
| MOFunction !QualifiedFunction
|
||||
= MOSource !SourceName
|
||||
| MOSourceObjId !SourceName !SourceMetadataObjId
|
||||
| MORemoteSchema !RemoteSchemaName
|
||||
-- ^ Originates from user-defined '_arsqName'
|
||||
| MORemoteSchemaPermissions !RemoteSchemaName !RoleName
|
||||
| MOTableObj !QualifiedTable !TableMetadataObjId
|
||||
| MOCustomTypes
|
||||
| MOAction !ActionName
|
||||
| MOActionPermission !ActionName !RoleName
|
||||
@ -65,37 +72,41 @@ instance Hashable MetadataObjId
|
||||
|
||||
moiTypeName :: MetadataObjId -> Text
|
||||
moiTypeName = \case
|
||||
MOTable _ -> "table"
|
||||
MOFunction _ -> "function"
|
||||
MOSource _ -> "source"
|
||||
MOSourceObjId _ sourceObjId -> case sourceObjId of
|
||||
SMOTable _ -> "table"
|
||||
SMOFunction _ -> "function"
|
||||
SMOTableObj _ tableObjectId -> case tableObjectId of
|
||||
MTORel _ relType -> relTypeToTxt relType <> "_relation"
|
||||
MTOPerm _ permType -> permTypeToCode permType <> "_permission"
|
||||
MTOTrigger _ -> "event_trigger"
|
||||
MTOComputedField _ -> "computed_field"
|
||||
MTORemoteRelationship _ -> "remote_relationship"
|
||||
MORemoteSchema _ -> "remote_schema"
|
||||
MORemoteSchemaPermissions _ _ -> "remote_schema_permission"
|
||||
MOCronTrigger _ -> "cron_trigger"
|
||||
MOTableObj _ tableObjectId -> case tableObjectId of
|
||||
MTORel _ relType -> relTypeToTxt relType <> "_relation"
|
||||
MTOPerm _ permType -> permTypeToCode permType <> "_permission"
|
||||
MTOTrigger _ -> "event_trigger"
|
||||
MTOComputedField _ -> "computed_field"
|
||||
MTORemoteRelationship _ -> "remote_relationship"
|
||||
MOCustomTypes -> "custom_types"
|
||||
MOAction _ -> "action"
|
||||
MOActionPermission _ _ -> "action_permission"
|
||||
|
||||
moiName :: MetadataObjId -> Text
|
||||
moiName objectId = moiTypeName objectId <> " " <> case objectId of
|
||||
MOTable name -> toTxt name
|
||||
MOFunction name -> toTxt name
|
||||
MOSource name -> toTxt name
|
||||
MOSourceObjId source sourceObjId -> case sourceObjId of
|
||||
SMOTable name -> toTxt name <> " in source " <> toTxt source
|
||||
SMOFunction name -> toTxt name <> " in source " <> toTxt source
|
||||
SMOTableObj tableName tableObjectId ->
|
||||
let tableObjectName = case tableObjectId of
|
||||
MTORel name _ -> toTxt name
|
||||
MTOComputedField name -> toTxt name
|
||||
MTORemoteRelationship name -> toTxt name
|
||||
MTOPerm name _ -> toTxt name
|
||||
MTOTrigger name -> toTxt name
|
||||
in tableObjectName <> " in " <> moiName (MOSourceObjId source $ SMOTable tableName)
|
||||
MORemoteSchema name -> toTxt name
|
||||
MORemoteSchemaPermissions name roleName ->
|
||||
toTxt roleName <> " permission in remote schema " <> toTxt name
|
||||
MOCronTrigger name -> toTxt name
|
||||
MOTableObj tableName tableObjectId ->
|
||||
let tableObjectName = case tableObjectId of
|
||||
MTORel name _ -> toTxt name
|
||||
MTOComputedField name -> toTxt name
|
||||
MTORemoteRelationship name -> toTxt name
|
||||
MTOPerm name _ -> toTxt name
|
||||
MTOTrigger name -> toTxt name
|
||||
in tableObjectName <> " in " <> moiName (MOTable tableName)
|
||||
MOCustomTypes -> "custom_types"
|
||||
MOAction name -> toTxt name
|
||||
MOActionPermission name roleName -> toTxt roleName <> " permission in " <> toTxt name
|
||||
@ -168,12 +179,14 @@ parseListAsMap t mapFn listP = do
|
||||
data MetadataVersion
|
||||
= MVVersion1
|
||||
| MVVersion2
|
||||
| MVVersion3
|
||||
deriving (Show, Eq, Generic)
|
||||
|
||||
instance ToJSON MetadataVersion where
|
||||
toJSON = \case
|
||||
MVVersion1 -> toJSON @Int 1
|
||||
MVVersion2 -> toJSON @Int 2
|
||||
MVVersion3 -> toJSON @Int 3
|
||||
|
||||
instance FromJSON MetadataVersion where
|
||||
parseJSON v = do
|
||||
@ -181,10 +194,11 @@ instance FromJSON MetadataVersion where
|
||||
case version of
|
||||
1 -> pure MVVersion1
|
||||
2 -> pure MVVersion2
|
||||
i -> fail $ "expected 1 or 2, encountered " ++ show i
|
||||
3 -> pure MVVersion3
|
||||
i -> fail $ "expected 1, 2 or 3, encountered " ++ show i
|
||||
|
||||
currentMetadataVersion :: MetadataVersion
|
||||
currentMetadataVersion = MVVersion2
|
||||
currentMetadataVersion = MVVersion3
|
||||

data ComputedFieldMetadata
  = ComputedFieldMetadata
@ -329,7 +343,32 @@ type Allowlist = HSIns.InsOrdHashSet CollectionReq
type Actions = InsOrdHashMap ActionName ActionMetadata
type CronTriggers = InsOrdHashMap TriggerName CronTriggerMetadata

parseNonPostgresMetadata
data SourceMetadata
  = SourceMetadata
  { _smName          :: !SourceName
  , _smTables        :: !Tables
  , _smFunctions     :: !Functions
  , _smConfiguration :: !SourceConfiguration
  } deriving (Show, Eq, Generic)
instance Cacheable SourceMetadata
$(makeLenses ''SourceMetadata)
instance FromJSON SourceMetadata where
  parseJSON = withObject "Object" $ \o -> do
    _smName          <- o .: "name"
    _smTables        <- oMapFromL _tmTable <$> o .: "tables"
    _smFunctions     <- oMapFromL _fmFunction <$> o .:? "functions" .!= []
    _smConfiguration <- o .: "configuration"
    pure SourceMetadata{..}

mkSourceMetadata
  :: SourceName -> UrlConf -> PostgresPoolSettings -> SourceMetadata
mkSourceMetadata name urlConf connSettings =
  SourceMetadata name mempty mempty $
    SourceConfiguration (PostgresSourceConnInfo urlConf connSettings) Nothing

type Sources = InsOrdHashMap SourceName SourceMetadata
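To illustrate the wire shape this parser accepts, here is a standalone sketch with simplified stand-in types (Value replaces the real Tables and SourceConfiguration; all names below are illustrative, not the engine's):

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Aeson
    import Data.Text (Text)

    data SourceSketch = SourceSketch
      { ssName          :: Text
      , ssTables        :: [Value]
      , ssConfiguration :: Value
      } deriving Show

    instance FromJSON SourceSketch where
      parseJSON = withObject "Object" $ \o ->
        SourceSketch
          <$> o .: "name"
          <*> o .: "tables"
          <*> o .: "configuration"

    -- "functions" stays optional, exactly as in the instance above.
    sample :: Maybe SourceSketch
    sample = decode
      "{\"name\":\"default\",\"tables\":[],\"configuration\":{\"connection_info\":{}}}"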
parseNonSourcesMetadata
  :: Object
  -> Parser
     ( RemoteSchemas
@ -339,7 +378,7 @@ parseNonPostgresMetadata
     , Actions
     , CronTriggers
     )
parseNonPostgresMetadata o = do
parseNonSourcesMetadata o = do
  remoteSchemas <- parseListAsMap "remote schemas" _rsmName $
    o .:? "remote_schemas" .!= []
  queryCollections <- parseListAsMap "query collections" _ccName $
@ -357,36 +396,69 @@ parseNonPostgresMetadata o = do
-- exported/replaced via metadata queries.
data Metadata
  = Metadata
  { _metaTables           :: !Tables
  , _metaFunctions        :: !Functions
  , _metaRemoteSchemas    :: !RemoteSchemas
  , _metaQueryCollections :: !QueryCollections
  , _metaAllowlist        :: !Allowlist
  , _metaCustomTypes      :: !CustomTypes
  , _metaActions          :: !Actions
  , _metaCronTriggers     :: !CronTriggers
  { _metaSources          :: !Sources
  , _metaRemoteSchemas    :: !RemoteSchemas
  , _metaQueryCollections :: !QueryCollections
  , _metaAllowlist        :: !Allowlist
  , _metaCustomTypes      :: !CustomTypes
  , _metaActions          :: !Actions
  , _metaCronTriggers     :: !CronTriggers
  } deriving (Show, Eq)
$(makeLenses ''Metadata)

instance FromJSON Metadata where
  parseJSON = withObject "Object" $ \o -> do
    version <- o .:? "version" .!= MVVersion1
    tables <- parseListAsMap "tables" _tmTable $ o .: "tables"
    functions <-
      case version of
        MVVersion1 -> do
          functions <- parseListAsMap "functions" id $ o .:? "functions" .!= []
          pure $ flip OM.map functions $
            \function -> FunctionMetadata function emptyFunctionConfig
        MVVersion2 -> parseListAsMap "functions" _fmFunction $ o .:? "functions" .!= []
    when (version /= MVVersion3) $ fail $
      "unexpected metadata version from storage: " <> show version
    sources <- oMapFromL _smName <$> o .: "sources"
    (remoteSchemas, queryCollections, allowlist, customTypes,
     actions, cronTriggers) <- parseNonPostgresMetadata o
    pure $ Metadata tables functions remoteSchemas queryCollections
           allowlist customTypes actions cronTriggers
     actions, cronTriggers) <- parseNonSourcesMetadata o
    pure $ Metadata sources remoteSchemas queryCollections allowlist
           customTypes actions cronTriggers

emptyMetadata :: Metadata
emptyMetadata =
  Metadata mempty mempty mempty mempty mempty emptyCustomTypes mempty mempty
  Metadata mempty mempty mempty mempty emptyCustomTypes mempty mempty

tableMetadataSetter
  :: SourceName -> QualifiedTable -> ASetter' Metadata TableMetadata
tableMetadataSetter source table =
  metaSources.ix source.smTables.ix table
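The setter above composes two map lookups through the lens `ix`. A standalone sketch of the same pattern, with plain Maps and Strings standing in for the engine's insertion-ordered maps and name types:

    import Control.Lens
    import qualified Data.Map as M

    -- source name -> table name -> (stand-in for TableMetadata)
    type Meta = M.Map String (M.Map String String)

    -- Focus one table inside one source, as tableMetadataSetter does.
    tableSetter :: String -> String -> Traversal' Meta String
    tableSetter source table = ix source . ix table

    demo :: Meta
    demo = M.singleton "default" (M.singleton "author" "")
         & tableSetter "default" "author" .~ "updated table metadata"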
data MetadataNoSources
  = MetadataNoSources
  { _mnsTables           :: !Tables
  , _mnsFunctions        :: !Functions
  , _mnsRemoteSchemas    :: !RemoteSchemas
  , _mnsQueryCollections :: !QueryCollections
  , _mnsAllowlist        :: !Allowlist
  , _mnsCustomTypes      :: !CustomTypes
  , _mnsActions          :: !Actions
  , _mnsCronTriggers     :: !CronTriggers
  } deriving (Show, Eq)
$(deriveToJSON (aesonDrop 4 snakeCase) ''MetadataNoSources)

instance FromJSON MetadataNoSources where
  parseJSON = withObject "Object" $ \o -> do
    version <- o .:? "version" .!= MVVersion1
    (tables, functions) <-
      case version of
        MVVersion1 -> do
          tables <- oMapFromL _tmTable <$> o .: "tables"
          functionList <- o .:? "functions" .!= []
          let functions = OM.fromList $ flip map functionList $
                \function -> (function, FunctionMetadata function emptyFunctionConfig)
          pure (tables, functions)
        MVVersion2 -> do
          tables <- oMapFromL _tmTable <$> o .: "tables"
          functions <- oMapFromL _fmFunction <$> o .:? "functions" .!= []
          pure (tables, functions)
        MVVersion3 -> fail "unexpected version for metadata without sources: 3"
    (remoteSchemas, queryCollections, allowlist, customTypes,
     actions, cronTriggers) <- parseNonSourcesMetadata o
    pure $ MetadataNoSources tables functions remoteSchemas queryCollections
           allowlist customTypes actions cronTriggers

newtype MetadataModifier =
  MetadataModifier {unMetadataModifier :: Metadata -> Metadata}
@ -415,17 +487,15 @@ noMetadataModify = mempty
-- See: https://github.com/hasura/graphql-engine/issues/6348
metadataToOrdJSON :: Metadata -> AO.Value
metadataToOrdJSON ( Metadata
                    tables
                    functions
                    sources
                    remoteSchemas
                    queryCollections
                    allowlist
                    customTypes
                    actions
                    cronTriggers
                  ) = AO.object $ [versionPair, tablesPair] <>
                      catMaybes [ functionsPair
                                , remoteSchemasPair
                  ) = AO.object $ [versionPair, sourcesPair] <>
                      catMaybes [ remoteSchemasPair
                                , queryCollectionsPair
                                , allowlistPair
                                , actionsPair
@ -434,8 +504,7 @@ metadataToOrdJSON ( Metadata
                                ]
  where
    versionPair = ("version", AO.toOrdered currentMetadataVersion)
    tablesPair = ("tables", AO.array $ map tableMetaToOrdJSON $ sortOn _tmTable $ OM.elems tables)
    functionsPair = listToMaybeOrdPairSort "functions" functionMetadataToOrdJSON _fmFunction functions
    sourcesPair = ("sources", AO.array $ map sourceMetaToOrdJSON $ sortOn _smName $ OM.elems sources)
    remoteSchemasPair = listToMaybeOrdPairSort "remote_schemas" remoteSchemaQToOrdJSON _rsmName remoteSchemas
    queryCollectionsPair = listToMaybeOrdPairSort "query_collections" createCollectionToOrdJSON _ccName queryCollections
    allowlistPair = listToMaybeOrdPairSort "allowlist" AO.toOrdered _crCollection allowlist
@ -444,6 +513,16 @@ metadataToOrdJSON ( Metadata
    actionsPair = listToMaybeOrdPairSort "actions" actionMetadataToOrdJSON _amName actions
    cronTriggersPair = listToMaybeOrdPairSort "cron_triggers" crontriggerQToOrdJSON ctName cronTriggers

    sourceMetaToOrdJSON :: SourceMetadata -> AO.Value
    sourceMetaToOrdJSON SourceMetadata{..} =
      let sourceNamePair = ("name", AO.toOrdered _smName)
          tablesPair = ("tables", AO.array $ map tableMetaToOrdJSON $ sortOn _tmTable $ OM.elems _smTables)
          functionsPair = listToMaybeOrdPairSort "functions" functionMetadataToOrdJSON _fmFunction _smFunctions

          configurationPair = [("configuration", AO.toOrdered _smConfiguration)]

      in AO.object $ [sourceNamePair, tablesPair] <> maybeToList functionsPair <> configurationPair

    tableMetaToOrdJSON :: TableMetadata -> AO.Value
    tableMetaToOrdJSON ( TableMetadata
                         table
@ -99,7 +99,8 @@ type CreateObjRel = WithTable ObjRelDef

data DropRel
  = DropRel
  { drTable        :: !QualifiedTable
  { drSource       :: !SourceName
  , drTable        :: !QualifiedTable
  , drRelationship :: !RelName
  , drCascade      :: !Bool
  } deriving (Show, Eq)
@ -108,13 +109,15 @@ $(deriveToJSON (aesonDrop 2 snakeCase){omitNothingFields=True} ''DropRel)
instance FromJSON DropRel where
  parseJSON = withObject "Object" $ \o ->
    DropRel
      <$> o .: "table"
      <$> o .:? "source" .!= defaultSource
      <*> o .: "table"
      <*> o .: "relationship"
      <*> o .:? "cascade" .!= False

data SetRelComment
  = SetRelComment
  { arTable        :: !QualifiedTable
  { arSource       :: !SourceName
  , arTable        :: !QualifiedTable
  , arRelationship :: !RelName
  , arComment      :: !(Maybe T.Text)
  } deriving (Show, Eq)
@ -122,13 +125,15 @@ $(deriveToJSON (aesonDrop 2 snakeCase){omitNothingFields=True} ''SetRelComment)
instance FromJSON SetRelComment where
  parseJSON = withObject "Object" $ \o ->
    SetRelComment
      <$> o .: "table"
      <$> o .:? "source" .!= defaultSource
      <*> o .: "table"
      <*> o .: "relationship"
      <*> o .:? "comment"

data RenameRel
  = RenameRel
  { rrTable   :: !QualifiedTable
  { rrSource  :: !SourceName
  , rrTable   :: !QualifiedTable
  , rrName    :: !RelName
  , rrNewName :: !RelName
  } deriving (Show, Eq)
@ -137,6 +142,7 @@ $(deriveToJSON (aesonDrop 2 snakeCase) ''RenameRel)
instance FromJSON RenameRel where
  parseJSON = withObject "Object" $ \o ->
    RenameRel
      <$> o .: "table"
      <$> o .:? "source" .!= defaultSource
      <*> o .: "table"
      <*> o .: "name"
      <*> o .: "new_name"
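All three parsers above use the same optional-"source" idiom. A standalone sketch of that decoding (the table is simplified to Text, and "default" stands in for the engine's defaultSource):

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Aeson
    import Data.Text (Text)

    data DropRelSketch = DropRelSketch
      { source       :: Text
      , table        :: Text
      , relationship :: Text
      } deriving Show

    instance FromJSON DropRelSketch where
      parseJSON = withObject "Object" $ \o ->
        DropRelSketch
          <$> o .:? "source" .!= "default" -- absent key falls back to the default source
          <*> o .: "table"
          <*> o .: "relationship"

    -- decode "{\"table\":\"author\",\"relationship\":\"articles\"}"
    --   ==> Just (DropRelSketch {source = "default", table = "author", relationship = "articles"})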
@ -241,7 +241,10 @@ data RemoteRelationship =
  { rtrName         :: !RemoteRelationshipName
  -- ^ Field name to which we'll map the remote in hasura; this becomes part
  -- of the hasura schema.
  , rtrSource       :: !SourceName
  , rtrTable        :: !QualifiedTable
  -- ^ (SourceName, QualifiedTable) determines the table on which the relationship
  -- is defined
  , rtrHasuraFields :: !(Set FieldName) -- TODO (from master)? change to PGCol
  -- ^ The hasura fields from 'rtrTable' that will be in scope when resolving
  -- the remote objects in 'rtrRemoteField'.
@ -251,7 +254,17 @@ data RemoteRelationship =
  } deriving (Show, Eq, Generic)
instance NFData RemoteRelationship
instance Cacheable RemoteRelationship
$(deriveJSON (aesonDrop 3 snakeCase) ''RemoteRelationship)
$(deriveToJSON (aesonDrop 3 snakeCase) ''RemoteRelationship)

instance FromJSON RemoteRelationship where
  parseJSON = withObject "Object" $ \o ->
    RemoteRelationship
      <$> o .: "name"
      <*> o .:? "source" .!= defaultSource
      <*> o .: "table"
      <*> o .: "hasura_fields"
      <*> o .: "remote_schema"
      <*> o .: "remote_field"

data RemoteRelationshipDef
  = RemoteRelationshipDef
@ -263,10 +276,17 @@ instance Cacheable RemoteRelationshipDef
$(deriveJSON (aesonDrop 4 snakeCase) ''RemoteRelationshipDef)
$(makeLenses ''RemoteRelationshipDef)

data DeleteRemoteRelationship =
  DeleteRemoteRelationship
  { drrTable :: QualifiedTable
  , drrName  :: RemoteRelationshipName
  } deriving (Show, Eq)
data DeleteRemoteRelationship
  = DeleteRemoteRelationship
  { drrSource :: !SourceName
  , drrTable  :: !QualifiedTable
  , drrName   :: !RemoteRelationshipName
  } deriving (Show, Eq)
instance FromJSON DeleteRemoteRelationship where
  parseJSON = withObject "Object" $ \o ->
    DeleteRemoteRelationship
      <$> o .:? "source" .!= defaultSource
      <*> o .: "table"
      <*> o .: "name"

$(deriveJSON (aesonDrop 3 snakeCase){omitNothingFields=True} ''DeleteRemoteRelationship)
$(deriveToJSON (aesonDrop 3 snakeCase){omitNothingFields=True} ''DeleteRemoteRelationship)
@ -3,6 +3,7 @@
module Hasura.RQL.Types.Run
  ( RunT(..)
  , RunCtx(..)
  , runQueryLazyTx
  , peelRun
  ) where

@ -28,17 +29,18 @@ data RunCtx
  }

newtype RunT m a
  = RunT { unRunT :: ReaderT RunCtx (LazyTxT QErr m) a }
  = RunT { unRunT :: ReaderT RunCtx (ExceptT QErr m) a }
  deriving ( Functor, Applicative, Monad
           , MonadError QErr
           , MonadReader RunCtx
           , MonadTx
           , MonadIO
           , MonadUnique
           , MonadMetadataStorage
           )

instance (MonadMetadataStorage m) => MonadScheduledEvents (RunT m)
instance (MonadIO m) => MonadUnique (RunT m) where
  newUnique = liftIO newUnique

instance (MonadMetadataStorage m) => MonadMetadataStorageQueryAPI (RunT m)

deriving instance (MonadIO m, MonadBase IO m) => MonadBase IO (RunT m)
deriving instance (MonadIO m, MonadBaseControl IO m) => MonadBaseControl IO (RunT m)
@ -55,18 +57,29 @@ instance (Monad m) => HasSQLGenCtx (RunT m) where
instance (Monad m) => HasRemoteSchemaPermsCtx (RunT m) where
  askRemoteSchemaPermsCtx = asks _rcRemoteSchemaPermsCtx

peelRun
instance (MonadResolveSource m) => MonadResolveSource (RunT m) where
  getSourceResolver = RunT . lift . lift $ getSourceResolver

runQueryLazyTx
  :: ( MonadIO m
     , MonadBaseControl IO m
     , MonadError QErr m
     , Tracing.MonadTrace m
     , UserInfoM m
     )
  => RunCtx
  -> PGExecCtx
  => PGExecCtx
  -> Q.TxAccess
  -> Maybe Tracing.TraceContext
  -> RunT m a
  -> ExceptT QErr m a
peelRun runCtx pgExecCtx txAccess ctx (RunT m) =
  runLazyTx pgExecCtx txAccess $
    maybe id withTraceContext ctx $ withUserInfo userInfo $ runReaderT m runCtx
  where
    userInfo = _rcUserInfo runCtx
  -> LazyTxT QErr m a
  -> m a
runQueryLazyTx pgExecCtx txAccess tx = do
  traceCtx <- Tracing.currentContext
  userInfo <- askUserInfo
  liftEitherM
    $ runExceptT
    $ runLazyTx pgExecCtx txAccess
    $ withTraceContext traceCtx
    $ withUserInfo userInfo tx

peelRun
  :: RunCtx -> RunT m a -> ExceptT QErr m a
peelRun runCtx (RunT m) = runReaderT m runCtx
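The net effect of this refactor: RunT no longer embeds a database transaction, so peeling it is pure unwrapping, and any transaction is run explicitly via runQueryLazyTx against the chosen source's execution context. A standalone sketch of the new shape (mtl only; Ctx and the String error type are placeholders):

    import Control.Monad.Except (ExceptT, runExceptT)
    import Control.Monad.Reader (ReaderT, runReaderT)

    newtype Ctx = Ctx { ctxUser :: String }

    newtype Run a = Run { unRun :: ReaderT Ctx (ExceptT String IO) a }

    -- Mirrors the new peelRun: discharge the Reader layer, nothing else.
    peel :: Ctx -> Run a -> ExceptT String IO a
    peel ctx (Run m) = runReaderT m ctx

    demo :: IO (Either String ())
    demo = runExceptT $ peel (Ctx "admin") (Run (pure ()))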
@ -12,6 +12,8 @@ module Hasura.RQL.Types.SchemaCache
  , TableConfig(..)
  , emptyTableConfig
  , getAllRemoteSchemas
  , getPGFunctionInfo
  , getPGTableInfo

  , TableCoreCache
  , TableCache
@ -56,10 +58,13 @@ module Hasura.RQL.Types.SchemaCache
  , DepMap
  , WithDeps

  , SourceM(..)
  , SourceT(..)
  , TableCoreInfoRM(..)
  , TableCoreCacheRT(..)
  , TableInfoRM(..)
  , TableCacheRT(..)
  , CacheRM(..)
  , CacheRT(..)

  , FieldInfoMap
  , FieldInfo(..)
@ -120,7 +125,7 @@ module Hasura.RQL.Types.SchemaCache
  , CronTriggerInfo(..)
  ) where

import Control.Lens (makeLenses)
import Control.Lens (makeLenses)

import Hasura.Prelude

@ -138,9 +143,10 @@ import System.Cron.Types
import qualified Hasura.GraphQL.Parser as P

import Hasura.Backends.Postgres.Connection
import Hasura.Backends.Postgres.SQL.Types (QualifiedTable, QualifiedFunction, PGCol)
import Hasura.Backends.Postgres.SQL.Types (PGCol, QualifiedFunction, QualifiedTable)
import Hasura.GraphQL.Context (GQLContext, RemoteField, RoleContext)
import Hasura.Incremental (Dependency, MonadDepend (..), selectKeyD, Cacheable)
import Hasura.Incremental (Cacheable, Dependency, MonadDepend (..),
                           selectKeyD)
import Hasura.RQL.IR.BoolExp
import Hasura.RQL.Types.Action
import Hasura.RQL.Types.Common hiding (FunctionName)
@ -154,6 +160,7 @@ import Hasura.RQL.Types.QueryCollection
import Hasura.RQL.Types.RemoteSchema
import Hasura.RQL.Types.ScheduledTrigger
import Hasura.RQL.Types.SchemaCacheTypes
import Hasura.RQL.Types.Source
import Hasura.RQL.Types.Table
import Hasura.Session
import Hasura.SQL.Backend
@ -163,17 +170,17 @@ import Hasura.Tracing (TraceT)
reportSchemaObjs :: [SchemaObjId] -> Text
reportSchemaObjs = commaSeparated . sort . map reportSchemaObj

mkParentDep :: QualifiedTable -> SchemaDependency
mkParentDep tn = SchemaDependency (SOTable tn) DRTable
mkParentDep :: SourceName -> QualifiedTable -> SchemaDependency
mkParentDep s tn = SchemaDependency (SOSourceObj s $ SOITable tn) DRTable

mkColDep :: DependencyReason -> QualifiedTable -> PGCol -> SchemaDependency
mkColDep reason tn col =
  flip SchemaDependency reason . SOTableObj tn $ TOCol col
mkColDep :: DependencyReason -> SourceName -> QualifiedTable -> PGCol -> SchemaDependency
mkColDep reason source tn col =
  flip SchemaDependency reason . SOSourceObj source . SOITableObj tn $ TOCol col

mkComputedFieldDep
  :: DependencyReason -> QualifiedTable -> ComputedFieldName -> SchemaDependency
mkComputedFieldDep reason tn computedField =
  flip SchemaDependency reason . SOTableObj tn $ TOComputedField computedField
  :: DependencyReason -> SourceName -> QualifiedTable -> ComputedFieldName -> SchemaDependency
mkComputedFieldDep reason s tn computedField =
  flip SchemaDependency reason . SOSourceObj s . SOITableObj tn $ TOComputedField computedField

type WithDeps a = (a, [SchemaDependency])

@ -244,12 +251,21 @@ incSchemaCacheVer (SchemaCacheVer prev) =

type ActionCache = M.HashMap ActionName (ActionInfo 'Postgres) -- info of all actions

getPGFunctionInfo
  :: SourceName -> QualifiedFunction -> SourceCache 'Postgres -> Maybe FunctionInfo
getPGFunctionInfo sourceName qualifiedFunction m =
  M.lookup sourceName m >>= M.lookup qualifiedFunction . _pcFunctions

getPGTableInfo
  :: SourceName -> QualifiedTable -> SourceCache 'Postgres -> Maybe (TableInfo 'Postgres)
getPGTableInfo sourceName qualifiedTable m =
  M.lookup sourceName m >>= M.lookup qualifiedTable . _pcTables

data SchemaCache
  = SchemaCache
  { scTables :: !(TableCache 'Postgres)
  { scPostgres :: !(SourceCache 'Postgres)
  , scActions :: !ActionCache
  , scFunctions :: !FunctionCache
  , scRemoteSchemas :: !(M.HashMap RemoteSchemaName RemoteSchemaCtx)
  , scRemoteSchemas :: !RemoteSchemaMap
  , scAllowlist :: !(HS.HashSet GQLQuery)
  , scGQLContext :: !(HashMap RoleName (RoleContext GQLContext))
  , scUnauthenticatedGQLContext :: !GQLContext
@ -274,12 +290,31 @@ getAllRemoteSchemas sc =
    getInconsistentRemoteSchemas $ scInconsistentObjs sc
  in consistentRemoteSchemas <> inconsistentRemoteSchemas

class (Monad m) => SourceM m where
  askCurrentSource :: m SourceName

instance (SourceM m) => SourceM (ReaderT r m) where
  askCurrentSource = lift askCurrentSource
instance (SourceM m) => SourceM (StateT s m) where
  askCurrentSource = lift askCurrentSource
instance (Monoid w, SourceM m) => SourceM (WriterT w m) where
  askCurrentSource = lift askCurrentSource
instance (SourceM m) => SourceM (TraceT m) where
  askCurrentSource = lift askCurrentSource

newtype SourceT m a
  = SourceT { runSourceT :: SourceName -> m a }
  deriving (Functor, Applicative, Monad, MonadIO, MonadError e, MonadState s, MonadWriter w, MonadTx, TableCoreInfoRM b, CacheRM)
    via (ReaderT SourceName m)
  deriving (MonadTrans) via (ReaderT SourceName)

instance (Monad m) => SourceM (SourceT m) where
  askCurrentSource = SourceT pure

|
||||
-- entire schema cache has not been built yet.
|
||||
class (Monad m) => TableCoreInfoRM b m where
|
||||
class (SourceM m) => TableCoreInfoRM b m where
|
||||
lookupTableCoreInfo :: TableName b -> m (Maybe (TableCoreInfo b))
|
||||
default lookupTableCoreInfo :: (CacheRM m, b ~ 'Postgres) => TableName b -> m (Maybe (TableCoreInfo b))
|
||||
lookupTableCoreInfo tableName = fmap _tiCoreInfo . M.lookup tableName . scTables <$> askSchemaCache
|
||||
|
||||
instance (TableCoreInfoRM b m) => TableCoreInfoRM b (ReaderT r m) where
|
||||
lookupTableCoreInfo = lift . lookupTableCoreInfo
|
||||
@ -291,18 +326,55 @@ instance (TableCoreInfoRM b m) => TableCoreInfoRM b (TraceT m) where
|
||||
lookupTableCoreInfo = lift . lookupTableCoreInfo
|
||||
|
||||
newtype TableCoreCacheRT b m a
|
||||
= TableCoreCacheRT { runTableCoreCacheRT :: Dependency (TableCoreCache b) -> m a }
|
||||
= TableCoreCacheRT { runTableCoreCacheRT :: (SourceName, Dependency (TableCoreCache b)) -> m a }
|
||||
deriving (Functor, Applicative, Monad, MonadIO, MonadError e, MonadState s, MonadWriter w, MonadTx)
|
||||
via (ReaderT (Dependency (TableCoreCache b)) m)
|
||||
deriving (MonadTrans) via (ReaderT (Dependency (TableCoreCache b)))
|
||||
via (ReaderT (SourceName, Dependency (TableCoreCache b)) m)
|
||||
deriving (MonadTrans) via (ReaderT (SourceName, Dependency (TableCoreCache b)))
|
||||
|
||||
instance (MonadReader r m) => MonadReader r (TableCoreCacheRT b m) where
|
||||
ask = lift ask
|
||||
local f m = TableCoreCacheRT (local f . runTableCoreCacheRT m)
|
||||
instance (MonadDepend m, Backend b) => TableCoreInfoRM b (TableCoreCacheRT b m) where
|
||||
lookupTableCoreInfo tableName = TableCoreCacheRT (dependOnM . selectKeyD tableName)
|
||||
|
||||
class (TableCoreInfoRM 'Postgres m) => CacheRM m where
|
||||
instance (Monad m) => SourceM (TableCoreCacheRT b m) where
|
||||
askCurrentSource =
|
||||
TableCoreCacheRT (pure . fst)
|
||||
|
||||
instance (MonadDepend m, Backend b) => TableCoreInfoRM b (TableCoreCacheRT b m) where
|
||||
lookupTableCoreInfo tableName =
|
||||
TableCoreCacheRT (dependOnM . selectKeyD tableName . snd)
|
||||
|
||||
-- | All our RQL DML queries operate over a single source. This typeclass facilitates that.
|
||||
class (TableCoreInfoRM b m) => TableInfoRM b m where
|
||||
lookupTableInfo :: TableName b -> m (Maybe (TableInfo b))
|
||||
|
||||
instance (TableInfoRM b m) => TableInfoRM b (ReaderT r m) where
|
||||
lookupTableInfo tableName = lift $ lookupTableInfo tableName
|
||||
instance (TableInfoRM b m) => TableInfoRM b (StateT s m) where
|
||||
lookupTableInfo tableName = lift $ lookupTableInfo tableName
|
||||
instance (Monoid w, TableInfoRM b m) => TableInfoRM b (WriterT w m) where
|
||||
lookupTableInfo tableName = lift $ lookupTableInfo tableName
|
||||
instance (TableInfoRM b m) => TableInfoRM b (TraceT m) where
|
||||
lookupTableInfo tableName = lift $ lookupTableInfo tableName
|
||||
|
||||
newtype TableCacheRT b m a
|
||||
= TableCacheRT { runTableCacheRT :: (SourceName, TableCache b) -> m a }
|
||||
deriving (Functor, Applicative, Monad, MonadIO, MonadError e, MonadState s, MonadWriter w, MonadTx)
|
||||
via (ReaderT (SourceName, TableCache b) m)
|
||||
deriving (MonadTrans) via (ReaderT (SourceName, TableCache b))
|
||||
|
||||
instance (Monad m) => SourceM (TableCacheRT b m) where
|
||||
askCurrentSource =
|
||||
TableCacheRT (pure . fst)
|
||||
|
||||
instance (Monad m, Backend b) => TableCoreInfoRM b (TableCacheRT b m) where
|
||||
lookupTableCoreInfo tableName =
|
||||
TableCacheRT (pure . fmap _tiCoreInfo . M.lookup tableName . snd)
|
||||
|
||||
instance (Monad m, Backend b) => TableInfoRM b (TableCacheRT b m) where
|
||||
lookupTableInfo tableName =
|
||||
TableCacheRT (pure . M.lookup tableName . snd)
|
||||
|
||||
class (Monad m) => CacheRM m where
|
||||
askSchemaCache :: m SchemaCache
|
||||
|
||||
instance (CacheRM m) => CacheRM (ReaderT r m) where
|
||||
@ -313,20 +385,15 @@ instance (Monoid w, CacheRM m) => CacheRM (WriterT w m) where
|
||||
askSchemaCache = lift askSchemaCache
|
||||
instance (CacheRM m) => CacheRM (TraceT m) where
|
||||
askSchemaCache = lift askSchemaCache
|
||||
|
||||
newtype CacheRT m a = CacheRT { runCacheRT :: SchemaCache -> m a }
|
||||
deriving (Functor, Applicative, Monad, MonadError e, MonadWriter w) via (ReaderT SchemaCache m)
|
||||
deriving (MonadTrans) via (ReaderT SchemaCache)
|
||||
instance (Monad m) => TableCoreInfoRM 'Postgres (CacheRT m)
|
||||
instance (Monad m) => CacheRM (CacheRT m) where
|
||||
askSchemaCache = CacheRT pure
|
||||
instance (CacheRM m) => CacheRM (LazyTxT QErr m) where
|
||||
askSchemaCache = lift askSchemaCache
|
||||
|
||||
askFunctionInfo
|
||||
:: (CacheRM m, QErrM m)
|
||||
=> QualifiedFunction -> m FunctionInfo
|
||||
askFunctionInfo qf = do
|
||||
=> SourceName -> QualifiedFunction -> m FunctionInfo
|
||||
askFunctionInfo sourceName qf = do
|
||||
sc <- askSchemaCache
|
||||
onNothing (M.lookup qf $ scFunctions sc) throwNoFn
|
||||
onNothing (getPGFunctionInfo sourceName qf $ scPostgres sc) throwNoFn
|
||||
where
|
||||
throwNoFn = throw400 NotExists $
|
||||
"function not found in cache " <>> qf
|
||||
@ -343,7 +410,9 @@ getDependentObjsWith f sc objId =
|
||||
isDependency deps = not $ HS.null $ flip HS.filter deps $
|
||||
\(SchemaDependency depId reason) -> objId `induces` depId && f reason
|
||||
-- induces a b : is b dependent on a
|
||||
induces (SOTable tn1) (SOTable tn2) = tn1 == tn2
|
||||
induces (SOTable tn1) (SOTableObj tn2 _) = tn1 == tn2
|
||||
induces objId1 objId2 = objId1 == objId2
|
||||
induces (SOSource s1) (SOSource s2) = s1 == s2
|
||||
induces (SOSource s1) (SOSourceObj s2 _) = s1 == s2
|
||||
induces (SOSourceObj s1 (SOITable tn1)) (SOSourceObj s2 (SOITable tn2)) = s1 == s2 && tn1 == tn2
|
||||
induces (SOSourceObj s1 (SOITable tn1)) (SOSourceObj s2 (SOITableObj tn2 _)) = s1 == s2 && tn1 == tn2
|
||||
induces objId1 objId2 = objId1 == objId2
|
||||
-- allDeps = toList $ fromMaybe HS.empty $ M.lookup objId $ scDepMap sc
|
||||
|
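A standalone sketch of the extended `induces` relation above (local stand-in types; a source induces everything under it, and a table induces its own table objects):

    data ObjId
      = Source String
      | Table String String           -- source, table
      | TableObj String String String -- source, table, member
      deriving (Show, Eq)

    induces :: ObjId -> ObjId -> Bool
    induces (Source s1)   (Source s2)        = s1 == s2
    induces (Source s1)   (Table s2 _)       = s1 == s2
    induces (Source s1)   (TableObj s2 _ _)  = s1 == s2
    induces (Table s1 t1) (Table s2 t2)      = s1 == s2 && t1 == t2
    induces (Table s1 t1) (TableObj s2 t2 _) = s1 == s2 && t1 == t2
    induces a b = a == b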
@ -31,6 +31,8 @@ import qualified Data.Sequence as Seq

import Control.Arrow.Extended
import Control.Lens
import Control.Monad.Morph
import Control.Monad.Trans.Control (MonadBaseControl)
import Control.Monad.Unique
import Data.Aeson (toJSON)
import Data.Aeson.Casing
@ -39,6 +41,7 @@ import Data.List (nub)
import Data.Text.Extended

import Hasura.Backends.Postgres.Connection
import Hasura.RQL.Types.Common
import Hasura.RQL.Types.Error
import Hasura.RQL.Types.Metadata
import Hasura.RQL.Types.RemoteSchema (RemoteSchemaName)
@ -118,7 +121,7 @@ data BuildReason
-- updated the catalog. Since that instance already updated table event triggers in @hdb_catalog@,
-- this build should be read-only.
| CatalogSync
  deriving (Show, Eq)
  deriving (Eq)

data CacheInvalidations = CacheInvalidations
  { ciMetadata :: !Bool
@ -127,13 +130,17 @@ data CacheInvalidations = CacheInvalidations
  , ciRemoteSchemas :: !(HashSet RemoteSchemaName)
  -- ^ Force refetching of the given remote schemas, even if their definition has not changed. Set
  -- by the @reload_remote_schema@ API.
  , ciSources :: !(HashSet SourceName)
  -- ^ Force re-establishing connections of the given data sources, even if their configuration has not changed. Set
  -- by the @pg_reload_source@ API.
  }
$(deriveJSON (aesonDrop 2 snakeCase) ''CacheInvalidations)

instance Semigroup CacheInvalidations where
  CacheInvalidations a1 b1 <> CacheInvalidations a2 b2 = CacheInvalidations (a1 || a2) (b1 <> b2)
  CacheInvalidations a1 b1 c1 <> CacheInvalidations a2 b2 c2 =
    CacheInvalidations (a1 || a2) (b1 <> b2) (c1 <> c2)
instance Monoid CacheInvalidations where
  mempty = CacheInvalidations False mempty
  mempty = CacheInvalidations False mempty mempty

instance (CacheRWM m) => CacheRWM (ReaderT r m) where
  buildSchemaCacheWithOptions a b c = lift $ buildSchemaCacheWithOptions a b c
@ -141,6 +148,8 @@ instance (CacheRWM m) => CacheRWM (StateT s m) where
  buildSchemaCacheWithOptions a b c = lift $ buildSchemaCacheWithOptions a b c
instance (CacheRWM m) => CacheRWM (TraceT m) where
  buildSchemaCacheWithOptions a b c = lift $ buildSchemaCacheWithOptions a b c
instance (CacheRWM m) => CacheRWM (LazyTxT QErr m) where
  buildSchemaCacheWithOptions a b c = lift $ buildSchemaCacheWithOptions a b c

-- | A simple monad class which enables fetching and setting @'Metadata'
-- in the state.
@ -165,9 +174,12 @@ newtype MetadataT m a
  deriving
    ( Functor, Applicative, Monad, MonadTrans
    , MonadIO, MonadUnique, MonadReader r, MonadError e, MonadTx
    , TableCoreInfoRM b, CacheRM, CacheRWM
    , SourceM, TableCoreInfoRM b, CacheRM, CacheRWM, MFunctor
    )

deriving instance (MonadBase IO m) => MonadBase IO (MetadataT m)
deriving instance (MonadBaseControl IO m) => MonadBaseControl IO (MetadataT m)

instance (Monad m) => MetadataM (MetadataT m) where
  getMetadata = MetadataT get
  putMetadata = MetadataT . put
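A standalone sketch of the widened invalidation monoid above: the metadata flag combines with (||) and both sets combine by union, so merging invalidation requests never drops one (Data.Set stands in for HashSet, String for the name types):

    import qualified Data.Set as S

    data Inval = Inval
      { invMetadata      :: Bool
      , invRemoteSchemas :: S.Set String
      , invSources       :: S.Set String
      } deriving Show

    instance Semigroup Inval where
      Inval a1 b1 c1 <> Inval a2 b2 c2 = Inval (a1 || a2) (b1 <> b2) (c1 <> c2)
    instance Monoid Inval where
      mempty = Inval False mempty mempty

    -- mempty <> Inval False mempty (S.singleton "default") keeps "default" marked.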
|
@ -31,40 +31,51 @@ data TableObjId
|
||||
deriving (Show, Eq, Generic)
|
||||
instance Hashable TableObjId
|
||||
|
||||
data SourceObjId
|
||||
= SOITable !QualifiedTable
|
||||
| SOITableObj !QualifiedTable !TableObjId
|
||||
| SOIFunction !QualifiedFunction
|
||||
deriving (Show, Eq, Generic)
|
||||
instance Hashable SourceObjId
|
||||
|
||||
data SchemaObjId
|
||||
= SOTable !QualifiedTable
|
||||
| SOTableObj !QualifiedTable !TableObjId
|
||||
| SOFunction !QualifiedFunction
|
||||
= SOSource !SourceName
|
||||
| SOSourceObj !SourceName !SourceObjId
|
||||
| SORemoteSchema !RemoteSchemaName
|
||||
| SORemoteSchemaPermission !RemoteSchemaName !RoleName
|
||||
deriving (Eq, Generic)
|
||||
|
||||
instance Hashable SchemaObjId
|
||||
|
||||
reportSchemaObj :: SchemaObjId -> Text
|
||||
reportSchemaObj (SOTable tn) = "table " <> qualifiedObjectToText tn
|
||||
reportSchemaObj (SOFunction fn) = "function " <> qualifiedObjectToText fn
|
||||
reportSchemaObj (SOTableObj tn (TOCol cn)) =
|
||||
"column " <> qualifiedObjectToText tn <> "." <> getPGColTxt cn
|
||||
reportSchemaObj (SOTableObj tn (TORel cn)) =
|
||||
"relationship " <> qualifiedObjectToText tn <> "." <> relNameToTxt cn
|
||||
reportSchemaObj (SOTableObj tn (TOForeignKey cn)) =
|
||||
"constraint " <> qualifiedObjectToText tn <> "." <> getConstraintTxt cn
|
||||
reportSchemaObj (SOTableObj tn (TOPerm rn pt)) =
|
||||
"permission " <> qualifiedObjectToText tn <> "." <> roleNameToTxt rn
|
||||
<> "." <> permTypeToCode pt
|
||||
reportSchemaObj (SOTableObj tn (TOTrigger trn )) =
|
||||
"event-trigger " <> qualifiedObjectToText tn <> "." <> triggerNameToTxt trn
|
||||
reportSchemaObj (SOTableObj tn (TOComputedField ccn)) =
|
||||
"computed field " <> qualifiedObjectToText tn <> "." <> computedFieldNameToText ccn
|
||||
reportSchemaObj (SOTableObj tn (TORemoteRel rn)) =
|
||||
"remote relationship " <> qualifiedObjectToText tn <> "." <> remoteRelationshipNameToText rn
|
||||
reportSchemaObj (SORemoteSchema remoteSchemaName) =
|
||||
"remote schema " <> unNonEmptyText (unRemoteSchemaName remoteSchemaName)
|
||||
reportSchemaObj (SORemoteSchemaPermission remoteSchemaName roleName) =
|
||||
"remote schema permission "
|
||||
<> unNonEmptyText (unRemoteSchemaName remoteSchemaName)
|
||||
<> "." <>> roleName
|
||||
reportSchemaObj :: SchemaObjId -> T.Text
|
||||
reportSchemaObj = \case
|
||||
SOSource source -> "source " <> sourceNameToText source
|
||||
SOSourceObj source sourceObjId -> inSource source $
|
||||
case sourceObjId of
|
||||
SOITable tn -> "table " <> qualifiedObjectToText tn
|
||||
SOIFunction fn -> "function " <> qualifiedObjectToText fn
|
||||
SOITableObj tn (TOCol cn) ->
|
||||
"column " <> qualifiedObjectToText tn <> "." <> getPGColTxt cn
|
||||
SOITableObj tn (TORel cn) ->
|
||||
"relationship " <> qualifiedObjectToText tn <> "." <> relNameToTxt cn
|
||||
SOITableObj tn (TOForeignKey cn) ->
|
||||
"constraint " <> qualifiedObjectToText tn <> "." <> getConstraintTxt cn
|
||||
SOITableObj tn (TOPerm rn pt) ->
|
||||
"permission " <> qualifiedObjectToText tn <> "." <> roleNameToTxt rn <> "." <> permTypeToCode pt
|
||||
SOITableObj tn (TOTrigger trn ) ->
|
||||
"event-trigger " <> qualifiedObjectToText tn <> "." <> triggerNameToTxt trn
|
||||
SOITableObj tn (TOComputedField ccn) ->
|
||||
"computed field " <> qualifiedObjectToText tn <> "." <> computedFieldNameToText ccn
|
||||
SOITableObj tn (TORemoteRel rn) ->
|
||||
"remote relationship " <> qualifiedObjectToText tn <> "." <> remoteRelationshipNameToText rn
|
||||
SORemoteSchema remoteSchemaName ->
|
||||
"remote schema " <> unNonEmptyText (unRemoteSchemaName remoteSchemaName)
|
||||
SORemoteSchemaPermission remoteSchemaName roleName ->
|
||||
"remote schema permission "
|
||||
<> unNonEmptyText (unRemoteSchemaName remoteSchemaName)
|
||||
<> "." <>> roleName
|
||||
where
|
||||
inSource s t = t <> " in source " <>> s
|
||||
|
||||
instance Show SchemaObjId where
|
||||
show soi = T.unpack $ reportSchemaObj soi
|
||||
|
server/src-lib/Hasura/RQL/Types/Source.hs (new file, 100 lines)
@ -0,0 +1,100 @@
module Hasura.RQL.Types.Source where

import Hasura.Backends.Postgres.Connection
import Hasura.Incremental (Cacheable (..))
import Hasura.Prelude
import Hasura.RQL.Types.Common
import Hasura.RQL.Types.Error
import Hasura.RQL.Types.Function
import Hasura.RQL.Types.Table
import Hasura.SQL.Backend

import qualified Hasura.Tracing as Tracing

import Control.Lens
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH

data SourceInfo b
  = SourceInfo
  { _pcName          :: !SourceName
  , _pcTables        :: !(TableCache b)
  , _pcFunctions     :: !FunctionCache
  , _pcConfiguration :: !(SourceConfig b)
  } deriving (Generic)
$(makeLenses ''SourceInfo)
instance ToJSON (SourceInfo 'Postgres) where
  toJSON = genericToJSON $ aesonDrop 3 snakeCase

type SourceCache b = HashMap SourceName (SourceInfo b)

-- | Contains Postgres connection configuration and essential metadata from the
-- database to build schema cache for tables and function.
data ResolvedPGSource
  = ResolvedPGSource
  { _rsConfig    :: !(SourceConfig 'Postgres)
  , _rsTables    :: !(DBTablesMetadata 'Postgres)
  , _rsFunctions :: !PostgresFunctionsMetadata
  , _rsPgScalars :: !(HashSet (ScalarType 'Postgres))
  } deriving (Eq)

type SourceTables b = HashMap SourceName (TableCache b)

data PostgresPoolSettings
  = PostgresPoolSettings
  { _ppsMaxConnections :: !Int
  , _ppsIdleTimeout    :: !Int
  , _ppsRetries        :: !Int
  } deriving (Show, Eq, Generic)
instance Cacheable PostgresPoolSettings
$(deriveToJSON (aesonDrop 4 snakeCase) ''PostgresPoolSettings)

instance FromJSON PostgresPoolSettings where
  parseJSON = withObject "Object" $ \o ->
    PostgresPoolSettings
      <$> o .:? "max_connections" .!= _ppsMaxConnections defaultPostgresPoolSettings
      <*> o .:? "idle_timeout"    .!= _ppsIdleTimeout defaultPostgresPoolSettings
      <*> o .:? "retries"         .!= _ppsRetries defaultPostgresPoolSettings

defaultPostgresPoolSettings :: PostgresPoolSettings
defaultPostgresPoolSettings =
  PostgresPoolSettings
  { _ppsMaxConnections = 50
  , _ppsIdleTimeout    = 180
  , _ppsRetries        = 1
  }
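A standalone sketch of the pool-settings parser above: every field is optional and falls back to the defaults (50, 180, 1), so an empty object is a valid configuration:

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Aeson

    data Pool = Pool { maxConns :: Int, idleTimeout :: Int, retries :: Int }
      deriving Show

    defPool :: Pool
    defPool = Pool 50 180 1

    instance FromJSON Pool where
      parseJSON = withObject "Object" $ \o ->
        Pool <$> o .:? "max_connections" .!= maxConns defPool
             <*> o .:? "idle_timeout"    .!= idleTimeout defPool
             <*> o .:? "retries"         .!= retries defPool

    -- decode "{\"max_connections\":10}" ==> Just (Pool 10 180 1)
    -- decode "{}"                       ==> Just (Pool 50 180 1)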
data PostgresSourceConnInfo
  = PostgresSourceConnInfo
  { _psciDatabaseUrl  :: !UrlConf
  , _psciPoolSettings :: !PostgresPoolSettings
  } deriving (Show, Eq, Generic)
instance Cacheable PostgresSourceConnInfo
$(deriveJSON (aesonDrop 5 snakeCase) ''PostgresSourceConnInfo)

data SourceConfiguration
  = SourceConfiguration
  { _scConnectionInfo :: !PostgresSourceConnInfo
  , _scReadReplicas   :: !(Maybe (NonEmpty PostgresSourceConnInfo))
  } deriving (Show, Eq, Generic)
instance Cacheable SourceConfiguration
$(deriveJSON (aesonDrop 3 snakeCase){omitNothingFields = True} ''SourceConfiguration)

type SourceResolver =
  SourceConfiguration -> IO (Either QErr (SourceConfig 'Postgres))

class (Monad m) => MonadResolveSource m where
  getSourceResolver :: m SourceResolver

instance (MonadResolveSource m) => MonadResolveSource (ExceptT e m) where
  getSourceResolver = lift getSourceResolver

instance (MonadResolveSource m) => MonadResolveSource (ReaderT r m) where
  getSourceResolver = lift getSourceResolver

instance (MonadResolveSource m) => MonadResolveSource (Tracing.TraceT m) where
  getSourceResolver = lift getSourceResolver

instance (MonadResolveSource m) => MonadResolveSource (LazyTxT QErr m) where
  getSourceResolver = lift getSourceResolver
@ -5,27 +5,38 @@ module Hasura.Server.API.PGDump
  ) where

import Control.Exception (IOException, try)
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
import qualified Data.ByteString.Lazy as BL
import Data.Char (isSpace)
import qualified Data.List as L
import qualified Data.Text as T
import Data.Text.Conversions
import qualified Database.PG.Query as Q
import Hasura.Prelude
import qualified Hasura.RQL.Types.Error as RTE
import Hasura.RQL.Types (SourceName, defaultSource)
import System.Exit
import System.Process

import qualified Data.ByteString.Lazy as BL
import qualified Data.List as L
import qualified Data.Text as T
import qualified Database.PG.Query as Q
import qualified Hasura.RQL.Types.Error as RTE
import qualified Text.Regex.TDFA as TDFA

data PGDumpReqBody =
  PGDumpReqBody
  { prbOpts        :: ![String]
  , prbCleanOutput :: !(Maybe Bool)
  { prbSource      :: !SourceName
  , prbOpts        :: ![String]
  , prbCleanOutput :: !Bool
  } deriving (Show, Eq)

$(deriveJSON (aesonDrop 3 snakeCase) ''PGDumpReqBody)
$(deriveToJSON (aesonDrop 3 snakeCase) ''PGDumpReqBody)

instance FromJSON PGDumpReqBody where
  parseJSON = withObject "Object" $ \o ->
    PGDumpReqBody
      <$> o .:? "source" .!= defaultSource
      <*> o .: "opts"
      <*> o .:? "clean_output" .!= False

execPGDump
  :: (MonadError RTE.QErr m, MonadIO m)
@ -35,10 +46,8 @@ execPGDump
execPGDump b ci = do
  eOutput <- liftIO $ try execProcess
  output <- onLeft eOutput throwException
  case output of
    Left err ->
      RTE.throw500 $ "error while executing pg_dump: " <> err
    Right dump -> return dump
  onLeft output $ \err ->
    RTE.throw500 $ "error while executing pg_dump: " <> err
  where
    throwException :: (MonadError RTE.QErr m) => IOException -> m a
    throwException _ = RTE.throw500 "internal exception while executing pg_dump"
@ -53,7 +62,7 @@ execPGDump b ci = do
    opts = connString : "--encoding=utf8" : prbOpts b

    clean str
      | Just True == prbCleanOutput b =
      | prbCleanOutput b =
          unlines $ filter (not . shouldDropLine) (lines str)
      | otherwise = str
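A standalone sketch of the reworked request body above: "source" becomes optional (here "default" stands in for defaultSource) and "clean_output" is now a plain Bool defaulting to False instead of a Maybe Bool:

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Aeson
    import Data.Text (Text)

    data DumpReq = DumpReq
      { drSource :: Text
      , drOpts   :: [String]
      , drClean  :: Bool
      } deriving Show

    instance FromJSON DumpReq where
      parseJSON = withObject "Object" $ \o ->
        DumpReq
          <$> o .:? "source" .!= "default"
          <*> o .: "opts"
          <*> o .:? "clean_output" .!= False

    -- decode "{\"opts\":[\"--schema-only\"]}"
    --   ==> Just (DumpReq {drSource = "default", drOpts = ["--schema-only"], drClean = False})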
@ -120,7 +120,7 @@ data RQLQueryV1

  | RQRunSql !RunSQL

  | RQReplaceMetadata !Metadata
  | RQReplaceMetadata !ReplaceMetadata
  | RQExportMetadata !ExportMetadata
  | RQClearMetadata !ClearMetadata
  | RQReloadMetadata !ReloadMetadata
@ -184,19 +184,26 @@ $(deriveJSON
runQuery
  :: ( HasVersion, MonadIO m, Tracing.MonadTrace m
     , MonadBaseControl IO m, MonadMetadataStorage m
     , MonadResolveSource m
     )
  => Env.Environment -> PGExecCtx -> InstanceId
  => Env.Environment
  -> InstanceId
  -> UserInfo -> RebuildableSchemaCache -> HTTP.Manager
  -> SQLGenCtx -> RemoteSchemaPermsCtx -> RQLQuery -> m (EncJSON, RebuildableSchemaCache)
runQuery env pgExecCtx instanceId userInfo sc hMgr sqlGenCtx remoteSchemaPermsCtx query = do
  accessMode <- getQueryAccessMode query
  traceCtx <- Tracing.currentContext
runQuery env instanceId userInfo sc hMgr sqlGenCtx remoteSchemaPermsCtx query = do
  metadata <- fetchMetadata
  result <- runQueryM env query & Tracing.interpTraceT \x -> do
  let sources = scPostgres $ lastBuiltSchemaCache sc

  (sourceName, _) <- case HM.toList sources of
    []  -> throw400 NotSupported "no postgres source exist"
    [s] -> pure $ second _pcConfiguration s
    _   -> throw400 NotSupported "multiple postgres sources found"

  result <- runQueryM env sourceName query & Tracing.interpTraceT \x -> do
    (((js, tracemeta), meta), rsc, ci) <-
      x & runMetadataT metadata
        & runCacheRWT sc
        & peelRun runCtx pgExecCtx accessMode (Just traceCtx)
        & peelRun runCtx
        & runExceptT
        & liftEitherM
    pure ((js, rsc, ci, meta), tracemeta)
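At this intermediate stage the metadata API still runs against a single Postgres source, picked out of the cache as above. A standalone sketch of that selection step (Data.Map stands in for the source cache):

    import qualified Data.Map as M

    pickOnlySource :: M.Map String cfg -> Either String (String, cfg)
    pickOnlySource sources = case M.toList sources of
      []  -> Left "no postgres source exist"
      [s] -> Right s
      _   -> Left "multiple postgres sources found"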
@ -346,17 +353,19 @@ reconcileAccessModes (Just mode1) (Just mode2)
  | otherwise = Left mode2

runQueryM
  :: ( HasVersion, QErrM m, CacheRWM m, UserInfoM m, MonadTx m
     , MonadIO m, MonadUnique m, HasHttpManager m, HasSQLGenCtx m
  :: ( HasVersion, CacheRWM m, UserInfoM m
     , MonadBaseControl IO m, MonadIO m, MonadUnique m
     , HasHttpManager m, HasSQLGenCtx m
     , HasRemoteSchemaPermsCtx m
     , Tracing.MonadTrace m
     , MetadataM m
     , MonadScheduledEvents m
     , MonadMetadataStorageQueryAPI m
     )
  => Env.Environment
  -> SourceName
  -> RQLQuery
  -> m EncJSON
runQueryM env rq = withPathK "args" $ case rq of
runQueryM env source rq = withPathK "args" $ case rq of
  RQV1 q -> runQueryV1M q
  RQV2 q -> runQueryV2M q
  where
@ -392,11 +401,11 @@ runQueryM env rq = withPathK "args" $ case rq of
    RQGetInconsistentMetadata q -> runGetInconsistentMetadata q
    RQDropInconsistentMetadata q -> runDropInconsistentMetadata q

    RQInsert q -> runInsert env q
    RQSelect q -> runSelect q
    RQUpdate q -> runUpdate env q
    RQDelete q -> runDelete env q
    RQCount q -> runCount q
    RQInsert q -> runInsert env source q
    RQSelect q -> runSelect source q
    RQUpdate q -> runUpdate env source q
    RQDelete q -> runDelete env source q
    RQCount q -> runCount source q

    RQAddRemoteSchema q -> runAddRemoteSchema env q
    RQRemoveRemoteSchema q -> runRemoveRemoteSchema q
@ -440,19 +449,18 @@ runQueryM env rq = withPathK "args" $ case rq of

    RQDumpInternalState q -> runDumpInternalState q

    RQRunSql q -> runRunSQL q
    RQRunSql q -> runRunSQL defaultSource q

    RQSetCustomTypes q -> runSetCustomTypes q
    RQSetTableCustomization q -> runSetTableCustomization q

    RQBulk qs -> encJFromList <$> indexedMapM (runQueryM env) qs
    RQBulk qs -> encJFromList <$> indexedMapM (runQueryM env source) qs

  runQueryV2M = \case
    RQV2TrackTable q -> runTrackTableV2Q q
    RQV2SetTableCustomFields q -> runSetTableCustomFieldsQV2 q
    RQV2TrackFunction q -> runTrackFunctionV2 q


requiresAdmin :: RQLQuery -> Bool
requiresAdmin = \case
  RQV1 q -> case q of
@ -13,7 +13,6 @@ import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as M
import qualified Data.HashSet as S
import qualified Data.Text as T
import qualified Database.PG.Query as Q
import qualified Network.HTTP.Client as HTTP
import qualified Network.HTTP.Types as HTTP
import qualified Network.Wai.Extended as Wai
@ -31,6 +30,7 @@ import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson hiding (json)
import Data.IORef
import Data.String (fromString)
import Data.Text.Extended
import Network.Mime (defaultMimeLookup)
import System.FilePath (joinPath, takeFileName)
import Web.Spock.Core ((<//>))
@ -50,6 +50,7 @@ import qualified Hasura.Logging as L
import qualified Hasura.Server.API.PGDump as PGD
import qualified Hasura.Tracing as Tracing

import Hasura.Backends.Postgres.Execute.Types
import Hasura.EncJSON
import Hasura.GraphQL.Logging (MonadQueryLog (..))
import Hasura.HTTP
@ -93,9 +94,7 @@ data SchemaCacheRef

data ServerCtx
  = ServerCtx
  { scPGExecCtx :: !PGExecCtx
  , scConnInfo :: !Q.ConnInfo
  , scLogger :: !(L.Logger L.Hasura)
  { scLogger :: !(L.Logger L.Hasura)
  , scCacheRef :: !SchemaCacheRef
  , scAuthMode :: !AuthMode
  , scManager :: !HTTP.Manager
@ -108,7 +107,7 @@ data ServerCtx
  , scEkgStore :: !EKG.Store
  , scResponseInternalErrorsConfig :: !ResponseInternalErrorsConfig
  , scEnvironment :: !Env.Environment
  , scRemoteSchemaPermsCtx :: !RemoteSchemaPermsCtx
  , scRemoteSchemaPermsCtx :: !RemoteSchemaPermsCtx
  }

data HandlerCtx
@ -372,8 +371,7 @@ mkSpockAction serverCtx qErrEncoder qErrModifier apiHandler = do

v1QueryHandler
  :: ( HasVersion, MonadIO m, MonadBaseControl IO m, MetadataApiAuthorization m, Tracing.MonadTrace m
     , MonadReader HandlerCtx m
     , MonadMetadataStorage m
     , MonadReader HandlerCtx m , MonadMetadataStorage m, MonadResolveSource m
     )
  => RQLQuery
  -> m (HttpResponse EncJSON)
@ -385,20 +383,20 @@ v1QueryHandler query = do
  return $ HttpResponse res []
  where
    action = do
      userInfo <- asks hcUser
      scRef <- asks (scCacheRef . hcServerCtx)
      schemaCache <- fmap fst $ liftIO $ readIORef $ _scrCache scRef
      httpMgr <- asks (scManager . hcServerCtx)
      sqlGenCtx <- asks (scSQLGenCtx . hcServerCtx)
      pgExecCtx <- asks (scPGExecCtx . hcServerCtx)
      instanceId <- asks (scInstanceId . hcServerCtx)
      env <- asks (scEnvironment . hcServerCtx)
      userInfo <- asks hcUser
      scRef <- asks (scCacheRef . hcServerCtx)
      schemaCache <- fmap fst $ liftIO $ readIORef $ _scrCache scRef
      httpMgr <- asks (scManager . hcServerCtx)
      sqlGenCtx <- asks (scSQLGenCtx . hcServerCtx)
      instanceId <- asks (scInstanceId . hcServerCtx)
      env <- asks (scEnvironment . hcServerCtx)
      remoteSchemaPermsCtx <- asks (scRemoteSchemaPermsCtx . hcServerCtx)
      runQuery env pgExecCtx instanceId userInfo schemaCache httpMgr sqlGenCtx remoteSchemaPermsCtx query
      runQuery env instanceId userInfo schemaCache httpMgr sqlGenCtx remoteSchemaPermsCtx query
v1Alpha1GQHandler
  :: ( HasVersion
     , MonadIO m
     , MonadBaseControl IO m
     , E.MonadGQLExecutionCheck m
     , MonadQueryLog m
     , Tracing.MonadTrace m
@ -418,7 +416,6 @@ v1Alpha1GQHandler queryType query = do
  manager <- asks (scManager . hcServerCtx)
  scRef <- asks (scCacheRef . hcServerCtx)
  (sc, scVer) <- liftIO $ readIORef $ _scrCache scRef
  pgExecCtx <- asks (scPGExecCtx . hcServerCtx)
  sqlGenCtx <- asks (scSQLGenCtx . hcServerCtx)
  -- planCache <- asks (scPlanCache . hcServerCtx)
  enableAL <- asks (scEnableAllowlist . hcServerCtx)
@ -426,7 +423,7 @@ v1Alpha1GQHandler queryType query = do
  responseErrorsConfig <- asks (scResponseInternalErrorsConfig . hcServerCtx)
  env <- asks (scEnvironment . hcServerCtx)

  let execCtx = E.ExecutionCtx logger sqlGenCtx pgExecCtx {- planCache -}
  let execCtx = E.ExecutionCtx logger sqlGenCtx {- planCache -}
                (lastBuiltSchemaCache sc) scVer manager enableAL

  flip runReaderT execCtx $
@ -435,6 +432,7 @@ v1Alpha1GQHandler queryType query = do
v1GQHandler
  :: ( HasVersion
     , MonadIO m
     , MonadBaseControl IO m
     , E.MonadGQLExecutionCheck m
     , MonadQueryLog m
     , Tracing.MonadTrace m
@ -451,6 +449,7 @@ v1GQHandler = v1Alpha1GQHandler E.QueryHasura
v1GQRelayHandler
  :: ( HasVersion
     , MonadIO m
     , MonadBaseControl IO m
     , E.MonadGQLExecutionCheck m
     , MonadQueryLog m
     , Tracing.MonadTrace m
@ -466,9 +465,9 @@ v1GQRelayHandler = v1Alpha1GQHandler E.QueryRelay

gqlExplainHandler
  :: forall m. ( MonadIO m
               , MonadBaseControl IO m
               , MonadError QErr m
               , MonadReader HandlerCtx m
               , MonadMetadataStorage (MetadataStorageT m)
               )
  => GE.GQLExplain
  -> m (HttpResponse EncJSON)
@ -476,7 +475,6 @@ gqlExplainHandler query = do
  onlyAdmin
  scRef <- asks (scCacheRef . hcServerCtx)
  sc <- getSCFromRef scRef
  pgExecCtx <- asks (scPGExecCtx . hcServerCtx)
  -- sqlGenCtx <- asks (scSQLGenCtx . hcServerCtx)
  -- env <- asks (scEnvironment . hcServerCtx)
  -- logger <- asks (scLogger . hcServerCtx)
@ -487,13 +485,19 @@ gqlExplainHandler query = do
  -- let runTx rttx = ExceptT . ReaderT $ \ctx -> do
  --       runExceptT (Tracing.interpTraceT (runLazyTx pgExecCtx Q.ReadOnly) (runReaderT rttx ctx))

  res <- GE.explainGQLQuery pgExecCtx sc query
  res <- GE.explainGQLQuery sc query
  return $ HttpResponse res []

v1Alpha1PGDumpHandler :: (MonadIO m, MonadError QErr m, MonadReader HandlerCtx m) => PGD.PGDumpReqBody -> m APIResp
v1Alpha1PGDumpHandler b = do
  onlyAdmin
  ci <- asks (scConnInfo . hcServerCtx)
  scRef <- asks (scCacheRef . hcServerCtx)
  sc <- getSCFromRef scRef
  let sources = scPostgres sc
      sourceName = PGD.prbSource b
  ci <- fmap (_pscConnInfo . _pcConfiguration) $
    onNothing (M.lookup sourceName sources) $
      throw400 NotFound $ "source " <> sourceName <<> " not found"
  output <- PGD.execPGDump b ci
  return $ RawResp $ HttpResponse output [sqlHeader]
@ -562,6 +566,7 @@ legacyQueryHandler
  :: ( HasVersion, MonadIO m, MonadBaseControl IO m, MetadataApiAuthorization m, Tracing.MonadTrace m
     , MonadReader HandlerCtx m
     , MonadMetadataStorage m
     , MonadResolveSource m
     )
  => PG.TableName -> Text -> Object
  -> m (HttpResponse EncJSON)
@ -613,20 +618,15 @@ mkWaiApp
     , EQ.MonadQueryInstrumentation m
     , HasResourceLimits m
     , MonadMetadataStorage (MetadataStorageT m)
     , MonadResolveSource m
     )
  => Env.Environment
  -- ^ Set of environment variables for reference in UIs
  -> Q.TxIsolation
  -- ^ postgres transaction isolation to be used in the entire app
  -> L.Logger L.Hasura
  -- ^ a 'L.Hasura' specific logger
  -> SQLGenCtx
  -> Bool
  -- ^ is AllowList enabled - TODO: change this boolean to sumtype
  -> Q.PGPool
  -> Maybe PGExecCtx
  -> Q.ConnInfo
  -- ^ postgres connection parameters
  -> HTTP.Manager
  -- ^ HTTP manager so that we can re-use sessions
  -> AuthMode
@ -652,9 +652,11 @@ mkWaiApp
  -> RemoteSchemaPermsCtx
  -> WS.ConnectionOptions
  -> KeepAliveDelay
  -- ^ Metadata storage connection pool
  -> m HasuraApp
mkWaiApp env isoLevel logger sqlGenCtx enableAL pool pgExecCtxCustom ci httpManager mode corsCfg enableConsole consoleAssetsDir
  enableTelemetry instanceId apis lqOpts _ {- planCacheOptions -} responseErrorsConfig liveQueryHook schemaCache ekgStore enableRSPermsCtx connectionOptions keepAliveDelay = do
mkWaiApp env logger sqlGenCtx enableAL httpManager mode corsCfg enableConsole consoleAssetsDir
  enableTelemetry instanceId apis lqOpts _ {- planCacheOptions -} responseErrorsConfig
  liveQueryHook schemaCache ekgStore enableRSPermsCtx connectionOptions keepAliveDelay = do

  -- See Note [Temporarily disabling query plan caching]
  -- (planCache, schemaCacheRef) <- initialiseCache
@ -662,17 +664,14 @@ mkWaiApp env isoLevel logger sqlGenCtx enableAL pool pgExecCtxCustom ci httpMana
  let getSchemaCache = first lastBuiltSchemaCache <$> readIORef (_scrCache schemaCacheRef)

  let corsPolicy = mkDefaultCorsPolicy corsCfg
      pgExecCtx = fromMaybe (mkPGExecCtx isoLevel pool) pgExecCtxCustom
      postPollHook = fromMaybe (EL.defaultLiveQueryPostPollHook logger) liveQueryHook

  lqState <- liftIO $ EL.initLiveQueriesState lqOpts pgExecCtx postPollHook
  wsServerEnv <- WS.createWSServerEnv logger pgExecCtx lqState getSchemaCache httpManager
  lqState <- liftIO $ EL.initLiveQueriesState lqOpts postPollHook
  wsServerEnv <- WS.createWSServerEnv logger lqState getSchemaCache httpManager
    corsPolicy sqlGenCtx enableAL keepAliveDelay {- planCache -}

  let serverCtx = ServerCtx
        { scPGExecCtx = pgExecCtx
        , scConnInfo = ci
        , scLogger = logger
        { scLogger = logger
        , scCacheRef = schemaCacheRef
        , scAuthMode = mode
        , scManager = httpManager
@ -729,6 +728,7 @@ httpApp
     , EQ.MonadQueryInstrumentation m
     , MonadMetadataStorage (MetadataStorageT m)
     , HasResourceLimits m
     , MonadResolveSource m
     )
  => CorsConfig
  -> ServerCtx
@ -748,7 +748,8 @@ httpApp corsCfg serverCtx enableConsole consoleAssetsDir enableTelemetry = do
  -- Health check endpoint
  Spock.get "healthz" $ do
    sc <- getSCFromRef $ scCacheRef serverCtx
    dbOk <- liftIO $ _pecCheckHealth $ scPGExecCtx serverCtx
    eitherHealth <- runMetadataStorageT checkMetadataStorageHealth
    let dbOk = either (const False) id eitherHealth
    if dbOk
      then Spock.setStatus HTTP.status200 >> Spock.text (if null (scInconsistentObjs sc)
        then "OK"
@ -12,13 +12,13 @@ import qualified Data.Aeson.TH as J
import qualified Data.HashSet as Set
import qualified Data.String as DataString
import qualified Data.Text as T
import qualified Data.Text.Encoding as TE
import qualified Database.PG.Query as Q
import qualified Language.Haskell.TH.Syntax as TH
import qualified Text.PrettyPrint.ANSI.Leijen as PP

import Data.FileEmbed (embedStringFile)
import Data.Time (NominalDiffTime)
import Data.URL.Template
import Network.Wai.Handler.Warp (HostPreference)
import qualified Network.WebSockets as WS
import Options.Applicative
@ -30,7 +30,7 @@ import qualified Hasura.Logging as L

import Hasura.Backends.Postgres.Connection
import Hasura.Prelude
import Hasura.RQL.Types (QErr, SchemaCache (..), RemoteSchemaPermsCtx (..))
import Hasura.RQL.Types
import Hasura.Server.Auth
import Hasura.Server.Cors
import Hasura.Server.Init.Config
@ -97,11 +97,13 @@ withEnvJwtConf :: Maybe JWTConfig -> String -> WithEnv (Maybe JWTConfig)
withEnvJwtConf jVal envVar =
  maybe (considerEnv envVar) returnJust jVal

mkHGEOptions :: L.EnabledLogTypes impl => RawHGEOptions impl -> WithEnv (HGEOptions impl)
mkHGEOptions (HGEOptionsG rawConnInfo rawCmd) =
  HGEOptionsG <$> connInfo <*> cmd
mkHGEOptions
  :: L.EnabledLogTypes impl => RawHGEOptions impl -> WithEnv (HGEOptions impl)
mkHGEOptions (HGEOptionsG rawConnInfo rawMetadataDbUrl rawCmd) =
  HGEOptionsG <$> connInfo <*> metadataDbUrl <*> cmd
  where
    connInfo = mkRawConnInfo rawConnInfo
    connInfo = processPostgresConnInfo rawConnInfo
    metadataDbUrl = withEnv rawMetadataDbUrl $ fst metadataDbUrlEnv
    cmd = case rawCmd of
      HCServe rso -> HCServe <$> mkServeOptions rso
      HCExport -> return HCExport
@ -110,16 +112,33 @@ mkHGEOptions (HGEOptionsG rawConnInfo rawCmd) =
      HCVersion -> return HCVersion
      HCDowngrade tgt -> return (HCDowngrade tgt)

mkRawConnInfo :: RawConnInfo -> WithEnv RawConnInfo
mkRawConnInfo rawConnInfo = do
  withEnvUrl <- withEnv rawDBUrl $ fst databaseUrlEnv
  withEnvRetries <- withEnv retries $ fst retriesNumEnv
  return $ rawConnInfo { connUrl = withEnvUrl
                       , connRetries = withEnvRetries
                       }
  where
    rawDBUrl = connUrl rawConnInfo
    retries = connRetries rawConnInfo
processPostgresConnInfo
  :: PostgresConnInfo (Maybe PostgresRawConnInfo)
  -> WithEnv (PostgresConnInfo UrlConf)
processPostgresConnInfo PostgresConnInfo{..} = do
  withEnvRetries <- withEnv _pciRetries $ fst retriesNumEnv
  databaseUrl <- rawConnInfoToUrlConf _pciDatabaseConn
  pure $ PostgresConnInfo databaseUrl withEnvRetries

rawConnInfoToUrlConf :: Maybe PostgresRawConnInfo -> WithEnv UrlConf
rawConnInfoToUrlConf maybeRawConnInfo = do
  env <- ask
  let databaseUrlEnvVar = fst databaseUrlEnv
      hasDatabaseUrlEnv = any ((== databaseUrlEnvVar) . fst) env

  case maybeRawConnInfo of
    -- If no --database-url or connection options provided in CLI command
    Nothing -> if hasDatabaseUrlEnv then
      -- Consider env variable as is in order to store it as @`UrlConf`
      -- in default source configuration in metadata
      pure $ UrlFromEnv $ T.pack databaseUrlEnvVar
    else throwError $
      "Fatal Error: Required --database-url or connection options or env var "
      <> databaseUrlEnvVar
    Just databaseConn ->
      pure $ UrlValue . InputWebhook $ case databaseConn of
        PGConnDatabaseUrl urlTemplate -> urlTemplate
        PGConnDetails connDetails -> rawConnDetailsToUrl connDetails
mkServeOptions :: L.EnabledLogTypes impl => RawServeOptions impl -> WithEnv (ServeOptions impl)
|
||||
mkServeOptions rso = do
|
||||
@ -270,6 +289,12 @@ databaseUrlEnv =
|
||||
, "Postgres database URL. Example postgres://foo:bar@example.com:2345/database"
|
||||
)
|
||||
|
||||
metadataDbUrlEnv :: (String, String)
|
||||
metadataDbUrlEnv =
|
||||
( "HASURA_GRAPHQL_METADATA_DATABASE_URL"
|
||||
, "Postgres database URL for Metadata storage. Example postgres://foo:bar@example.com:2345/database"
|
||||
)
|
||||
|
||||
serveCmdFooter :: PP.Doc
|
||||
serveCmdFooter =
|
||||
examplesDoc PP.<$> PP.text "" PP.<$> envVarDoc
|
||||
@ -528,11 +553,39 @@ adminInternalErrorsEnv =
|
||||
, "Enables including 'internal' information in an error response for requests made by an 'admin' (default: true)"
|
||||
)
|
||||
|
||||
parseRawConnInfo :: Parser RawConnInfo
|
||||
parseRawConnInfo =
|
||||
RawConnInfo <$> host <*> port <*> user <*> password
|
||||
<*> dbUrl <*> dbName <*> options
|
||||
<*> retries
|
||||
parsePostgresConnInfo :: Parser (PostgresConnInfo (Maybe PostgresRawConnInfo))
|
||||
parsePostgresConnInfo = do
|
||||
retries' <- retries
|
||||
maybeRawConnInfo <-
|
||||
(fmap PGConnDatabaseUrl <$> parseDatabaseUrl)
|
||||
<|> (fmap PGConnDetails <$> parseRawConnDetails)
|
||||
pure $ PostgresConnInfo maybeRawConnInfo retries'
|
||||
where
|
||||
retries = optional $
|
||||
option auto ( long "retries" <>
|
||||
metavar "NO OF RETRIES" <>
|
||||
help (snd retriesNumEnv)
|
||||
)
|
||||
|
||||
parseDatabaseUrl :: Parser (Maybe URLTemplate)
|
||||
parseDatabaseUrl = optional $
|
||||
option (eitherReader (parseURLTemplate . T.pack) )
|
||||
( long "database-url" <>
|
||||
metavar "<DATABASE-URL>" <>
|
||||
help (snd databaseUrlEnv)
|
||||
)
|
||||
|
||||
parseRawConnDetails :: Parser (Maybe PostgresRawConnDetails)
|
||||
parseRawConnDetails = do
|
||||
host' <- host
|
||||
port' <- port
|
||||
user' <- user
|
||||
password' <- password
|
||||
dbName' <- dbName
|
||||
options' <- options
|
||||
pure $ PostgresRawConnDetails
|
||||
<$> host' <*> port' <*> user' <*> (pure password')
|
||||
<*> dbName' <*> (pure options')
|
||||
where
|
||||
host = optional $
|
||||
strOption ( long "host" <>
|
||||
@ -558,13 +611,6 @@ parseRawConnInfo =
|
||||
help "Password of the user"
|
||||
)
|
||||
|
||||
dbUrl = optional $
|
||||
strOption
|
||||
( long "database-url" <>
|
||||
metavar "<DATABASE-URL>" <>
|
||||
help (snd databaseUrlEnv)
|
||||
)
|
||||
|
||||
dbName = optional $
|
||||
strOption ( long "dbname" <>
|
||||
short 'd' <>
|
||||
@ -579,28 +625,12 @@ parseRawConnInfo =
|
||||
help "PostgreSQL options"
|
||||
)
|
||||
|
||||
retries = optional $
|
||||
option auto ( long "retries" <>
|
||||
metavar "NO OF RETRIES" <>
|
||||
help (snd retriesNumEnv)
|
||||
)
|
||||
|
||||
mkConnInfo :: RawConnInfo -> Either String Q.ConnInfo
|
||||
mkConnInfo (RawConnInfo mHost mPort mUser password mURL mDB opts mRetries) =
|
||||
Q.ConnInfo retries <$>
|
||||
case (mHost, mPort, mUser, mDB, mURL) of
|
||||
|
||||
(Just host, Just port, Just user, Just db, Nothing) ->
|
||||
return $ Q.CDOptions $ Q.ConnOptions host port user password db opts
|
||||
|
||||
(_, _, _, _, Just dbURL) ->
|
||||
return $ Q.CDDatabaseURI $ TE.encodeUtf8 $ T.pack dbURL
|
||||
_ -> throwError $ "Invalid options. "
|
||||
++ "Expecting all database connection params "
|
||||
++ "(host, port, user, dbname, password) or "
|
||||
++ "database-url (HASURA_GRAPHQL_DATABASE_URL)"
|
||||
where
|
||||
retries = fromMaybe 1 mRetries
|
||||
parseMetadataDbUrl :: Parser (Maybe String)
|
||||
parseMetadataDbUrl = optional $
|
||||
strOption ( long "metadata-database-url" <>
|
||||
metavar "<METADATA-DATABASE-URL>" <>
|
||||
help (snd metadataDbUrlEnv)
|
||||
)
|
||||
|
||||
parseTxIsolation :: Parser (Maybe Q.TxIsolation)
|
||||
parseTxIsolation = optional $
|
||||
|
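Two env vars now matter at startup: HASURA_GRAPHQL_DATABASE_URL for the default source and HASURA_GRAPHQL_METADATA_DATABASE_URL for metadata storage. Note the precedence encoded in rawConnInfoToUrlConf above: explicit CLI connection options win and are stored as a literal UrlValue, while a bare env var is stored by name (UrlFromEnv), so exported metadata carries the variable name rather than the secret URL. A standalone sketch of that decision, with stand-in types loosely mirroring the diff (the constructor payloads are simplified assumptions):

import qualified Data.Text as T

-- Stand-in for the engine's UrlConf.
data UrlConf = UrlValue T.Text | UrlFromEnv T.Text deriving Show

resolveUrlConf :: Maybe T.Text -> [(String, String)] -> Either String UrlConf
resolveUrlConf (Just cliUrl) _ = Right (UrlValue cliUrl)              -- CLI flag wins
resolveUrlConf Nothing env
  | any ((== envVar) . fst) env = Right (UrlFromEnv (T.pack envVar))  -- store the name
  | otherwise =
      Left ("Fatal Error: Required --database-url or connection options or env var " <> envVar)
  where
    envVar = "HASURA_GRAPHQL_DATABASE_URL"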
@@ -8,11 +8,13 @@ import qualified Data.HashSet as Set
import qualified Data.String as DataString
import qualified Data.Text as T
import qualified Database.PG.Query as Q
import qualified Network.WebSockets as WS

import Data.Char (toLower)
import Data.Time
import Data.URL.Template
import Network.Wai.Handler.Warp (HostPreference)
import qualified Network.WebSockets as WS

import qualified Hasura.Cache.Bounded as Cache
import qualified Hasura.GraphQL.Execute.LiveQuery as LQ
@@ -20,7 +22,7 @@ import qualified Hasura.GraphQL.Execute.Plan as E
import qualified Hasura.Logging as L

import Hasura.Prelude
import Hasura.RQL.Types (RemoteSchemaPermsCtx (..))
import Hasura.RQL.Types
import Hasura.Server.Auth
import Hasura.Server.Cors
import Hasura.Session
@@ -127,18 +129,41 @@ data DowngradeOptions
  , dgoDryRun :: !Bool
  } deriving (Show, Eq)

data RawConnInfo =
  RawConnInfo
  { connHost :: !(Maybe String)
  , connPort :: !(Maybe Int)
  , connUser :: !(Maybe String)
data PostgresConnInfo a
  = PostgresConnInfo
  { _pciDatabaseConn :: !a
  , _pciRetries :: !(Maybe Int)
  } deriving (Show, Eq, Functor, Foldable, Traversable)

data PostgresRawConnDetails =
  PostgresRawConnDetails
  { connHost :: !String
  , connPort :: !Int
  , connUser :: !String
  , connPassword :: !String
  , connUrl :: !(Maybe String)
  , connDatabase :: !(Maybe String)
  , connDatabase :: !String
  , connOptions :: !(Maybe String)
  , connRetries :: !(Maybe Int)
  } deriving (Eq, Read, Show)

data PostgresRawConnInfo
  = PGConnDatabaseUrl !URLTemplate
  | PGConnDetails !PostgresRawConnDetails
  deriving (Show, Eq)

rawConnDetailsToUrl :: PostgresRawConnDetails -> URLTemplate
rawConnDetailsToUrl =
  mkPlainURLTemplate . rawConnDetailsToUrlText

rawConnDetailsToUrlText :: PostgresRawConnDetails -> Text
rawConnDetailsToUrlText PostgresRawConnDetails{..} =
  T.pack $
    "postgresql://" <> connUser <>
    ":" <> connPassword <>
    "@" <> connHost <>
    ":" <> show connPort <>
    "/" <> connDatabase <>
    maybe "" ("?options=" <>) connOptions

data HGECommandG a
  = HCServe !a
  | HCExport
@@ -161,19 +186,20 @@ $(J.deriveJSON (J.defaultOptions { J.constructorTagModifier = map toLower })

instance Hashable API

$(J.deriveJSON (J.aesonDrop 4 J.camelCase){J.omitNothingFields=True} ''RawConnInfo)
$(J.deriveJSON (J.aesonDrop 4 J.camelCase){J.omitNothingFields=True} ''PostgresRawConnDetails)

type HGECommand impl = HGECommandG (ServeOptions impl)
type RawHGECommand impl = HGECommandG (RawServeOptions impl)

data HGEOptionsG a
data HGEOptionsG a b
  = HGEOptionsG
  { hoConnInfo :: !RawConnInfo
  , hoCommand :: !(HGECommandG a)
  { hoConnInfo :: !(PostgresConnInfo a)
  , hoMetadataDbUrl :: !(Maybe String)
  , hoCommand :: !(HGECommandG b)
  } deriving (Show, Eq)

type RawHGEOptions impl = HGEOptionsG (RawServeOptions impl)
type HGEOptions impl = HGEOptionsG (ServeOptions impl)
type RawHGEOptions impl = HGEOptionsG (Maybe PostgresRawConnInfo) (RawServeOptions impl)
type HGEOptions impl = HGEOptionsG UrlConf (ServeOptions impl)

type Env = [(String, String)]

@@ -294,6 +320,9 @@ instance FromEnv L.LogLevel where
instance FromEnv Cache.CacheSize where
  fromEnv = Cache.parseCacheSize

instance FromEnv URLTemplate where
  fromEnv = parseURLTemplate . T.pack

type WithEnv a = ReaderT Env (ExceptT String Identity) a

runWithEnv :: Env -> WithEnv a -> Either String a
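A worked example of the URL rendering above (a sketch that copies the record and renderer from the diff so it runs standalone; the field values are invented):

{-# LANGUAGE RecordWildCards #-}
import qualified Data.Text as T
import qualified Data.Text.IO as T

-- Stand-in copy of PostgresRawConnDetails and its renderer from the diff.
data PostgresRawConnDetails = PostgresRawConnDetails
  { connHost :: String, connPort :: Int, connUser :: String
  , connPassword :: String, connDatabase :: String, connOptions :: Maybe String }

rawConnDetailsToUrlText :: PostgresRawConnDetails -> T.Text
rawConnDetailsToUrlText PostgresRawConnDetails{..} = T.pack $
  "postgresql://" <> connUser <> ":" <> connPassword <> "@" <> connHost <>
  ":" <> show connPort <> "/" <> connDatabase <>
  maybe "" ("?options=" <>) connOptions

main :: IO ()
main = T.putStrLn $ rawConnDetailsToUrlText $
  PostgresRawConnDetails "db.internal" 5432 "alice" "s3cret" "app" Nothing
-- prints: postgresql://alice:s3cret@db.internal:5432/app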
@@ -25,13 +25,13 @@ module Hasura.Server.Migrate
import Hasura.Prelude

import qualified Data.Aeson as A
import qualified Data.Environment as Env
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.Text.IO as TIO
import qualified Database.PG.Query as Q
import qualified Database.PG.Query.Connection as Q
import qualified Language.Haskell.TH.Lib as TH
import qualified Language.Haskell.TH.Syntax as TH

import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Time.Clock (UTCTime)
import System.Directory (doesFileExist)

@@ -44,7 +44,6 @@ import Hasura.Server.Init (DowngradeOptions (..))
import Hasura.Server.Logging (StartupLog (..))
import Hasura.Server.Migrate.Version (latestCatalogVersion,
                                      latestCatalogVersionString)
import Hasura.Server.Version

dropCatalog :: (MonadTx m) => m ()
dropCatalog = liftTx $ Q.catchE defaultTxErrorHandler $
@@ -81,60 +80,36 @@ data MigrationPair m = MigrationPair

migrateCatalog
  :: forall m
   . ( HasVersion
   . ( MonadTx m
     , MonadIO m
     , MonadTx m
     , HasHttpManager m
     , HasSQLGenCtx m
     , HasRemoteSchemaPermsCtx m
     , MonadBaseControl IO m
     )
  => Env.Environment
  => SourceConfiguration
  -> UTCTime
  -> m (MigrationResult, RebuildableSchemaCache)
migrateCatalog env migrationTime = do
  -> m (MigrationResult, Metadata)
migrateCatalog defaultSourceConfig migrationTime = do
  migrationResult <- doesSchemaExist (SchemaName "hdb_catalog") >>= \case
    False -> initialize True
    True -> doesTableExist (SchemaName "hdb_catalog") (TableName "hdb_version") >>= \case
      False -> initialize False
      True -> migrateFrom =<< getCatalogVersion
  metadata <- liftTx fetchMetadataFromCatalog
  schemaCache <- buildRebuildableSchemaCache env metadata
  pure (migrationResult, schemaCache)
  pure (migrationResult, metadata)
  where
    -- initializes the catalog, creating the schema if necessary
    initialize :: Bool -> m MigrationResult
    initialize createSchema = do
      liftTx $ Q.catchE defaultTxErrorHandler $
        when createSchema $ Q.unitQ "CREATE SCHEMA hdb_catalog" () False

      isExtensionAvailable "pgcrypto" >>= \case
        -- only if we created the schema, create the extension
        True -> when createSchema $ liftTx $ Q.unitQE needsPGCryptoError
                "CREATE EXTENSION IF NOT EXISTS pgcrypto SCHEMA public" () False
        False -> throw500 $
          "pgcrypto extension is required, but could not find the extension in the "
          <> "PostgreSQL server. Please make sure this extension is available."

      enablePgcryptoExtension
      runTx $(Q.sqlFromFile "src-rsr/initialise.sql")
      updateCatalogVersion
      -- insert metadata with default source
      let defaultSourceMetadata =
            SourceMetadata defaultSource mempty mempty defaultSourceConfig
          sources = OMap.singleton defaultSource defaultSourceMetadata
      liftTx $ setMetadataInCatalog emptyMetadata{_metaSources = sources}
      pure MRInitialized
      where
        needsPGCryptoError e@(Q.PGTxErr _ _ _ err) =
          case err of
            Q.PGIUnexpected _ -> requiredError
            Q.PGIStatement pgErr -> case Q.edStatusCode pgErr of
              Just "42501" -> err500 PostgresError permissionsMessage
              _ -> requiredError
          where
            requiredError =
              (err500 PostgresError requiredMessage) { qeInternal = Just $ A.toJSON e }
            requiredMessage =
              "pgcrypto extension is required, but it could not be created;"
              <> " encountered unknown postgres error"
            permissionsMessage =
              "pgcrypto extension is required, but the current user doesn’t have permission to"
              <> " create it. Please grant superuser permission, or setup the initial schema via"
              <> " https://hasura.io/docs/1.0/graphql/manual/deployment/postgres-permissions.html"

    -- migrates an existing catalog to the latest version from an existing version
    migrateFrom :: Text -> m MigrationResult
@@ -151,14 +126,14 @@ migrateCatalog env migrationTime = do
        pure $ MRMigrated previousVersion
      where
        neededMigrations =
          dropWhile ((/= previousVersion) . fst) (migrations False)
          dropWhile ((/= previousVersion) . fst) (migrations defaultSourceConfig False)

    updateCatalogVersion = setCatalogVersion latestCatalogVersionString migrationTime

downgradeCatalog
  :: forall m. (MonadIO m, MonadTx m)
  => DowngradeOptions -> UTCTime -> m MigrationResult
downgradeCatalog opts time = do
  => SourceConfiguration -> DowngradeOptions -> UTCTime -> m MigrationResult
downgradeCatalog defaultSourceConfig opts time = do
  downgradeFrom =<< getCatalogVersion
  where
    -- downgrades an existing catalog to the specified version
@@ -184,7 +159,7 @@ downgradeCatalog opts time = do
      where
        neededDownMigrations newVersion =
          downgrade previousVersion newVersion
            (reverse (migrations (dgoDryRun opts)))
            (reverse (migrations defaultSourceConfig (dgoDryRun opts)))

    downgrade
      :: Text
@@ -227,8 +202,8 @@ setCatalogVersion ver time = liftTx $ Q.unitQE defaultTxErrorHandler [Q.sql|

migrations
  :: forall m. (MonadIO m, MonadTx m)
  => Bool -> [(Text, MigrationPair m)]
migrations dryRun =
  => SourceConfiguration -> Bool -> [(Text, MigrationPair m)]
migrations defaultSourceConfig dryRun =
  -- We need to build the list of migrations at compile-time so that we can compile the SQL
  -- directly into the executable using `Q.sqlFromFile`. The GHC stage restriction makes
  -- doing this a little bit awkward (we can’t use any definitions in this module at
@@ -295,17 +270,29 @@ migrations dryRun =
      let query = $(Q.sqlFromFile "src-rsr/migrations/42_to_43.sql")
      if dryRun then (liftIO . TIO.putStrLn . Q.getQueryText) query
      else do
        metadata <- fetchMetadataFromHdbTables
        metadataV2 <- fetchMetadataFromHdbTables
        runTx query
        liftTx $ setMetadataInCatalog metadata
        let metadataV3 =
              let MetadataNoSources{..} = metadataV2
                  defaultSourceMetadata =
                    SourceMetadata defaultSource _mnsTables _mnsFunctions defaultSourceConfig
              in Metadata (OMap.singleton defaultSource defaultSourceMetadata)
                 _mnsRemoteSchemas _mnsQueryCollections _mnsAllowlist _mnsCustomTypes _mnsActions _mnsCronTriggers
        liftTx $ setMetadataInCatalog metadataV3

    from43To42 = do
      let query = $(Q.sqlFromFile "src-rsr/migrations/43_to_42.sql")
      if dryRun then (liftIO . TIO.putStrLn . Q.getQueryText) query
      else do
        metadata <- liftTx fetchMetadataFromCatalog
        Metadata{..} <- liftTx fetchMetadataFromCatalog
        runTx query
        liftTx $ runHasSystemDefinedT (SystemDefined False) $ saveMetadataToHdbTables metadata
        metadataV2 <- case OMap.toList _metaSources of
          [] -> pure $ MetadataNoSources mempty mempty mempty mempty mempty emptyCustomTypes mempty mempty
          [(_, SourceMetadata{..})] ->
            pure $ MetadataNoSources _smTables _smFunctions _metaRemoteSchemas _metaQueryCollections
                   _metaAllowlist _metaCustomTypes _metaActions _metaCronTriggers
          _ -> throw400 NotSupported "Cannot downgrade since there are more than one source"
        liftTx $ runHasSystemDefinedT (SystemDefined False) $ saveMetadataToHdbTables metadataV2
        recreateSystemMetadata
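The signature change above is the heart of this refactor: migrateCatalog now runs purely against the metadata database and returns the stored Metadata, and building the schema cache (which may touch other sources) becomes the caller's job. A standalone sketch of the resulting two-step call shape, with stand-in types (the real code threads these through MonadTx and a cache-build monad):

data MigrationResult = MRNothingToDo | MRInitialized | MRMigrated String deriving Show
data Metadata = Metadata deriving Show
data SchemaCache = SchemaCache deriving Show

-- Stands in for running the catalog migration transaction.
migrateCatalog :: IO (MigrationResult, Metadata)
migrateCatalog = pure (MRInitialized, Metadata)

-- Stands in for buildRebuildableSchemaCache.
buildSchemaCache :: Metadata -> IO SchemaCache
buildSchemaCache _ = pure SchemaCache

main :: IO ()
main = do
  (result, metadata) <- migrateCatalog   -- step 1: migrate, get metadata back
  cache <- buildSchemaCache metadata     -- step 2: build the cache separately
  print (result, cache)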
@@ -7,20 +7,19 @@ module Hasura.Server.SchemaUpdate
  )
where

import Hasura.Backends.Postgres.Connection
import Hasura.Logging
import Hasura.Metadata.Class
import Hasura.Prelude
import Hasura.RQL.DDL.Schema (runCacheRWT)
import Hasura.RQL.Types
import Hasura.RQL.Types.Run
import Hasura.Server.App (SchemaCacheRef (..), withSCUpdate)
import Hasura.Server.Logging
import Hasura.Server.Types (InstanceId (..))
import Hasura.Session

import Control.Monad.Trans.Control (MonadBaseControl)
import Control.Monad.Trans.Managed (ManagedT)
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
@@ -29,14 +28,14 @@ import Data.IORef
import GHC.AssertNF
#endif

import qualified Control.Concurrent.Extended as C
import qualified Control.Concurrent.STM as STM
import qualified Control.Immortal as Immortal
import qualified Data.Text as T
import qualified Data.Time as UTC
import qualified Database.PG.Query as PG
import qualified Database.PostgreSQL.LibPQ as PQ
import qualified Network.HTTP.Client as HTTP

pgChannel :: PG.PGChannel
pgChannel = "hasura_schema_update"
@@ -181,9 +180,11 @@ startSchemaSyncListenerThread pool logger instanceId = do
-- | An async thread which processes the schema sync events
-- See Note [Schema Cache Sync]
startSchemaSyncProcessorThread
  :: (C.ForkableMonadIO m, MonadMetadataStorage (MetadataStorageT m))
  :: ( C.ForkableMonadIO m
     , MonadMetadataStorage (MetadataStorageT m)
     , MonadResolveSource m
     )
  => SQLGenCtx
  -> PG.PGPool
  -> Logger Hasura
  -> HTTP.Manager
  -> SchemaSyncEventRef
@@ -192,11 +193,11 @@ startSchemaSyncProcessorThread
  -> UTC.UTCTime
  -> RemoteSchemaPermsCtx
  -> ManagedT m Immortal.Thread
startSchemaSyncProcessorThread sqlGenCtx pool logger httpMgr
startSchemaSyncProcessorThread sqlGenCtx logger httpMgr
  schemaSyncEventRef cacheRef instanceId cacheInitStartTime remoteSchemaPermsCtx = do
  -- Start processor thread
  processorThread <- C.forkManagedT "SchemeUpdate.processor" logger $
    processor sqlGenCtx pool logger httpMgr schemaSyncEventRef cacheRef instanceId cacheInitStartTime remoteSchemaPermsCtx
    processor sqlGenCtx logger httpMgr schemaSyncEventRef cacheRef instanceId cacheInitStartTime remoteSchemaPermsCtx
  logThreadStarted logger instanceId TTProcessor processorThread
  pure processorThread

@@ -247,9 +248,9 @@ processor
  :: forall m void.
     ( C.ForkableMonadIO m
     , MonadMetadataStorage (MetadataStorageT m)
     , MonadResolveSource m
     )
  => SQLGenCtx
  -> PG.PGPool
  -> Logger Hasura
  -> HTTP.Manager
  -> SchemaSyncEventRef
@@ -258,7 +259,7 @@ processor
  -> UTC.UTCTime
  -> RemoteSchemaPermsCtx
  -> m void
processor sqlGenCtx pool logger httpMgr updateEventRef
processor sqlGenCtx logger httpMgr updateEventRef
  cacheRef instanceId cacheInitStartTime remoteSchemaPermsCtx =
  -- Never exits
  forever $ do
@@ -279,8 +280,8 @@ processor sqlGenCtx pool logger httpMgr updateEventRef
      pure (_sseprShouldReload, _sseprCacheInvalidations)

    when shouldReload $
      refreshSchemaCache sqlGenCtx pool logger httpMgr cacheRef cacheInvalidations
        threadType "schema cache reloaded" remoteSchemaPermsCtx
      refreshSchemaCache sqlGenCtx logger httpMgr cacheRef cacheInvalidations
        threadType remoteSchemaPermsCtx "schema cache reloaded"
  where
    -- checks if there is an event
    -- and replaces it with Nothing
@@ -297,19 +298,19 @@ refreshSchemaCache
  :: ( MonadIO m
     , MonadBaseControl IO m
     , MonadMetadataStorage (MetadataStorageT m)
     , MonadResolveSource m
     )
  => SQLGenCtx
  -> PG.PGPool
  -> Logger Hasura
  -> HTTP.Manager
  -> SchemaCacheRef
  -> CacheInvalidations
  -> ThreadType
  -> Text
  -> RemoteSchemaPermsCtx
  -> Text
  -> m ()
refreshSchemaCache sqlGenCtx pool logger httpManager
  cacheRef invalidations threadType msg remoteSchemaPermsCtx = do
refreshSchemaCache sqlGenCtx logger httpManager
  cacheRef invalidations threadType remoteSchemaPermsCtx msg = do
  -- Reload schema cache from catalog
  eitherMetadata <- runMetadataStorageT fetchMetadata
  resE <- runExceptT $ do
@@ -318,14 +319,13 @@ refreshSchemaCache sqlGenCtx pool logger httpManager
    rebuildableCache <- fst <$> liftIO (readIORef $ _scrCache cacheRef)
    ((), cache, _) <- buildSchemaCacheWithOptions CatalogSync invalidations metadata
      & runCacheRWT rebuildableCache
      & peelRun runCtx pgCtx PG.ReadWrite Nothing
      & peelRun runCtx
    pure ((), cache)
  case resE of
    Left e -> logError logger threadType $ TEQueryError e
    Right () -> logInfo logger threadType $ object ["message" .= msg]
  where
    runCtx = RunCtx adminUserInfo httpManager sqlGenCtx remoteSchemaPermsCtx
    pgCtx = mkPGExecCtx PG.Serializable pool

logInfo :: (MonadIO m) => Logger Hasura -> ThreadType -> Value -> m ()
logInfo logger threadType val = unLogger logger $
@@ -165,13 +165,17 @@ computeMetrics sc _mtServiceTimings _mtPgVersion =
      _mtEventTriggers = Map.size $ Map.filter (not . Map.null)
                         $ Map.map _tiEventTriggerInfoMap userTables
      _mtRemoteSchemas = Map.size $ scRemoteSchemas sc
      _mtFunctions = Map.size $ Map.filter (not . isSystemDefined . fiSystemDefined) $ scFunctions sc
      -- TODO: multiple sources
      _mtFunctions = Map.size $ Map.filter (not . isSystemDefined . fiSystemDefined) $ maybe mempty _pcFunctions $ Map.lookup defaultSource $ scPostgres sc
      _mtActions = computeActionsMetrics $ scActions sc

  in Metrics{..}

  where
    userTables = Map.filter (not . isSystemDefined . _tciSystemDefined . _tiCoreInfo) $ scTables sc
    userTables =
      Map.filter (not . isSystemDefined . _tciSystemDefined . _tiCoreInfo) $
      -- TODO: multiple sources
      maybe mempty _pcTables $ Map.lookup defaultSource $ scPostgres sc
    countUserTables predicate = length . filter predicate $ Map.elems userTables

    calcPerms :: (RolePermInfo 'Postgres -> Maybe a) -> [RolePermInfo 'Postgres] -> Int
@@ -194,7 +198,7 @@ computeActionsMetrics actionCache =

  typeRelationships =
    length . L.nub . concatMap
    (map _trName . maybe [] toList . _otdRelationships . _aiOutputObject) $
    (map _trName . maybe [] toList . _otdRelationships . _aotDefinition . _aiOutputObject) $
    actions

-- | Logging related
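The schema cache no longer exposes flat scTables/scFunctions maps; tables and functions now live under a per-source entry in scPostgres, and (pending multi-source telemetry, per the TODOs above) the metrics read only the default source. A standalone sketch of that lookup pattern, with stand-in types:

import qualified Data.Map.Strict as Map

type SourceName = String

-- Stand-in for the per-source cache entry (_pcTables / _pcFunctions in the diff).
data PGSourceCache = PGSourceCache
  { pcTables :: Map.Map String (), pcFunctions :: Map.Map String () }

defaultSource :: SourceName
defaultSource = "default"

-- An absent source contributes empty maps, exactly like `maybe mempty _pcTables`.
userTablesOf :: Map.Map SourceName PGSourceCache -> Map.Map String ()
userTablesOf sources = maybe Map.empty pcTables (Map.lookup defaultSource sources)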
server/src-rsr/init_pg_source.sql (new file, 95 lines)
@@ -0,0 +1,95 @@
/* We define our own uuid generator function that uses gen_random_uuid() underneath.
   Since the column default is not directly referencing gen_random_uuid(),
   it prevents the column default from being dropped when pgcrypto or the public schema is dropped unwittingly.

   See https://github.com/hasura/graphql-engine/issues/4217
*/
CREATE OR REPLACE FUNCTION hdb_catalog.gen_hasura_uuid() RETURNS uuid AS
  -- We assume gen_random_uuid() is available in the search_path.
  -- This may not be true but we can't do much till https://github.com/hasura/graphql-engine/issues/3657
'select gen_random_uuid()' LANGUAGE SQL;

CREATE TABLE hdb_catalog.hdb_source_catalog_version(
  version TEXT NOT NULL,
  upgraded_on TIMESTAMPTZ NOT NULL
);

CREATE UNIQUE INDEX hdb_source_catalog_version_one_row
  ON hdb_catalog.hdb_source_catalog_version((version IS NOT NULL));

CREATE TABLE hdb_catalog.event_log
(
  id TEXT DEFAULT hdb_catalog.gen_hasura_uuid() PRIMARY KEY,
  schema_name TEXT NOT NULL,
  table_name TEXT NOT NULL,
  trigger_name TEXT NOT NULL,
  payload JSONB NOT NULL,
  delivered BOOLEAN NOT NULL DEFAULT FALSE,
  error BOOLEAN NOT NULL DEFAULT FALSE,
  tries INTEGER NOT NULL DEFAULT 0,
  created_at TIMESTAMP DEFAULT NOW(),
  /* when locked IS NULL the event is unlocked and can be processed */
  locked TIMESTAMPTZ,
  next_retry_at TIMESTAMP,
  archived BOOLEAN NOT NULL DEFAULT FALSE
);

CREATE INDEX ON hdb_catalog.event_log (trigger_name);
CREATE INDEX ON hdb_catalog.event_log (locked);
CREATE INDEX ON hdb_catalog.event_log (delivered);
CREATE INDEX ON hdb_catalog.event_log (created_at);

CREATE TABLE hdb_catalog.event_invocation_logs
(
  id TEXT DEFAULT hdb_catalog.gen_hasura_uuid() PRIMARY KEY,
  event_id TEXT,
  status INTEGER,
  request JSON,
  response JSON,
  created_at TIMESTAMP DEFAULT NOW(),

  FOREIGN KEY (event_id) REFERENCES hdb_catalog.event_log (id)
);

CREATE INDEX ON hdb_catalog.event_invocation_logs (event_id);

CREATE OR REPLACE FUNCTION
  hdb_catalog.insert_event_log(schema_name text, table_name text, trigger_name text, op text, row_data json)
  RETURNS text AS $$
  DECLARE
    id text;
    payload json;
    session_variables json;
    server_version_num int;
    trace_context json;
  BEGIN
    id := gen_random_uuid();
    server_version_num := current_setting('server_version_num');
    IF server_version_num >= 90600 THEN
      session_variables := current_setting('hasura.user', 't');
      trace_context := current_setting('hasura.tracecontext', 't');
    ELSE
      BEGIN
        session_variables := current_setting('hasura.user');
      EXCEPTION WHEN OTHERS THEN
        session_variables := NULL;
      END;
      BEGIN
        trace_context := current_setting('hasura.tracecontext');
      EXCEPTION WHEN OTHERS THEN
        trace_context := NULL;
      END;
    END IF;
    payload := json_build_object(
      'op', op,
      'data', row_data,
      'session_variables', session_variables,
      'trace_context', trace_context
    );
    INSERT INTO hdb_catalog.event_log
      (id, schema_name, table_name, trigger_name, payload)
    VALUES
      (id, schema_name, table_name, trigger_name, payload);
    RETURN id;
  END;
$$ LANGUAGE plpgsql;
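-- Two details of the file above are worth spelling out. First, the unique
-- expression index on hdb_source_catalog_version evaluates to TRUE for every
-- row, which caps the table at a single row. Second, insert_event_log is the
-- entry point the per-table event triggers call. A sketch of both (the sample
-- values are invented, not part of this migration):

INSERT INTO hdb_catalog.hdb_source_catalog_version VALUES ('1', NOW());  -- ok
-- INSERT INTO hdb_catalog.hdb_source_catalog_version VALUES ('2', NOW());
-- would fail with unique_violation: only one version row can ever exist.

SELECT hdb_catalog.insert_event_log(
  'public', 'users', 'user_created', 'INSERT', '{"id": 1}'::json);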
@@ -27,83 +27,6 @@ CREATE TABLE hdb_catalog.hdb_metadata
  metadata JSON NOT NULL
);

CREATE TABLE hdb_catalog.event_log
(
  id TEXT DEFAULT hdb_catalog.gen_hasura_uuid() PRIMARY KEY,
  schema_name TEXT NOT NULL,
  table_name TEXT NOT NULL,
  trigger_name TEXT NOT NULL,
  payload JSONB NOT NULL,
  delivered BOOLEAN NOT NULL DEFAULT FALSE,
  error BOOLEAN NOT NULL DEFAULT FALSE,
  tries INTEGER NOT NULL DEFAULT 0,
  created_at TIMESTAMP DEFAULT NOW(),
  /* when locked IS NULL the event is unlocked and can be processed */
  locked TIMESTAMPTZ,
  next_retry_at TIMESTAMP,
  archived BOOLEAN NOT NULL DEFAULT FALSE
);

CREATE INDEX ON hdb_catalog.event_log (trigger_name);
CREATE INDEX ON hdb_catalog.event_log (locked);
CREATE INDEX ON hdb_catalog.event_log (delivered);
CREATE INDEX ON hdb_catalog.event_log (created_at);

CREATE TABLE hdb_catalog.event_invocation_logs
(
  id TEXT DEFAULT hdb_catalog.gen_hasura_uuid() PRIMARY KEY,
  event_id TEXT,
  status INTEGER,
  request JSON,
  response JSON,
  created_at TIMESTAMP DEFAULT NOW(),

  FOREIGN KEY (event_id) REFERENCES hdb_catalog.event_log (id)
);

CREATE INDEX ON hdb_catalog.event_invocation_logs (event_id);

CREATE OR REPLACE FUNCTION
  hdb_catalog.insert_event_log(schema_name text, table_name text, trigger_name text, op text, row_data json)
  RETURNS text AS $$
  DECLARE
    id text;
    payload json;
    session_variables json;
    server_version_num int;
    trace_context json;
  BEGIN
    id := gen_random_uuid();
    server_version_num := current_setting('server_version_num');
    IF server_version_num >= 90600 THEN
      session_variables := current_setting('hasura.user', 't');
      trace_context := current_setting('hasura.tracecontext', 't');
    ELSE
      BEGIN
        session_variables := current_setting('hasura.user');
      EXCEPTION WHEN OTHERS THEN
        session_variables := NULL;
      END;
      BEGIN
        trace_context := current_setting('hasura.tracecontext');
      EXCEPTION WHEN OTHERS THEN
        trace_context := NULL;
      END;
    END IF;
    payload := json_build_object(
      'op', op,
      'data', row_data,
      'session_variables', session_variables,
      'trace_context', trace_context
    );
    INSERT INTO hdb_catalog.event_log
      (id, schema_name, table_name, trigger_name, payload)
    VALUES
      (id, schema_name, table_name, trigger_name, payload);
    RETURN id;
  END;
$$ LANGUAGE plpgsql;

CREATE TABLE hdb_catalog.hdb_action_log
(
  id UUID PRIMARY KEY DEFAULT hdb_catalog.gen_hasura_uuid(),
@@ -47,3 +47,22 @@ CREATE TABLE hdb_catalog.hdb_metadata

-- DROP hdb_views schema (https://github.com/hasura/graphql-engine/pull/6135)
DROP SCHEMA IF EXISTS hdb_views CASCADE;

-- Note [Migration of schema related to table event triggers log]

-- The schema related to table event trigger logs is
-- - TABLE hdb_catalog.event_log
-- - TABLE hdb_catalog.event_invocation_logs
-- - PROCEDURE hdb_catalog.insert_event_log

-- We define this schema in any pg source to support table event triggers.
-- The metadata storage database may itself be used as a source
-- (most likely when the server is started with only the --database-url option).
-- In that case, dropping the schema in this up (42 to 43) migration and
-- re-creating it when the database is added as a pg source would lose event
-- trigger logs. To avoid this we don't drop the schema in this migration;
-- when a pg source is added, we create this schema only if it doesn't already
-- exist. This raises a question: what happens if the old database is used only
-- as metadata storage? Then this schema is admittedly of no use. But it helps
-- a lot in the down migration (the opposite of this one, 43 to 42), which
-- likewise creates this schema only if it doesn't exist.
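-- A sketch of the "create only if absent" guard the note above commits to
-- (illustrative only, not part of this migration file): source initialisation
-- can probe for the log table before running the event-trigger DDL.
DO $$
BEGIN
  IF to_regclass('hdb_catalog.event_log') IS NULL THEN
    RAISE NOTICE 'event trigger log schema missing; source init will create it';
  END IF;
END $$;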
@@ -699,3 +699,81 @@ DROP TABLE hdb_catalog.hdb_metadata;

-- Add hdb_views schema
CREATE SCHEMA IF NOT EXISTS hdb_views;

-- See Note [Migration of schema related to table event triggers log] in 42_to_43.sql
CREATE TABLE IF NOT EXISTS hdb_catalog.event_log
(
  id TEXT DEFAULT hdb_catalog.gen_hasura_uuid() PRIMARY KEY,
  schema_name TEXT NOT NULL,
  table_name TEXT NOT NULL,
  trigger_name TEXT NOT NULL,
  payload JSONB NOT NULL,
  delivered BOOLEAN NOT NULL DEFAULT FALSE,
  error BOOLEAN NOT NULL DEFAULT FALSE,
  tries INTEGER NOT NULL DEFAULT 0,
  created_at TIMESTAMP DEFAULT NOW(),
  /* when locked IS NULL the event is unlocked and can be processed */
  locked TIMESTAMPTZ,
  next_retry_at TIMESTAMP,
  archived BOOLEAN NOT NULL DEFAULT FALSE
);

CREATE INDEX IF NOT EXISTS event_log_trigger_name_idx ON hdb_catalog.event_log (trigger_name);
CREATE INDEX IF NOT EXISTS event_log_locked_idx ON hdb_catalog.event_log (locked);
CREATE INDEX IF NOT EXISTS event_log_delivered_idx ON hdb_catalog.event_log (delivered);
CREATE INDEX IF NOT EXISTS event_log_created_at_idx ON hdb_catalog.event_log (created_at);

CREATE TABLE IF NOT EXISTS hdb_catalog.event_invocation_logs
(
  id TEXT DEFAULT hdb_catalog.gen_hasura_uuid() PRIMARY KEY,
  event_id TEXT,
  status INTEGER,
  request JSON,
  response JSON,
  created_at TIMESTAMP DEFAULT NOW(),

  FOREIGN KEY (event_id) REFERENCES hdb_catalog.event_log (id)
);

CREATE INDEX IF NOT EXISTS event_invocation_logs_event_id_idx ON hdb_catalog.event_invocation_logs (event_id);

CREATE OR REPLACE FUNCTION
  hdb_catalog.insert_event_log(schema_name text, table_name text, trigger_name text, op text, row_data json)
  RETURNS text AS $$
  DECLARE
    id text;
    payload json;
    session_variables json;
    server_version_num int;
    trace_context json;
  BEGIN
    id := gen_random_uuid();
    server_version_num := current_setting('server_version_num');
    IF server_version_num >= 90600 THEN
      session_variables := current_setting('hasura.user', 't');
      trace_context := current_setting('hasura.tracecontext', 't');
    ELSE
      BEGIN
        session_variables := current_setting('hasura.user');
      EXCEPTION WHEN OTHERS THEN
        session_variables := NULL;
      END;
      BEGIN
        trace_context := current_setting('hasura.tracecontext');
      EXCEPTION WHEN OTHERS THEN
        trace_context := NULL;
      END;
    END IF;
    payload := json_build_object(
      'op', op,
      'data', row_data,
      'session_variables', session_variables,
      'trace_context', trace_context
    );
    INSERT INTO hdb_catalog.event_log
      (id, schema_name, table_name, trigger_name, payload)
    VALUES
      (id, schema_name, table_name, trigger_name, payload);
    RETURN id;
  END;
$$ LANGUAGE plpgsql;
@@ -5,11 +5,11 @@ module Hasura.Server.MigrateSpec (CacheRefT(..), spec) where
import Hasura.Prelude

import Control.Concurrent.MVar.Lifted
import Control.Monad.Morph
import Control.Monad.Trans.Control (MonadBaseControl)
import Control.Monad.Unique
import Control.Natural ((:~>) (..))
import Data.Time.Clock (getCurrentTime)
import Data.Tuple (swap)
import Test.Hspec.Core.Spec
import Test.Hspec.Expectations.Lifted

@@ -18,6 +18,7 @@ import qualified Database.PG.Query as Q

import Hasura.RQL.DDL.Metadata (ClearMetadata (..), runClearMetadata)
import Hasura.RQL.DDL.Schema
import Hasura.RQL.DDL.Schema.Cache.Common
import Hasura.RQL.DDL.Schema.LegacyCatalog
import Hasura.RQL.Types
import Hasura.Server.API.PGDump
@@ -31,17 +32,21 @@ newtype CacheRefT m a
  = CacheRefT { runCacheRefT :: MVar RebuildableSchemaCache -> m a }
  deriving
    ( Functor, Applicative, Monad, MonadIO, MonadError e, MonadBase b, MonadBaseControl b
    , MonadTx, MonadUnique, UserInfoM, HasHttpManager, HasSQLGenCtx )
    , MonadTx, MonadUnique, UserInfoM, HasHttpManager, HasSQLGenCtx)
    via (ReaderT (MVar RebuildableSchemaCache) m)

instance MonadTrans CacheRefT where
  lift = CacheRefT . const

instance (MonadBase IO m) => TableCoreInfoRM 'Postgres (CacheRefT m)
instance MFunctor CacheRefT where
  hoist f (CacheRefT m) = CacheRefT (f . m)

-- instance (MonadBase IO m) => TableCoreInfoRM 'Postgres (CacheRefT m)
instance (MonadBase IO m) => CacheRM (CacheRefT m) where
  askSchemaCache = CacheRefT (fmap lastBuiltSchemaCache . readMVar)

instance (MonadIO m, MonadBaseControl IO m, MonadTx m, HasHttpManager m, HasSQLGenCtx m, HasRemoteSchemaPermsCtx m) => CacheRWM (CacheRefT m) where
instance (MonadIO m, MonadBaseControl IO m, MonadTx m, HasHttpManager m
         , HasSQLGenCtx m, HasRemoteSchemaPermsCtx m, MonadResolveSource m) => CacheRWM (CacheRefT m) where
  buildSchemaCacheWithOptions reason invalidations metadata = CacheRefT $ flip modifyMVar \schemaCache -> do
    ((), cache, _) <- runCacheRWT schemaCache (buildSchemaCacheWithOptions reason invalidations metadata)
    pure (cache, ())
@@ -56,19 +61,25 @@ singleTransaction :: MetadataT (CacheRefT m) () -> MetadataT (CacheRefT m) ()
singleTransaction = id

spec
  :: ( HasVersion
  :: forall m
   . ( HasVersion
     , MonadIO m
     , MonadBaseControl IO m
     , MonadTx m
     , MonadError QErr m
     , HasHttpManager m
     , HasSQLGenCtx m
     , HasRemoteSchemaPermsCtx m
     , MonadResolveSource m
     )
  => Q.ConnInfo -> SpecWithCache m
spec pgConnInfo = do
  let dropAndInit env time = lift $ CacheRefT $ flip modifyMVar \_ ->
        dropCatalog *> (swap <$> migrateCatalog env time)
      downgradeTo v = downgradeCatalog DowngradeOptions{ dgoDryRun = False, dgoTargetVersion = v }
  => SourceConfiguration -> PGExecCtx -> Q.ConnInfo -> SpecWithCache m
spec srcConfig pgExecCtx pgConnInfo = do
  let migrateCatalogAndBuildCache env time = do
        (migrationResult, metadata) <- runTx pgExecCtx $ migrateCatalog srcConfig time
        (,migrationResult) <$> runCacheBuildM (buildRebuildableSchemaCache env metadata)

      dropAndInit env time = lift $ CacheRefT $ flip modifyMVar \_ ->
        (runTx pgExecCtx dropCatalog) *> (migrateCatalogAndBuildCache env time)
      downgradeTo v = runTx pgExecCtx . downgradeCatalog srcConfig DowngradeOptions{ dgoDryRun = False, dgoTargetVersion = v }

  describe "migrateCatalog" $ do
    it "initializes the catalog" $ singleTransaction do
@@ -77,7 +88,7 @@ spec pgConnInfo = do
      dropAndInit env time `shouldReturn` MRInitialized

    it "is idempotent" \(NT transact) -> do
      let dumpSchema = execPGDump (PGDumpReqBody ["--schema-only"] (Just False)) pgConnInfo
      let dumpSchema = execPGDump (PGDumpReqBody defaultSource ["--schema-only"] False) pgConnInfo
      env <- Env.getEnvironment
      time <- getCurrentTime
      transact (dropAndInit env time) `shouldReturn` MRInitialized
@@ -88,7 +99,7 @@ spec pgConnInfo = do

    it "supports upgrades after downgrade to version 12" \(NT transact) -> do
      let upgradeToLatest env time = lift $ CacheRefT $ flip modifyMVar \_ ->
            swap <$> migrateCatalog env time
            migrateCatalogAndBuildCache env time
      env <- Env.getEnvironment
      time <- getCurrentTime
      transact (dropAndInit env time) `shouldReturn` MRInitialized
@@ -112,7 +123,7 @@ spec pgConnInfo = do
    --     t `shouldSatisfy` (`elem` supportedDowngrades)

  describe "recreateSystemMetadata" $ do
    let dumpMetadata = execPGDump (PGDumpReqBody ["--schema=hdb_catalog"] (Just False)) pgConnInfo
    let dumpMetadata = execPGDump (PGDumpReqBody defaultSource ["--schema=hdb_catalog"] False) pgConnInfo

    it "is idempotent" \(NT transact) -> do
      env <- Env.getEnvironment
@@ -124,7 +135,7 @@ spec pgConnInfo = do
            MRMigrated{} -> True
            _ -> False
      firstDump <- transact dumpMetadata
      transact recreateSystemMetadata
      transact (runTx pgExecCtx recreateSystemMetadata)
      secondDump <- transact dumpMetadata
      secondDump `shouldBe` firstDump

@@ -133,6 +144,11 @@ spec pgConnInfo = do
      time <- getCurrentTime
      transact (dropAndInit env time) `shouldReturn` MRInitialized
      firstDump <- transact dumpMetadata
      transact (runClearMetadata ClearMetadata) `shouldReturn` successMsg
      transact (hoist (hoist (runTx pgExecCtx)) $ runClearMetadata ClearMetadata) `shouldReturn` successMsg
      secondDump <- transact dumpMetadata
      secondDump `shouldBe` firstDump

runTx
  :: (MonadError QErr m, MonadIO m, MonadBaseControl IO m)
  => PGExecCtx -> LazyTxT QErr m a -> m a
runTx pgExecCtx = liftEitherM . runExceptT . runLazyTx pgExecCtx Q.ReadWrite
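The `hoist (hoist (runTx pgExecCtx))` in the ClearMetadata test is worth unpacking: runClearMetadata runs over a transaction monad, and each hoist (from mmorph's MFunctor, which CacheRefT now implements above) maps that base monad through one transformer layer, here MetadataT and then CacheRefT. A standalone sketch of the pattern, with ReaderT standing in for both layers and identity standing in for the transaction runner:

import Control.Monad.Morph (MFunctor (hoist))
import Control.Monad.Trans.Reader (ReaderT (..))

type Inner = ReaderT Int IO     -- stands in for CacheRefT
type Outer = ReaderT Bool Inner -- stands in for MetadataT

-- Stands in for runTx pgExecCtx: interprets the base action in IO.
runBase :: IO a -> IO a
runBase = id

-- One hoist crosses one layer; two hoists reach the base monad under both.
lower :: Outer a -> Outer a
lower = hoist (hoist runBase)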
@@ -3,45 +3,44 @@ module Main (main) where
import Hasura.Prelude

import Control.Concurrent.MVar
import Control.Natural ((:~>) (..))
import Data.Time.Clock (getCurrentTime)
import Data.URL.Template
import Options.Applicative
import System.Environment (getEnvironment)
import System.Exit (exitFailure)
import Test.Hspec

import qualified Data.Aeson as A
import qualified Data.ByteString.Lazy.Char8 as BL
import qualified Data.Environment as Env
import qualified Database.PG.Query as Q
import qualified Network.HTTP.Client as HTTP
import qualified Network.HTTP.Client.TLS as HTTP
import qualified Test.Hspec.Runner as Hspec

import Hasura.Backends.Postgres.Connection (liftTx, mkPGExecCtx)
import Hasura.RQL.DDL.Schema.Catalog (fetchMetadataFromCatalog)
import Hasura.RQL.Types (SQLGenCtx (..), runMetadataT, RemoteSchemaPermsCtx (..))
import Hasura.RQL.Types.Run
import Hasura.Server.Init (RawConnInfo, mkConnInfo, mkRawConnInfo,
                           parseRawConnInfo, runWithEnv)
import Hasura.RQL.DDL.Schema.Cache
import Hasura.RQL.DDL.Schema.Cache.Common
import Hasura.RQL.DDL.Schema.Source
import Hasura.RQL.Types
import Hasura.Server.Init
import Hasura.Server.Migrate
import Hasura.Server.Version
import Hasura.Session (adminUserInfo)

import qualified Data.NonNegativeIntSpec as NonNegetiveIntSpec
import qualified Data.Parser.CacheControlSpec as CacheControlParser
import qualified Data.Parser.JSONPathSpec as JsonPath
import qualified Data.Parser.URLTemplate as URLTemplate
import qualified Data.TimeSpec as TimeSpec
import qualified Hasura.IncrementalSpec as IncrementalSpec
-- import qualified Hasura.RQL.MetadataSpec as MetadataSpec
import qualified Hasura.CacheBoundedSpec as CacheBoundedSpec
import qualified Hasura.Server.AuthSpec as AuthSpec
import qualified Hasura.Server.MigrateSpec as MigrateSpec
import qualified Hasura.Server.TelemetrySpec as TelemetrySpec

data TestSuites
  = AllSuites !RawConnInfo
  = AllSuites !(Maybe URLTemplate)
  -- ^ Run all test suites. It probably doesn't make sense to be able to specify additional
  --   hspec args here.
  | SingleSuite ![String] !TestSuite
@@ -49,7 +48,7 @@ data TestSuites

data TestSuite
  = UnitSuite
  | PostgresSuite !RawConnInfo
  | PostgresSuite !(Maybe URLTemplate)

main :: IO ()
main = withVersion $$(getVersionFromEnvironment) $ parseArgs >>= \case
@@ -73,46 +72,63 @@ unitSpecs = do
  describe "Hasura.Server.Auth" AuthSpec.spec
  describe "Hasura.Cache.Bounded" CacheBoundedSpec.spec

buildPostgresSpecs :: HasVersion => RawConnInfo -> IO Spec
buildPostgresSpecs pgConnOptions = do
buildPostgresSpecs :: HasVersion => Maybe URLTemplate -> IO Spec
buildPostgresSpecs maybeUrlTemplate = do
  env <- getEnvironment
  let envMap = Env.mkEnvironment env

  rawPGConnInfo <- flip onLeft printErrExit $ runWithEnv env (mkRawConnInfo pgConnOptions)
  pgConnInfo <- flip onLeft printErrExit $ mkConnInfo rawPGConnInfo
  pgUrlTemplate <- flip onLeft printErrExit $ runWithEnv env $ do
    let envVar = fst databaseUrlEnv
    maybeV <- withEnv maybeUrlTemplate envVar
    onNothing maybeV $ throwError $
      "Expected: --database-url or " <> envVar

  let setupCacheRef = do
        pgPool <- Q.initPGPool pgConnInfo Q.defaultConnParams { Q.cpConns = 1 } print
        let pgContext = mkPGExecCtx Q.Serializable pgPool
  pgUrlText <- flip onLeft printErrExit $ renderURLTemplate envMap pgUrlTemplate
  let pgConnInfo = Q.ConnInfo 1 $ Q.CDDatabaseURI $ txtToBs pgUrlText
      urlConf = UrlValue $ InputWebhook pgUrlTemplate
      sourceConnInfo = PostgresSourceConnInfo urlConf defaultPostgresPoolSettings
      sourceConfig = SourceConfiguration sourceConnInfo Nothing

  pgPool <- Q.initPGPool pgConnInfo Q.defaultConnParams { Q.cpConns = 1 } print
  let pgContext = mkPGExecCtx Q.Serializable pgPool

      setupCacheRef = do
        httpManager <- HTTP.newManager HTTP.tlsManagerSettings
        let runContext = RunCtx adminUserInfo httpManager (SQLGenCtx False) RemoteSchemaPermsDisabled
        let sqlGenCtx = SQLGenCtx False
            cacheBuildParams = CacheBuildParams httpManager sqlGenCtx RemoteSchemaPermsDisabled
                               (mkPgSourceResolver print)

            runAsAdmin :: RunT IO a -> IO a
            runAsAdmin =
              peelRun runContext pgContext Q.ReadWrite Nothing
            run :: CacheBuild a -> IO a
            run =
              runCacheBuild cacheBuildParams
              >>> runExceptT
              >=> flip onLeft printErrJExit

        (schemaCache, metadata) <- runAsAdmin do
          sc <- snd <$> (migrateCatalog (Env.mkEnvironment env) =<< liftIO getCurrentTime)
          metadata <- liftTx fetchMetadataFromCatalog
          pure (sc, metadata)
        (metadata, schemaCache) <- run do
          metadata <- snd <$> (liftEitherM . runExceptT . runLazyTx pgContext Q.ReadWrite)
                      (migrateCatalog sourceConfig =<< liftIO getCurrentTime)
          schemaCache <- buildRebuildableSchemaCache envMap metadata
          pure (metadata, schemaCache)

        cacheRef <- newMVar schemaCache
        pure $ NT (runAsAdmin . flip MigrateSpec.runCacheRefT cacheRef . fmap fst . runMetadataT metadata)
        pure $ NT (run . flip MigrateSpec.runCacheRefT cacheRef . fmap fst . runMetadataT metadata)

  pure $ beforeAll setupCacheRef $
    describe "Hasura.Server.Migrate" $ MigrateSpec.spec pgConnInfo
    describe "Hasura.Server.Migrate" $ MigrateSpec.spec sourceConfig pgContext pgConnInfo

parseArgs :: IO TestSuites
parseArgs = execParser $ info (helper <*> (parseNoCommand <|> parseSubCommand)) $
  fullDesc <> header "Hasura GraphQL Engine test suite"
  where
    parseNoCommand = AllSuites <$> parseRawConnInfo
    parseDbUrlTemplate =
      parseDatabaseUrl <|> (fmap rawConnDetailsToUrl <$> parseRawConnDetails)
    parseNoCommand = AllSuites <$> parseDbUrlTemplate
    parseSubCommand = SingleSuite <$> parseHspecPassThroughArgs <*> subCmd
      where
        subCmd = subparser $ mconcat
          [ command "unit" $ info (pure UnitSuite) $
              progDesc "Only run unit tests"
          , command "postgres" $ info (helper <*> (PostgresSuite <$> parseRawConnInfo)) $
          , command "postgres" $ info (helper <*> (PostgresSuite <$> parseDbUrlTemplate)) $
              progDesc "Only run Postgres integration tests"
          ]
        -- Add additional arguments and tweak as needed:
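Note how parseDbUrlTemplate above lets the test binary accept either a full --database-url template or the discrete host/port/user flags, normalising both shapes into one Maybe URLTemplate by reusing rawConnDetailsToUrl from the config module. A standalone sketch of that normalisation, with stand-in types (the rendered URL is a placeholder):

import Control.Applicative ((<|>))

newtype URLTemplate = URLTemplate String deriving Show
data ConnDetails = ConnDetails          -- stands in for PostgresRawConnDetails

-- Stands in for rawConnDetailsToUrl.
detailsToUrl :: ConnDetails -> URLTemplate
detailsToUrl _ = URLTemplate "postgresql://user:pass@host:5432/db"

-- Prefer an explicit URL; otherwise render one from discrete options.
normalise :: Maybe URLTemplate -> Maybe ConnDetails -> Maybe URLTemplate
normalise url details = url <|> fmap detailsToUrl details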
@@ -12,6 +12,7 @@ response:
          schema: public
          name: user
      name: Names
      source: default
      type: array
      field_mapping:
        names: name
@@ -30,8 +30,13 @@
    - c1: 1
      c2: world
    returning: []
- type: clear_metadata
  args: {}

- description: Clear metadata
  url: /v1/query
  status: 200
  query:
    type: clear_metadata
    args: {}

- description: ensure the event was archived
  url: /v1/query
@@ -60,6 +60,7 @@ inconsistent_objects:
        schema: public
        name: article
    name: articles
    source: default
    comment:
    table:
      schema: public
@@ -0,0 +1,6 @@
type: bulk
args:
- type: run_sql
  args:
    sql: |
      drop table "user" cascade;
@@ -21,10 +21,9 @@
      table: random
      name: get_articles
  response:
    path: "$.args.table"
    error: table "random" does not exist
    path: $.args.table
    error: 'table "random" does not exist in source: default'
    code: not-exists

- description: Drop a non-existent computed field
  url: /v1/query
  status: 400
@@ -34,7 +33,7 @@
      table: author
      name: random
  response:
    path: "$.args.name"
    path: $.args.name
    error: computed field "random" does not exist
    code: not-exists
@@ -10,9 +10,8 @@
      function: full_name
  response:
    path: $.args.table
    error: table "random" does not exist
    error: 'table "random" does not exist in source: default'
    code: not-exists

- description: Try adding computed field with existing column name
  url: /v1/query
  status: 400
@@ -31,6 +30,7 @@
          schema: public
          name: full_name
      name: first_name
      source: default
      comment:
      table:
        schema: public
@@ -40,7 +40,6 @@
    path: $.args
    error: field definition conflicts with postgres column
    code: constraint-violation

- description: Try adding computed field with invalid function
  url: /v1/query
  status: 400
@@ -59,6 +58,7 @@
          schema: public
          name: random_function
      name: full_name
      source: default
      comment:
      table:
        schema: public
@@ -70,7 +70,6 @@
    error: 'in table "author": in computed field "full_name": no such function exists
      in postgres : "random_function"'
    code: constraint-violation

- description: Try adding computed field with invalid table argument name
  url: /v1/query
  status: 400
@@ -91,6 +90,7 @@
          name: full_name
        table_argument: random
      name: full_name
      source: default
      comment:
      table:
        schema: public
@@ -104,7 +104,6 @@
      cannot be added to table "author" because "random" is not an input argument
      of the function "full_name"'
    code: constraint-violation

- description: Try adding computed field with a volatile function
  url: /v1/query
  status: 400
@@ -125,6 +124,7 @@
          name: fetch_articles_volatile
        table_argument: random
      name: get_articles
      source: default
      comment:
      table:
        schema: public
@@ -142,7 +142,6 @@
      \ cannot be added as a computed field\n • \"random\" is not an input argument\
      \ of the function \"fetch_articles_volatile\"\n"
    code: constraint-violation

- description: Try adding a computed field with a function with no input arguments
  url: /v1/query
  status: 400
@@ -161,6 +160,7 @@
          schema: public
          name: hello_world
      name: hello_world
      source: default
      comment:
      table:
        schema: public
@@ -174,7 +174,6 @@
      "hello_world" cannot be added to table "author" because the function "hello_world"
      has no input arguments'
    code: constraint-violation

- description: Try adding a computed field with first argument as table argument
  url: /v1/query
  status: 400
@@ -193,6 +192,7 @@
          schema: public
          name: fetch_articles
      name: get_articles
      source: default
      comment:
      table:
        schema: public
@@ -210,7 +210,6 @@
      \ type\n • first argument of the function \"fetch_articles\" of type \"pg_catalog.text\"\
      \ is not the table to which the computed field is being added\n"
    code: constraint-violation

- description: Try adding a computed field with an invalid session argument name
  url: /v1/query
  status: 400
@@ -231,6 +230,7 @@
          name: full_name
        session_argument: random
      name: full_name
      source: default
      comment:
      table:
        schema: public
@@ -244,7 +244,6 @@
      cannot be added to table "author" because "random" is not an input argument
      of the function "full_name"'
    code: constraint-violation

- description: Try adding a computed field with a non-JSON session argument
  url: /v1/query
  status: 400
@@ -264,19 +263,20 @@
        function:
          schema: public
          name: fetch_articles
        table_argument: author_row
        session_argument: search
        table_argument: author_row
      name: fetch_articles
      source: default
      comment:
      table:
        schema: public
        name: author
      reason: 'in table "author": in computed field "fetch_articles": the computed field
        "fetch_articles" cannot be added to table "author" because "search" argument
        of the function "fetch_articles" is not of type JSON'
      reason: 'in table "author": in computed field "fetch_articles": the computed
        field "fetch_articles" cannot be added to table "author" because "search"
        argument of the function "fetch_articles" is not of type JSON'
      type: computed_field
    path: $.args
    error: 'in table "author": in computed field "fetch_articles": the computed field
      "fetch_articles" cannot be added to table "author" because "search" argument of
      the function "fetch_articles" is not of type JSON'
      "fetch_articles" cannot be added to table "author" because "search" argument
      of the function "fetch_articles" is not of type JSON'
    code: constraint-violation
@@ -43,6 +43,7 @@
  internal:
  - definition:
      role: user
      source: default
      comment:
      permission:
        allow_aggregations: false
@@ -80,6 +81,7 @@
  internal:
  - definition:
      role: user
      source: default
      comment:
      permission:
        allow_aggregations: false
@@ -140,9 +142,9 @@
      name: full_name
  response:
    path: $.args
    error: 'cannot drop due to the following dependent objects : permission author.user.select'
    error: 'cannot drop due to the following dependent objects : permission author.user.select
      in source "default"'
    code: dependency-error

- description: Drop a computed field with cascade
  url: /v1/query
  status: 200
@@ -21,10 +21,10 @@
      sql: |
        ALTER FUNCTION fetch_articles(text, author) RENAME TO fetch_articles_renamed
  response:
    path: "$.args"
    error: 'cannot drop due to the following dependent objects : computed field author.get_articles'
    path: $.args
    error: 'cannot drop due to the following dependent objects : computed field author.get_articles
      in source "default"'
    code: dependency-error

- description: Try to alter the fetch_articles function to VOLATILE
  url: /v1/query
  status: 400
@@ -34,7 +34,7 @@
      sql: |
        ALTER FUNCTION fetch_articles(text, author) VOLATILE
  response:
    path: "$.args"
    path: $.args
    error: The type of function "fetch_articles" associated with computed field "get_articles"
      of table "author" is being altered to "VOLATILE"
    code: not-supported
@@ -57,12 +57,13 @@
        LIMIT $3
        $$ LANGUAGE sql STABLE;
  response:
    path: "$.args"
    path: $.args
    error: The function "fetch_articles" associated with computed field"get_articles"
      of table "author" is being overloaded
    code: not-supported

- description: Drop the function fetch_articles and create a new function with the same name
- description: Drop the function fetch_articles and create a new function with the
    same name
  url: /v1/query
  status: 400
  query:
@@ -81,10 +82,10 @@
        LIMIT $3
        $$ LANGUAGE sql STABLE;
  response:
    path: "$.args"
    error: 'cannot drop due to the following dependent objects : computed field author.get_articles'
    path: $.args
    error: 'cannot drop due to the following dependent objects : computed field author.get_articles
      in source "default"'
    code: dependency-error

- description: Safely alter the definition of function fetch_articles
  url: /v1/query
  status: 200
@@ -102,7 +103,7 @@
        $$ LANGUAGE sql STABLE;
  response:
    result_type: CommandOk
    result: null
    result:

- description: Drop computed field get_articles from author table
  url: /v1/query
@@ -10,9 +10,11 @@
- description: Check if metadata is cleared
  url: /v1/query
  status: 200
  response:
    version: 2
    tables: []
  # FIXME:- Using export_metadata will dump
  # the source configuration dependent on --database-url
  # response:
  #   version: 2
  #   tables: []
  query:
    type: export_metadata
    args: {}
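# The FIXME above exists because export_metadata now returns the default
# source, including connection configuration derived from --database-url.
# A hypothetical, abridged shape of such an export (field names are an
# assumption for illustration, not taken from this commit):
# version: 3
# sources:
# - name: default
#   tables: []
#   configuration:
#     connection_info:
#       database_url:
#         from_env: HASURA_GRAPHQL_DATABASE_URL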
Some files were not shown because too many files have changed in this diff.