mirror of
https://github.com/hasura/graphql-engine.git
synced 2024-12-15 01:12:56 +03:00
Pass environment variables around as a data structure, via @sordina (#5374)
* Pass environment variables around as a data structure, via @sordina * Resolving build error * Adding Environment passing note to changelog * Removing references to ILTPollerLog as this seems to have been reintroduced from a bad merge * removing commented-out imports * Language pragmas already set by project * Linking async thread * Apply suggestions from code review Use `runQueryTx` instead of `runLazyTx` for queries. * remove the non-user facing entry in the changelog Co-authored-by: Phil Freeman <paf31@cantab.net> Co-authored-by: Phil Freeman <phil@hasura.io> Co-authored-by: Vamshi Surabhi <0x777@users.noreply.github.com>
This commit is contained in:
parent
078f3955aa
commit
8904e063e9
@ -125,6 +125,8 @@ constraints: any.Cabal ==3.2.0.0,
|
||||
entropy -halvm,
|
||||
any.errors ==2.3.0,
|
||||
any.exceptions ==0.10.4,
|
||||
exceptions +transformers-0-4,
|
||||
any.fail ==4.9.0.0,
|
||||
any.fast-logger ==3.0.1,
|
||||
any.file-embed ==0.0.11.2,
|
||||
any.filepath ==1.4.2.1,
|
||||
|
@ -63,36 +63,6 @@ Date: Wed Jul 15 03:40:48 2020 -0700
|
||||
|
||||
Co-authored-by: Vamshi Surabhi <0x777@users.noreply.github.com>
|
||||
|
||||
commit 24592a516b2e920d3d41244b0aac4c060dc321ae
|
||||
Author: Lyndon Maydwell <lyndon@sordina.net>
|
||||
Date: Wed Jul 15 05:00:58 2020 +1000
|
||||
|
||||
Pass environment variables around as a data structure, via @sordina (#5374)
|
||||
|
||||
* Pass environment variables around as a data structure, via @sordina
|
||||
|
||||
* Resolving build error
|
||||
|
||||
* Adding Environment passing note to changelog
|
||||
|
||||
* Removing references to ILTPollerLog as this seems to have been reintroduced from a bad merge
|
||||
|
||||
* removing commented-out imports
|
||||
|
||||
* Language pragmas already set by project
|
||||
|
||||
* Linking async thread
|
||||
|
||||
* Apply suggestions from code review
|
||||
|
||||
Use `runQueryTx` instead of `runLazyTx` for queries.
|
||||
|
||||
* remove the non-user facing entry in the changelog
|
||||
|
||||
Co-authored-by: Phil Freeman <paf31@cantab.net>
|
||||
Co-authored-by: Phil Freeman <phil@hasura.io>
|
||||
Co-authored-by: Vamshi Surabhi <0x777@users.noreply.github.com>
|
||||
|
||||
(Done, but we should re-visit this, if we do query plan caching)
|
||||
commit 20cbe9cfd3e90b91d3f4faf370b081fc3859cbde
|
||||
Author: Auke Booij <auke@hasura.io>
|
||||
|
@ -447,6 +447,7 @@ library
|
||||
, Data.HashMap.Strict.InsOrd.Extended
|
||||
, Data.List.Extended
|
||||
, Data.Tuple.Extended
|
||||
, Data.Environment
|
||||
, Hasura.SQL.DML
|
||||
, Hasura.SQL.Error
|
||||
, Hasura.SQL.GeoJSON
|
||||
|
@ -2,6 +2,7 @@
|
||||
|
||||
module Main where
|
||||
|
||||
import Control.Exception
|
||||
import Data.Text.Conversions (convertText)
|
||||
|
||||
import Hasura.App
|
||||
@ -14,20 +15,32 @@ import Hasura.Server.Init
|
||||
import Hasura.Server.Migrate (downgradeCatalog, dropCatalog)
|
||||
import Hasura.Server.Version
|
||||
|
||||
import qualified Data.ByteString.Char8 as BC
|
||||
import qualified Data.ByteString.Lazy as BL
|
||||
import qualified Data.ByteString.Lazy.Char8 as BLC
|
||||
import qualified Data.Environment as Env
|
||||
import qualified Database.PG.Query as Q
|
||||
import qualified System.Exit as Sys
|
||||
import qualified System.Posix.Signals as Signals
|
||||
|
||||
main :: IO ()
|
||||
main = parseArgs >>= unAppM . runApp
|
||||
main = do
|
||||
tryExit $ do
|
||||
args <- parseArgs
|
||||
env <- Env.getEnvironment
|
||||
unAppM (runApp env args)
|
||||
where
|
||||
tryExit io = try io >>= \case
|
||||
Left (ExitException _code msg) -> BC.putStrLn msg >> Sys.exitFailure
|
||||
Right r -> return r
|
||||
|
||||
runApp :: HGEOptions Hasura -> AppM ()
|
||||
runApp (HGEOptionsG rci hgeCmd) =
|
||||
withVersion $$(getVersionFromEnvironment) case hgeCmd of
|
||||
runApp :: Env.Environment -> HGEOptions Hasura -> AppM ()
|
||||
runApp env (HGEOptionsG rci hgeCmd) =
|
||||
withVersion $$(getVersionFromEnvironment) $ case hgeCmd of
|
||||
HCServe serveOptions -> do
|
||||
(initCtx, initTime) <- initialiseCtx hgeCmd rci
|
||||
-- Catches the SIGTERM signal and initiates a graceful shutdown.
|
||||
(initCtx, initTime) <- initialiseCtx env hgeCmd rci
|
||||
let shutdownApp = return ()
|
||||
-- Catches the SIGTERM signal and initiates a graceful shutdown.
|
||||
-- Graceful shutdown for regular HTTP requests is already implemented in
|
||||
-- Warp, and is triggered by invoking the 'closeSocket' callback.
|
||||
-- We only catch the SIGTERM signal once, that is, if the user hits CTRL-C
|
||||
@ -36,35 +49,36 @@ runApp (HGEOptionsG rci hgeCmd) =
|
||||
Signals.sigTERM
|
||||
(Signals.CatchOnce (shutdownGracefully initCtx))
|
||||
Nothing
|
||||
runHGEServer serveOptions initCtx Nothing initTime
|
||||
runHGEServer env serveOptions initCtx Nothing initTime shutdownApp
|
||||
|
||||
HCExport -> do
|
||||
(initCtx, _) <- initialiseCtx hgeCmd rci
|
||||
(initCtx, _) <- initialiseCtx env hgeCmd rci
|
||||
res <- runTx' initCtx fetchMetadata Q.ReadCommitted
|
||||
either printErrJExit printJSON res
|
||||
either (printErrJExit MetadataExportError) printJSON res
|
||||
|
||||
HCClean -> do
|
||||
(initCtx, _) <- initialiseCtx hgeCmd rci
|
||||
(initCtx, _) <- initialiseCtx env hgeCmd rci
|
||||
res <- runTx' initCtx dropCatalog Q.ReadCommitted
|
||||
either printErrJExit (const cleanSuccess) res
|
||||
either (printErrJExit MetadataCleanError) (const cleanSuccess) res
|
||||
|
||||
HCExecute -> do
|
||||
(InitCtx{..}, _) <- initialiseCtx hgeCmd rci
|
||||
(InitCtx{..}, _) <- initialiseCtx env hgeCmd rci
|
||||
queryBs <- liftIO BL.getContents
|
||||
let sqlGenCtx = SQLGenCtx False
|
||||
res <- runAsAdmin _icPgPool sqlGenCtx _icHttpManager do
|
||||
schemaCache <- buildRebuildableSchemaCache
|
||||
execQuery queryBs
|
||||
res <- runAsAdmin _icPgPool sqlGenCtx _icHttpManager $ do
|
||||
schemaCache <- buildRebuildableSchemaCache env
|
||||
execQuery env queryBs
|
||||
& runHasSystemDefinedT (SystemDefined False)
|
||||
& runCacheRWT schemaCache
|
||||
& fmap (\(res, _, _) -> res)
|
||||
either printErrJExit (liftIO . BLC.putStrLn) res
|
||||
either (printErrJExit ExecuteProcessError) (liftIO . BLC.putStrLn) res
|
||||
|
||||
HCDowngrade opts -> do
|
||||
(InitCtx{..}, initTime) <- initialiseCtx hgeCmd rci
|
||||
(InitCtx{..}, initTime) <- initialiseCtx env hgeCmd rci
|
||||
let sqlGenCtx = SQLGenCtx False
|
||||
res <- downgradeCatalog opts initTime
|
||||
& runAsAdmin _icPgPool sqlGenCtx _icHttpManager
|
||||
either printErrJExit (liftIO . print) res
|
||||
either (printErrJExit DowngradeProcessError) (liftIO . print) res
|
||||
|
||||
HCVersion -> liftIO $ putStrLn $ "Hasura GraphQL Engine: " ++ convertText currentVersion
|
||||
where
|
||||
|
35
server/src-lib/Data/Environment.hs
Normal file
35
server/src-lib/Data/Environment.hs
Normal file
@ -0,0 +1,35 @@
|
||||
{-# LANGUAGE DeriveGeneric #-}
|
||||
|
||||
module Data.Environment
|
||||
( Environment()
|
||||
, getEnvironment
|
||||
, mkEnvironment
|
||||
, emptyEnvironment
|
||||
, maybeEnvironment
|
||||
, lookupEnv)
|
||||
where
|
||||
|
||||
import Hasura.Prelude
|
||||
import Data.Aeson
|
||||
|
||||
import qualified System.Environment
|
||||
import qualified Data.Map as M
|
||||
|
||||
newtype Environment = Environment (M.Map String String) deriving (Eq, Show, Generic)
|
||||
|
||||
instance FromJSON Environment
|
||||
|
||||
getEnvironment :: IO Environment
|
||||
getEnvironment = mkEnvironment <$> System.Environment.getEnvironment
|
||||
|
||||
maybeEnvironment :: Maybe Environment -> Environment
|
||||
maybeEnvironment = fromMaybe emptyEnvironment
|
||||
|
||||
mkEnvironment :: [(String, String)] -> Environment
|
||||
mkEnvironment = Environment . M.fromList
|
||||
|
||||
emptyEnvironment :: Environment
|
||||
emptyEnvironment = Environment M.empty
|
||||
|
||||
lookupEnv :: Environment -> String -> Maybe String
|
||||
lookupEnv (Environment es) k = M.lookup k es
|
@ -13,12 +13,12 @@ where
|
||||
import Hasura.Prelude
|
||||
|
||||
import qualified Data.Text as T
|
||||
import qualified Data.Environment as Env
|
||||
|
||||
import Data.Attoparsec.Combinator (lookAhead)
|
||||
import Data.Attoparsec.Text
|
||||
import Instances.TH.Lift ()
|
||||
import Language.Haskell.TH.Syntax (Lift)
|
||||
import System.Environment (lookupEnv)
|
||||
import Test.QuickCheck
|
||||
|
||||
newtype Variable = Variable {unVariable :: Text}
|
||||
@ -63,22 +63,22 @@ parseURLTemplate t = parseOnly parseTemplate t
|
||||
parseVariable =
|
||||
string "{{" *> (Variable . T.pack <$> manyTill anyChar (string "}}"))
|
||||
|
||||
renderURLTemplate :: MonadIO m => URLTemplate -> m (Either String Text)
|
||||
renderURLTemplate template = do
|
||||
eitherResults <- mapM renderTemplateItem $ unURLTemplate template
|
||||
let errorVariables = lefts eitherResults
|
||||
pure $ case errorVariables of
|
||||
renderURLTemplate :: Env.Environment -> URLTemplate -> Either String Text
|
||||
renderURLTemplate env template =
|
||||
case errorVariables of
|
||||
[] -> Right $ T.concat $ rights eitherResults
|
||||
_ -> Left $ T.unpack $ "Value for environment variables not found: "
|
||||
<> T.intercalate ", " errorVariables
|
||||
where
|
||||
eitherResults = map renderTemplateItem $ unURLTemplate template
|
||||
errorVariables = lefts eitherResults
|
||||
renderTemplateItem = \case
|
||||
TIText t -> pure $ Right t
|
||||
TIVariable (Variable var) -> do
|
||||
maybeEnvValue <- liftIO $ lookupEnv $ T.unpack var
|
||||
pure $ case maybeEnvValue of
|
||||
Nothing -> Left var
|
||||
Just value -> Right $ T.pack value
|
||||
TIText t -> Right t
|
||||
TIVariable (Variable var) ->
|
||||
let maybeEnvValue = Env.lookupEnv env $ T.unpack var
|
||||
in case maybeEnvValue of
|
||||
Nothing -> Left var
|
||||
Just value -> Right $ T.pack value
|
||||
|
||||
-- QuickCheck generators
|
||||
instance Arbitrary Variable where
|
||||
|
@ -2,10 +2,11 @@
|
||||
|
||||
module Hasura.App where
|
||||
|
||||
import Control.Concurrent.STM.TVar (TVar, readTVarIO)
|
||||
import Control.Concurrent.STM.TVar (readTVarIO, TVar)
|
||||
import Control.Exception (throwIO)
|
||||
import Control.Lens (view, _2)
|
||||
import Control.Monad.Base
|
||||
import Control.Monad.Catch (MonadCatch, MonadThrow, onException)
|
||||
import Control.Monad.Catch (MonadCatch, MonadMask, MonadThrow, onException, Exception)
|
||||
import Control.Monad.Stateless
|
||||
import Control.Monad.STM (atomically)
|
||||
import Control.Monad.Trans.Control (MonadBaseControl (..))
|
||||
@ -15,11 +16,9 @@ import Data.Time.Clock (UTCTime)
|
||||
import GHC.AssertNF
|
||||
import GHC.Stats
|
||||
import Options.Applicative
|
||||
import System.Environment (getEnvironment, lookupEnv)
|
||||
import System.Exit (exitFailure)
|
||||
import System.Environment (getEnvironment)
|
||||
import System.Mem (performMajorGC)
|
||||
|
||||
import qualified Control.Concurrent.Async as Async
|
||||
import qualified Control.Concurrent.Async.Lifted.Safe as LA
|
||||
import qualified Control.Concurrent.Extended as C
|
||||
import qualified Data.Aeson as A
|
||||
@ -27,6 +26,7 @@ import qualified Data.ByteString.Char8 as BC
|
||||
import qualified Data.ByteString.Lazy.Char8 as BLC
|
||||
import qualified Data.Set as Set
|
||||
import qualified Data.Text as T
|
||||
import qualified Data.Environment as Env
|
||||
import qualified Data.Time.Clock as Clock
|
||||
import qualified Data.Yaml as Y
|
||||
import qualified Database.PG.Query as Q
|
||||
@ -35,6 +35,7 @@ import qualified Network.HTTP.Client.TLS as HTTP
|
||||
import qualified Network.Wai.Handler.Warp as Warp
|
||||
import qualified System.Log.FastLogger as FL
|
||||
import qualified Text.Mustache.Compile as M
|
||||
import qualified Control.Immortal as Immortal
|
||||
|
||||
import Hasura.Db
|
||||
import Hasura.EncJSON
|
||||
@ -71,11 +72,35 @@ import Hasura.Session
|
||||
|
||||
import qualified Hasura.GraphQL.Transport.WebSocket.Server as WS
|
||||
|
||||
printErrExit :: (MonadIO m) => forall a . String -> m a
|
||||
printErrExit = liftIO . (>> exitFailure) . putStrLn
|
||||
data ExitCode
|
||||
= InvalidEnvironmentVariableOptionsError
|
||||
| InvalidDatabaseConnectionParamsError
|
||||
| MetadataCatalogFetchingError
|
||||
| AuthConfigurationError
|
||||
| EventSubSystemError
|
||||
| EventEnvironmentVariableError
|
||||
| MetadataExportError
|
||||
| MetadataCleanError
|
||||
| DatabaseMigrationError
|
||||
| ExecuteProcessError
|
||||
| DowngradeProcessError
|
||||
| UnexpectedHasuraError
|
||||
| ExitFailureError Int
|
||||
deriving Show
|
||||
|
||||
printErrJExit :: (A.ToJSON a, MonadIO m) => forall b . a -> m b
|
||||
printErrJExit = liftIO . (>> exitFailure) . printJSON
|
||||
data ExitException
|
||||
= ExitException
|
||||
{ eeCode :: !ExitCode
|
||||
, eeMessage :: !BC.ByteString
|
||||
} deriving (Show)
|
||||
|
||||
instance Exception ExitException
|
||||
|
||||
printErrExit :: (MonadIO m) => forall a . ExitCode -> String -> m a
|
||||
printErrExit reason = liftIO . throwIO . ExitException reason . BC.pack
|
||||
|
||||
printErrJExit :: (A.ToJSON a, MonadIO m) => forall b . ExitCode -> a -> m b
|
||||
printErrJExit reason = liftIO . throwIO . ExitException reason . BLC.toStrict . A.encode
|
||||
|
||||
parseHGECommand :: EnabledLogTypes impl => Parser (RawHGECommand impl)
|
||||
parseHGECommand =
|
||||
@ -101,7 +126,7 @@ parseArgs = do
|
||||
rawHGEOpts <- execParser opts
|
||||
env <- getEnvironment
|
||||
let eitherOpts = runWithEnv env $ mkHGEOptions rawHGEOpts
|
||||
either printErrExit return eitherOpts
|
||||
either (printErrExit InvalidEnvironmentVariableOptionsError) return eitherOpts
|
||||
where
|
||||
opts = info (helper <*> hgeOpts)
|
||||
( fullDesc <>
|
||||
@ -143,8 +168,7 @@ data Loggers
|
||||
}
|
||||
|
||||
newtype AppM a = AppM { unAppM :: IO a }
|
||||
deriving ( Functor, Applicative, Monad, MonadIO, MonadUnique, MonadBase IO
|
||||
, MonadBaseControl IO, MonadCatch, MonadThrow)
|
||||
deriving (Functor, Applicative, Monad, MonadIO, MonadBase IO, MonadBaseControl IO, MonadCatch, MonadThrow, MonadMask)
|
||||
|
||||
-- | this function initializes the catalog and returns an @InitCtx@, based on the command given
|
||||
-- - for serve command it creates a proper PG connection pool
|
||||
@ -153,10 +177,11 @@ newtype AppM a = AppM { unAppM :: IO a }
|
||||
-- used by other functions as well
|
||||
initialiseCtx
|
||||
:: (HasVersion, MonadIO m, MonadCatch m)
|
||||
=> HGECommand Hasura
|
||||
=> Env.Environment
|
||||
-> HGECommand Hasura
|
||||
-> RawConnInfo
|
||||
-> m (InitCtx, UTCTime)
|
||||
initialiseCtx hgeCmd rci = do
|
||||
initialiseCtx env hgeCmd rci = do
|
||||
initTime <- liftIO Clock.getCurrentTime
|
||||
-- global http manager
|
||||
httpManager <- liftIO $ HTTP.newManager HTTP.tlsManagerSettings
|
||||
@ -181,11 +206,11 @@ initialiseCtx hgeCmd rci = do
|
||||
pure (l, pool, SQLGenCtx False)
|
||||
|
||||
res <- flip onException (flushLogger (_lsLoggerCtx loggers)) $
|
||||
migrateCatalogSchema (_lsLogger loggers) pool httpManager sqlGenCtx
|
||||
migrateCatalogSchema env (_lsLogger loggers) pool httpManager sqlGenCtx
|
||||
pure (InitCtx httpManager instanceId loggers connInfo pool latch res, initTime)
|
||||
where
|
||||
procConnInfo =
|
||||
either (printErrExit . ("Fatal Error : " <>)) return $ mkConnInfo rci
|
||||
either (printErrExit InvalidDatabaseConnectionParamsError . ("Fatal Error : " <>)) return $ mkConnInfo rci
|
||||
|
||||
getMinimalPool pgLogger ci = do
|
||||
let connParams = Q.defaultConnParams { Q.cpConns = 1 }
|
||||
@ -200,14 +225,14 @@ initialiseCtx hgeCmd rci = do
|
||||
-- | helper function to initialize or migrate the @hdb_catalog@ schema (used by pro as well)
|
||||
migrateCatalogSchema
|
||||
:: (HasVersion, MonadIO m)
|
||||
=> Logger Hasura -> Q.PGPool -> HTTP.Manager -> SQLGenCtx
|
||||
=> Env.Environment -> Logger Hasura -> Q.PGPool -> HTTP.Manager -> SQLGenCtx
|
||||
-> m (RebuildableSchemaCache Run, Maybe UTCTime)
|
||||
migrateCatalogSchema logger pool httpManager sqlGenCtx = do
|
||||
migrateCatalogSchema env logger pool httpManager sqlGenCtx = do
|
||||
let pgExecCtx = mkPGExecCtx Q.Serializable pool
|
||||
adminRunCtx = RunCtx adminUserInfo httpManager sqlGenCtx
|
||||
currentTime <- liftIO Clock.getCurrentTime
|
||||
initialiseResult <- runExceptT $ peelRun adminRunCtx pgExecCtx Q.ReadWrite $
|
||||
(,) <$> migrateCatalog currentTime <*> liftTx fetchLastUpdate
|
||||
(,) <$> migrateCatalog env currentTime <*> liftTx fetchLastUpdate
|
||||
|
||||
((migrationResult, schemaCache), lastUpdateEvent) <-
|
||||
initialiseResult `onLeft` \err -> do
|
||||
@ -216,7 +241,7 @@ migrateCatalogSchema logger pool httpManager sqlGenCtx = do
|
||||
, slKind = "db_migrate"
|
||||
, slInfo = A.toJSON err
|
||||
}
|
||||
liftIO exitFailure
|
||||
liftIO (printErrExit DatabaseMigrationError (BLC.unpack $ A.encode err))
|
||||
unLogger logger migrationResult
|
||||
return (schemaCache, view _2 <$> lastUpdateEvent)
|
||||
|
||||
@ -224,7 +249,7 @@ migrateCatalogSchema logger pool httpManager sqlGenCtx = do
|
||||
runTxIO :: Q.PGPool -> Q.TxMode -> Q.TxE QErr a -> IO a
|
||||
runTxIO pool isoLevel tx = do
|
||||
eVal <- liftIO $ runExceptT $ Q.runTx pool isoLevel tx
|
||||
either printErrJExit return eVal
|
||||
either (printErrJExit DatabaseMigrationError) return eVal
|
||||
|
||||
-- | A latch for the graceful shutdown of a server process.
|
||||
newtype ShutdownLatch = ShutdownLatch { unShutdownLatch :: C.MVar () }
|
||||
@ -239,40 +264,45 @@ waitForShutdown = C.takeMVar . unShutdownLatch
|
||||
-- | Initiate a graceful shutdown of the server associated with the provided
|
||||
-- latch.
|
||||
shutdownGracefully :: InitCtx -> IO ()
|
||||
shutdownGracefully = flip C.putMVar () . unShutdownLatch . _icShutdownLatch
|
||||
shutdownGracefully = shutdownGracefully' . _icShutdownLatch
|
||||
|
||||
shutdownGracefully' :: ShutdownLatch -> IO ()
|
||||
shutdownGracefully' = flip C.putMVar () . unShutdownLatch
|
||||
|
||||
-- | If an exception is encountered , flush the log buffer and
|
||||
-- rethrow If we do not flush the log buffer on exception, then log lines
|
||||
-- may be missed
|
||||
-- See: https://github.com/hasura/graphql-engine/issues/4772
|
||||
flushLogger :: (MonadIO m) => LoggerCtx impl -> m ()
|
||||
flushLogger loggerCtx = liftIO $ FL.flushLogStr $ _lcLoggerSet loggerCtx
|
||||
flushLogger :: MonadIO m => LoggerCtx impl -> m ()
|
||||
flushLogger = liftIO . FL.flushLogStr . _lcLoggerSet
|
||||
|
||||
runHGEServer
|
||||
:: ( HasVersion
|
||||
, MonadIO m
|
||||
, MonadUnique m
|
||||
, MonadCatch m
|
||||
, MonadMask m
|
||||
, MonadStateless IO m
|
||||
, LA.Forall (LA.Pure m)
|
||||
, UserAuthentication m
|
||||
, MetadataApiAuthorization m
|
||||
, HttpLog m
|
||||
, MonadQueryLog m
|
||||
, ConsoleRenderer m
|
||||
, MetadataApiAuthorization m
|
||||
, MonadGQLExecutionCheck m
|
||||
, MonadConfigApiHandler m
|
||||
, MonadQueryLog m
|
||||
, WS.MonadWSLog m
|
||||
)
|
||||
=> ServeOptions impl
|
||||
=> Env.Environment
|
||||
-> ServeOptions impl
|
||||
-> InitCtx
|
||||
-> Maybe PGExecCtx
|
||||
-- ^ An optional specialized pg exection context for executing queries
|
||||
-- and mutations
|
||||
-> UTCTime
|
||||
-- ^ start time
|
||||
-> IO ()
|
||||
-- ^ shutdown function
|
||||
-> m ()
|
||||
runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
|
||||
runHGEServer env ServeOptions{..} InitCtx{..} pgExecCtx initTime shutdownApp = do
|
||||
-- Comment this to enable expensive assertions from "GHC.AssertNF". These
|
||||
-- will log lines to STDOUT containing "not in normal form". In the future we
|
||||
-- could try to integrate this into our tests. For now this is a development
|
||||
@ -287,13 +317,14 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
|
||||
authModeRes <- runExceptT $ setupAuthMode soAdminSecret soAuthHook soJwtSecret soUnAuthRole
|
||||
_icHttpManager logger
|
||||
|
||||
authMode <- either (printErrExit . T.unpack) return authModeRes
|
||||
authMode <- either (printErrExit AuthConfigurationError . T.unpack) return authModeRes
|
||||
|
||||
_idleGCThread <- C.forkImmortal "ourIdleGC" logger $ liftIO $
|
||||
ourIdleGC logger (seconds 0.3) (seconds 10) (seconds 60)
|
||||
|
||||
HasuraApp app cacheRef cacheInitTime shutdownApp <- flip onException (flushLogger loggerCtx) $
|
||||
mkWaiApp soTxIso
|
||||
HasuraApp app cacheRef cacheInitTime stopWsServer <- flip onException (flushLogger loggerCtx) $
|
||||
mkWaiApp env
|
||||
soTxIso
|
||||
logger
|
||||
sqlGenCtx
|
||||
soEnableAllowlist
|
||||
@ -318,14 +349,14 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
|
||||
liftIO $ logInconsObjs logger inconsObjs
|
||||
|
||||
-- start background threads for schema sync
|
||||
(_schemaSyncListenerThread, _schemaSyncProcessorThread) <-
|
||||
(schemaSyncListenerThread, schemaSyncProcessorThread) <-
|
||||
startSchemaSyncThreads sqlGenCtx _icPgPool logger _icHttpManager
|
||||
cacheRef _icInstanceId cacheInitTime
|
||||
|
||||
maxEvThrds <- liftIO $ getFromEnv defaultMaxEventThreads "HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE"
|
||||
fetchI <- fmap milliseconds $ liftIO $
|
||||
getFromEnv (Milliseconds defaultFetchInterval) "HASURA_GRAPHQL_EVENTS_FETCH_INTERVAL"
|
||||
logEnvHeaders <- liftIO $ getFromEnv False "LOG_HEADERS_FROM_ENV"
|
||||
let
|
||||
maxEvThrds = fromMaybe defaultMaxEventThreads soEventsHttpPoolSize
|
||||
fetchI = milliseconds $ fromMaybe (Milliseconds defaultFetchInterval) soEventsFetchInterval
|
||||
logEnvHeaders = soLogHeadersFromEnv
|
||||
|
||||
lockedEventsCtx <- liftIO $ atomically $ initLockedEventsCtx
|
||||
|
||||
@ -333,13 +364,14 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
|
||||
prepareEvents _icPgPool logger
|
||||
eventEngineCtx <- liftIO $ atomically $ initEventEngineCtx maxEvThrds fetchI
|
||||
unLogger logger $ mkGenericStrLog LevelInfo "event_triggers" "starting workers"
|
||||
_eventQueueThread <- C.forkImmortal "processEventQueue" logger $ liftIO $
|
||||
|
||||
_eventQueueThread <- C.forkImmortal "processEventQueue" logger $
|
||||
processEventQueue logger logEnvHeaders
|
||||
_icHttpManager _icPgPool (getSCFromRef cacheRef) eventEngineCtx lockedEventsCtx
|
||||
|
||||
-- start a backgroud thread to handle async actions
|
||||
_asyncActionsThread <- C.forkImmortal "asyncActionsProcessor" logger $ liftIO $
|
||||
asyncActionsProcessor (_scrCache cacheRef) _icPgPool _icHttpManager
|
||||
asyncActionsThread <- C.forkImmortal "asyncActionsProcessor" logger $
|
||||
asyncActionsProcessor env (_scrCache cacheRef) _icPgPool _icHttpManager
|
||||
|
||||
-- start a background thread to create new cron events
|
||||
void $ liftIO $ C.forkImmortal "runCronEventsGenerator" logger $
|
||||
@ -349,20 +381,29 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
|
||||
prepareScheduledEvents _icPgPool logger
|
||||
|
||||
-- start a background thread to deliver the scheduled events
|
||||
void $ liftIO $ C.forkImmortal "processScheduledTriggers" logger $ processScheduledTriggers logger logEnvHeaders _icHttpManager _icPgPool (getSCFromRef cacheRef) lockedEventsCtx
|
||||
void $ C.forkImmortal "processScheduledTriggers" logger $
|
||||
processScheduledTriggers env logger logEnvHeaders _icHttpManager _icPgPool (getSCFromRef cacheRef) lockedEventsCtx
|
||||
|
||||
-- start a background thread to check for updates
|
||||
_updateThread <- C.forkImmortal "checkForUpdates" logger $ liftIO $
|
||||
updateThread <- C.forkImmortal "checkForUpdates" logger $ liftIO $
|
||||
checkForUpdates loggerCtx _icHttpManager
|
||||
|
||||
-- startTelemetry logger serveOpts cacheRef initCtx
|
||||
-- start a background thread for telemetry
|
||||
when soEnableTelemetry $ do
|
||||
unLogger logger $ mkGenericStrLog LevelInfo "telemetry" telemetryNotice
|
||||
|
||||
(dbId, pgVersion) <- liftIO $ runTxIO _icPgPool (Q.ReadCommitted, Nothing) $
|
||||
(,) <$> getDbId <*> getPgVersion
|
||||
|
||||
void $ C.forkImmortal "runTelemetry" logger $ liftIO $
|
||||
runTelemetry logger _icHttpManager (getSCFromRef cacheRef) dbId _icInstanceId pgVersion
|
||||
|
||||
|
||||
|
||||
-- events has its own shutdown mechanism, used in 'shutdownHandler'
|
||||
let immortalThreads = [schemaSyncListenerThread, schemaSyncProcessorThread, updateThread, asyncActionsThread]
|
||||
|
||||
finishTime <- liftIO Clock.getCurrentTime
|
||||
let apiInitTime = realToFrac $ Clock.diffUTCTime finishTime initTime
|
||||
unLogger logger $
|
||||
@ -370,7 +411,7 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
|
||||
let warpSettings = Warp.setPort soPort
|
||||
. Warp.setHost soHost
|
||||
. Warp.setGracefulShutdownTimeout (Just 30) -- 30s graceful shutdown
|
||||
. Warp.setInstallShutdownHandler (shutdownHandler _icLoggers shutdownApp lockedEventsCtx _icPgPool)
|
||||
. Warp.setInstallShutdownHandler (shutdownHandler _icLoggers immortalThreads stopWsServer lockedEventsCtx _icPgPool)
|
||||
$ Warp.defaultSettings
|
||||
liftIO $ Warp.runSettings warpSettings app
|
||||
|
||||
@ -389,13 +430,13 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
|
||||
prepareEvents pool (Logger logger) = do
|
||||
liftIO $ logger $ mkGenericStrLog LevelInfo "event_triggers" "preparing data"
|
||||
res <- liftIO $ runTx pool (Q.ReadCommitted, Nothing) unlockAllEvents
|
||||
either printErrJExit return res
|
||||
either (printErrJExit EventSubSystemError) return res
|
||||
|
||||
-- | prepareScheduledEvents is like prepareEvents, but for scheduled triggers
|
||||
prepareScheduledEvents pool (Logger logger) = do
|
||||
liftIO $ logger $ mkGenericStrLog LevelInfo "scheduled_triggers" "preparing data"
|
||||
res <- liftIO $ runTx pool (Q.ReadCommitted, Nothing) unlockAllLockedScheduledEvents
|
||||
either printErrJExit return res
|
||||
either (printErrJExit EventSubSystemError) return res
|
||||
|
||||
-- | shutdownEvents will be triggered when a graceful shutdown has been inititiated, it will
|
||||
-- get the locked events from the event engine context and the scheduled event engine context
|
||||
@ -433,15 +474,6 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
|
||||
Right count -> logger $ mkGenericStrLog LevelInfo triggerType $
|
||||
show count ++ " " ++ T.unpack eventType ++ " events successfully unlocked"
|
||||
|
||||
getFromEnv :: (Read a) => a -> String -> IO a
|
||||
getFromEnv defaults env = do
|
||||
mEnv <- lookupEnv env
|
||||
let mRes = case mEnv of
|
||||
Nothing -> Just defaults
|
||||
Just val -> readMaybe val
|
||||
eRes = maybe (Left $ "Wrong expected type for environment variable: " <> env) Right mRes
|
||||
either printErrExit return eRes
|
||||
|
||||
runTx :: Q.PGPool -> Q.TxMode -> Q.TxE QErr a -> IO (Either QErr a)
|
||||
runTx pool txLevel tx =
|
||||
liftIO $ runExceptT $ Q.runTx pool txLevel tx
|
||||
@ -452,17 +484,26 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
|
||||
-- we want to control shutdown.
|
||||
shutdownHandler
|
||||
:: Loggers
|
||||
-> [Immortal.Thread]
|
||||
-> IO ()
|
||||
-- ^ the stop websocket server function
|
||||
-> LockedEventsCtx
|
||||
-> Q.PGPool
|
||||
-> IO ()
|
||||
-- ^ the closeSocket callback
|
||||
-> IO ()
|
||||
shutdownHandler (Loggers loggerCtx (Logger logger) _) shutdownApp leCtx pool closeSocket =
|
||||
void . Async.async $ do
|
||||
shutdownHandler (Loggers loggerCtx (Logger logger) _) immortalThreads stopWsServer leCtx pool closeSocket =
|
||||
LA.link =<< LA.async do
|
||||
waitForShutdown _icShutdownLatch
|
||||
logger $ mkGenericStrLog LevelInfo "server" "gracefully shutting down server"
|
||||
shutdownEvents pool (Logger logger) leCtx
|
||||
closeSocket
|
||||
stopWsServer
|
||||
-- kill all the background immortal threads
|
||||
logger $ mkGenericStrLog LevelInfo "server" "killing all background immortal threads"
|
||||
forM_ immortalThreads $ \thread -> do
|
||||
logger $ mkGenericStrLog LevelInfo "server" $ "killing thread: " <> show (Immortal.threadId thread)
|
||||
Immortal.stop thread
|
||||
shutdownApp
|
||||
cleanLoggerCtx loggerCtx
|
||||
|
||||
@ -543,15 +584,15 @@ execQuery
|
||||
, UserInfoM m
|
||||
, HasSystemDefined m
|
||||
)
|
||||
=> BLC.ByteString
|
||||
=> Env.Environment
|
||||
-> BLC.ByteString
|
||||
-> m BLC.ByteString
|
||||
execQuery queryBs = do
|
||||
execQuery env queryBs = do
|
||||
query <- case A.decode queryBs of
|
||||
Just jVal -> decodeValue jVal
|
||||
Nothing -> throw400 InvalidJSON "invalid json"
|
||||
buildSchemaCacheStrict
|
||||
encJToLBS <$> runQueryM query
|
||||
|
||||
encJToLBS <$> runQueryM env query
|
||||
|
||||
instance HttpLog AppM where
|
||||
logHttpError logger userInfoM reqId httpReq req qErr headers =
|
||||
@ -614,7 +655,6 @@ mkConsoleHTML path authMode enableTelemetry consoleAssetsDir =
|
||||
|
||||
consoleTmplt = $(M.embedSingleTemplate "src-rsr/console.html")
|
||||
|
||||
|
||||
telemetryNotice :: String
|
||||
telemetryNotice =
|
||||
"Help us improve Hasura! The graphql-engine server collects anonymized "
|
||||
|
@ -18,6 +18,7 @@ module Hasura.Db
|
||||
, LazyRespTx
|
||||
, defaultTxErrorHandler
|
||||
, mkTxErrorHandler
|
||||
, lazyTxToQTx
|
||||
) where
|
||||
|
||||
import Control.Lens
|
||||
@ -134,7 +135,7 @@ type RespTx = Q.TxE QErr EncJSON
|
||||
type LazyRespTx = LazyTx QErr EncJSON
|
||||
|
||||
setHeadersTx :: SessionVariables -> Q.TxE QErr ()
|
||||
setHeadersTx session =
|
||||
setHeadersTx session = do
|
||||
Q.unitQE defaultTxErrorHandler setSess () False
|
||||
where
|
||||
setSess = Q.fromText $
|
||||
@ -182,7 +183,9 @@ withUserInfo :: UserInfo -> LazyTx QErr a -> LazyTx QErr a
|
||||
withUserInfo uInfo = \case
|
||||
LTErr e -> LTErr e
|
||||
LTNoTx a -> LTNoTx a
|
||||
LTTx tx -> LTTx $ setHeadersTx (_uiSession uInfo) >> tx
|
||||
LTTx tx ->
|
||||
let vars = _uiSession uInfo
|
||||
in LTTx $ setHeadersTx vars >> tx
|
||||
|
||||
instance Functor (LazyTx e) where
|
||||
fmap f = \case
|
||||
|
@ -1,18 +1,18 @@
|
||||
module Hasura.Eventing.Common where
|
||||
|
||||
import Hasura.Prelude
|
||||
import Control.Concurrent.STM.TVar
|
||||
import Control.Monad.STM
|
||||
import Hasura.Prelude
|
||||
import Hasura.RQL.Types.EventTrigger (EventId)
|
||||
import Hasura.RQL.Types.ScheduledTrigger (CronEventId,StandAloneScheduledEventId)
|
||||
import Hasura.RQL.Types.ScheduledTrigger (CronEventId, StandAloneScheduledEventId)
|
||||
|
||||
import qualified Data.Set as Set
|
||||
import qualified Data.Set as Set
|
||||
|
||||
data LockedEventsCtx
|
||||
= LockedEventsCtx
|
||||
{ leCronEvents :: TVar (Set.Set CronEventId)
|
||||
{ leCronEvents :: TVar (Set.Set CronEventId)
|
||||
, leStandAloneEvents :: TVar (Set.Set StandAloneScheduledEventId)
|
||||
, leEvents :: TVar (Set.Set EventId)
|
||||
, leEvents :: TVar (Set.Set EventId)
|
||||
}
|
||||
|
||||
initLockedEventsCtx :: STM LockedEventsCtx
|
||||
@ -25,16 +25,16 @@ initLockedEventsCtx = do
|
||||
-- | After the events are fetched from the DB, we store the locked events
|
||||
-- in a hash set(order doesn't matter and look ups are faster) in the
|
||||
-- event engine context
|
||||
saveLockedEvents :: [Text] -> TVar (Set.Set Text) -> IO ()
|
||||
saveLockedEvents :: (MonadIO m) => [Text] -> TVar (Set.Set Text) -> m ()
|
||||
saveLockedEvents eventIds lockedEvents =
|
||||
atomically $ do
|
||||
liftIO $ atomically $ do
|
||||
lockedEventsVals <- readTVar lockedEvents
|
||||
writeTVar lockedEvents $!
|
||||
Set.union lockedEventsVals $ Set.fromList eventIds
|
||||
|
||||
-- | Remove an event from the 'LockedEventsCtx' after it has been processed
|
||||
removeEventFromLockedEvents :: Text -> TVar (Set.Set Text) -> IO ()
|
||||
removeEventFromLockedEvents :: MonadIO m => Text -> TVar (Set.Set Text) -> m ()
|
||||
removeEventFromLockedEvents eventId lockedEvents =
|
||||
atomically $ do
|
||||
liftIO $ atomically $ do
|
||||
lockedEventsVals <- readTVar lockedEvents
|
||||
writeTVar lockedEvents $! Set.delete eventId lockedEventsVals
|
||||
|
@ -40,39 +40,38 @@ module Hasura.Eventing.EventTrigger
|
||||
, EventEngineCtx(..)
|
||||
) where
|
||||
|
||||
|
||||
import Control.Concurrent.Async (async, link, wait, withAsync)
|
||||
import Control.Concurrent.Extended (sleep)
|
||||
import Control.Concurrent.Extended (sleep)
|
||||
import Control.Concurrent.STM.TVar
|
||||
import Control.Monad.Catch (MonadMask, bracket_)
|
||||
import Control.Monad.Catch (MonadMask, bracket_)
|
||||
import Control.Monad.STM
|
||||
import Control.Monad.Trans.Control (MonadBaseControl)
|
||||
import Data.Aeson
|
||||
import Data.Aeson.Casing
|
||||
import Data.Aeson.TH
|
||||
import Data.Has
|
||||
import Data.Int (Int64)
|
||||
import Data.Int (Int64)
|
||||
import Data.String
|
||||
import Data.Time.Clock
|
||||
import Data.Word
|
||||
import Hasura.Eventing.HTTP
|
||||
import Hasura.Eventing.Common
|
||||
|
||||
import Hasura.Eventing.HTTP
|
||||
import Hasura.HTTP
|
||||
import Hasura.Prelude
|
||||
import Hasura.RQL.DDL.Headers
|
||||
import Hasura.RQL.Types
|
||||
import Hasura.Server.Version (HasVersion)
|
||||
import Hasura.Server.Version (HasVersion)
|
||||
import Hasura.SQL.Types
|
||||
|
||||
import qualified Data.HashMap.Strict as M
|
||||
import qualified Data.TByteString as TBS
|
||||
import qualified Data.Text as T
|
||||
import qualified Data.Time.Clock as Time
|
||||
import qualified Database.PG.Query as Q
|
||||
import qualified Hasura.Logging as L
|
||||
import qualified Network.HTTP.Client as HTTP
|
||||
import qualified Database.PG.Query.PTI as PTI
|
||||
import qualified PostgreSQL.Binary.Encoding as PE
|
||||
import qualified Control.Concurrent.Async.Lifted.Safe as LA
|
||||
import qualified Data.HashMap.Strict as M
|
||||
import qualified Data.TByteString as TBS
|
||||
import qualified Data.Text as T
|
||||
import qualified Data.Time.Clock as Time
|
||||
import qualified Database.PG.Query as Q
|
||||
import qualified Database.PG.Query.PTI as PTI
|
||||
import qualified Hasura.Logging as L
|
||||
import qualified Network.HTTP.Client as HTTP
|
||||
import qualified PostgreSQL.Binary.Encoding as PE
|
||||
|
||||
data TriggerMetadata
|
||||
= TriggerMetadata { tmName :: TriggerName }
|
||||
@ -159,19 +158,31 @@ initEventEngineCtx maxT _eeCtxFetchInterval = do
|
||||
-- - try not to cause webhook workers to stall waiting on DB fetch
|
||||
-- - limit webhook HTTP concurrency per HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE
|
||||
processEventQueue
|
||||
:: (HasVersion) => L.Logger L.Hasura -> LogEnvHeaders -> HTTP.Manager-> Q.PGPool
|
||||
-> IO SchemaCache -> EventEngineCtx -> LockedEventsCtx
|
||||
-> IO void
|
||||
processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx{..} LockedEventsCtx {leEvents}= do
|
||||
:: forall m void
|
||||
. ( HasVersion
|
||||
, MonadIO m
|
||||
, MonadBaseControl IO m
|
||||
, LA.Forall (LA.Pure m)
|
||||
, MonadMask m
|
||||
)
|
||||
=> L.Logger L.Hasura
|
||||
-> LogEnvHeaders
|
||||
-> HTTP.Manager
|
||||
-> Q.PGPool
|
||||
-> IO SchemaCache
|
||||
-> EventEngineCtx
|
||||
-> LockedEventsCtx
|
||||
-> m void
|
||||
processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx{..} LockedEventsCtx{leEvents} = do
|
||||
events0 <- popEventsBatch
|
||||
go events0 0 False
|
||||
where
|
||||
fetchBatchSize = 100
|
||||
popEventsBatch = do
|
||||
let run = runExceptT . Q.runTx pool (Q.RepeatableRead, Just Q.ReadWrite)
|
||||
let run = liftIO . runExceptT . Q.runTx pool (Q.RepeatableRead, Just Q.ReadWrite)
|
||||
run (fetchEvents fetchBatchSize) >>= \case
|
||||
Left err -> do
|
||||
L.unLogger logger $ EventInternalErr err
|
||||
liftIO $ L.unLogger logger $ EventInternalErr err
|
||||
return []
|
||||
Right events -> do
|
||||
saveLockedEvents (map eId events) leEvents
|
||||
@ -179,25 +190,26 @@ processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx
|
||||
|
||||
-- work on this batch of events while prefetching the next. Recurse after we've forked workers
|
||||
-- for each in the batch, minding the requested pool size.
|
||||
go :: [Event] -> Int -> Bool -> IO void
|
||||
go :: [Event] -> Int -> Bool -> m void
|
||||
go events !fullFetchCount !alreadyWarned = do
|
||||
-- process events ASAP until we've caught up; only then can we sleep
|
||||
when (null events) $ sleep _eeCtxFetchInterval
|
||||
when (null events) . liftIO $ sleep _eeCtxFetchInterval
|
||||
|
||||
-- Prefetch next events payload while concurrently working through our current batch.
|
||||
-- NOTE: we probably don't need to prefetch so early, but probably not
|
||||
-- worth the effort for something more fine-tuned
|
||||
eventsNext <- withAsync popEventsBatch $ \eventsNextA -> do
|
||||
eventsNext <- LA.withAsync popEventsBatch $ \eventsNextA -> do
|
||||
-- process approximately in order, minding HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE:
|
||||
forM_ events $ \event -> do
|
||||
t <- async $ runReaderT (withEventEngineCtx eeCtx $ (processEvent event)) (logger, httpMgr)
|
||||
-- removing an event from the _eeCtxLockedEvents after the event has
|
||||
-- been processed
|
||||
removeEventFromLockedEvents (eId event) leEvents
|
||||
link t
|
||||
|
||||
-- return when next batch ready; some 'processEvent' threads may be running.
|
||||
wait eventsNextA
|
||||
t <- processEvent event
|
||||
& withEventEngineCtx eeCtx
|
||||
& flip runReaderT (logger, httpMgr)
|
||||
& LA.async
|
||||
-- removing an event from the _eeCtxLockedEvents after the event has
|
||||
-- been processed
|
||||
removeEventFromLockedEvents (eId event) leEvents
|
||||
LA.link t
|
||||
LA.wait eventsNextA
|
||||
|
||||
let lenEvents = length events
|
||||
if | lenEvents == fetchBatchSize -> do
|
||||
@ -220,13 +232,14 @@ processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx
|
||||
go eventsNext 0 False
|
||||
|
||||
processEvent
|
||||
:: ( HasVersion
|
||||
, MonadReader r m
|
||||
:: forall io r
|
||||
. ( HasVersion
|
||||
, MonadIO io
|
||||
, MonadReader r io
|
||||
, Has HTTP.Manager r
|
||||
, Has (L.Logger L.Hasura) r
|
||||
, MonadIO m
|
||||
)
|
||||
=> Event -> m ()
|
||||
=> Event -> io ()
|
||||
processEvent e = do
|
||||
cache <- liftIO getSchemaCache
|
||||
let meti = getEventTriggerInfoFromEvent cache e
|
||||
|
@ -73,40 +73,40 @@ module Hasura.Eventing.ScheduledTrigger
|
||||
, unlockAllLockedScheduledEvents
|
||||
) where
|
||||
|
||||
import Control.Arrow.Extended (dup)
|
||||
import Control.Concurrent.Extended (sleep)
|
||||
import Control.Arrow.Extended (dup)
|
||||
import Control.Concurrent.Extended (sleep)
|
||||
import Control.Concurrent.STM.TVar
|
||||
import Data.Has
|
||||
import Data.Int (Int64)
|
||||
import Data.List (unfoldr)
|
||||
import Data.Int (Int64)
|
||||
import Data.List (unfoldr)
|
||||
import Data.Time.Clock
|
||||
import Hasura.Eventing.Common
|
||||
import Hasura.Eventing.HTTP
|
||||
import Hasura.HTTP
|
||||
import Hasura.Prelude
|
||||
import Hasura.RQL.DDL.EventTrigger (getHeaderInfosFromConf)
|
||||
import Hasura.RQL.DDL.Headers
|
||||
import Hasura.RQL.Types
|
||||
import Hasura.Server.Version (HasVersion)
|
||||
import Hasura.RQL.DDL.EventTrigger (getHeaderInfosFromConf)
|
||||
import Hasura.Server.Version (HasVersion)
|
||||
import Hasura.SQL.DML
|
||||
import Hasura.SQL.Types
|
||||
import Hasura.Eventing.Common
|
||||
|
||||
import System.Cron
|
||||
|
||||
import qualified Data.Aeson as J
|
||||
import qualified Data.Aeson.Casing as J
|
||||
import qualified Data.Aeson.TH as J
|
||||
import qualified Data.HashMap.Strict as Map
|
||||
import qualified Data.TByteString as TBS
|
||||
import qualified Data.Text as T
|
||||
import qualified Database.PG.Query as Q
|
||||
import qualified Hasura.Logging as L
|
||||
import qualified Network.HTTP.Client as HTTP
|
||||
import qualified Text.Builder as TB (run)
|
||||
import qualified PostgreSQL.Binary.Decoding as PD
|
||||
import qualified Data.Set as Set
|
||||
import qualified Database.PG.Query.PTI as PTI
|
||||
import qualified PostgreSQL.Binary.Encoding as PE
|
||||
import qualified Data.Aeson as J
|
||||
import qualified Data.Aeson.Casing as J
|
||||
import qualified Data.Aeson.TH as J
|
||||
import qualified Data.Environment as Env
|
||||
import qualified Data.HashMap.Strict as Map
|
||||
import qualified Data.Set as Set
|
||||
import qualified Data.TByteString as TBS
|
||||
import qualified Data.Text as T
|
||||
import qualified Database.PG.Query as Q
|
||||
import qualified Database.PG.Query.PTI as PTI
|
||||
import qualified Hasura.Logging as L
|
||||
import qualified Network.HTTP.Client as HTTP
|
||||
import qualified PostgreSQL.Binary.Decoding as PD
|
||||
import qualified PostgreSQL.Binary.Encoding as PE
|
||||
import qualified Text.Builder as TB (run)
|
||||
|
||||
|
||||
newtype ScheduledTriggerInternalErr
|
||||
@ -133,10 +133,10 @@ data ScheduledEventStatus
|
||||
|
||||
scheduledEventStatusToText :: ScheduledEventStatus -> Text
|
||||
scheduledEventStatusToText SESScheduled = "scheduled"
|
||||
scheduledEventStatusToText SESLocked = "locked"
|
||||
scheduledEventStatusToText SESLocked = "locked"
|
||||
scheduledEventStatusToText SESDelivered = "delivered"
|
||||
scheduledEventStatusToText SESError = "error"
|
||||
scheduledEventStatusToText SESDead = "dead"
|
||||
scheduledEventStatusToText SESError = "error"
|
||||
scheduledEventStatusToText SESDead = "dead"
|
||||
|
||||
instance Q.ToPrepArg ScheduledEventStatus where
|
||||
toPrepVal = Q.toPrepVal . scheduledEventStatusToText
|
||||
@ -338,19 +338,19 @@ generateScheduleTimes from n cron = take n $ go from
|
||||
go = unfoldr (fmap dup . nextMatch cron)
|
||||
|
||||
processCronEvents
|
||||
:: HasVersion
|
||||
:: (HasVersion, MonadIO m)
|
||||
=> L.Logger L.Hasura
|
||||
-> LogEnvHeaders
|
||||
-> HTTP.Manager
|
||||
-> Q.PGPool
|
||||
-> IO SchemaCache
|
||||
-> TVar (Set.Set CronEventId)
|
||||
-> IO ()
|
||||
-> m ()
|
||||
processCronEvents logger logEnv httpMgr pgpool getSC lockedCronEvents = do
|
||||
cronTriggersInfo <- scCronTriggers <$> getSC
|
||||
cronTriggersInfo <- scCronTriggers <$> liftIO getSC
|
||||
cronScheduledEvents <-
|
||||
runExceptT $
|
||||
Q.runTx pgpool (Q.ReadCommitted, Just Q.ReadWrite) getPartialCronEvents
|
||||
liftIO . runExceptT $
|
||||
Q.runTx pgpool (Q.ReadCommitted, Just Q.ReadWrite) getPartialCronEvents
|
||||
case cronScheduledEvents of
|
||||
Right partialEvents -> do
|
||||
-- save the locked standalone events that have been fetched from the
|
||||
@ -380,19 +380,20 @@ processCronEvents logger logEnv httpMgr pgpool getSC lockedCronEvents = do
|
||||
either logInternalError pure finally
|
||||
Left err -> logInternalError err
|
||||
where
|
||||
logInternalError err = L.unLogger logger $ ScheduledTriggerInternalErr err
|
||||
logInternalError err = liftIO . L.unLogger logger $ ScheduledTriggerInternalErr err
|
||||
|
||||
processStandAloneEvents
|
||||
:: HasVersion
|
||||
=> L.Logger L.Hasura
|
||||
:: (HasVersion, MonadIO m)
|
||||
=> Env.Environment
|
||||
-> L.Logger L.Hasura
|
||||
-> LogEnvHeaders
|
||||
-> HTTP.Manager
|
||||
-> Q.PGPool
|
||||
-> TVar (Set.Set StandAloneScheduledEventId)
|
||||
-> IO ()
|
||||
processStandAloneEvents logger logEnv httpMgr pgpool lockedStandAloneEvents = do
|
||||
-> m ()
|
||||
processStandAloneEvents env logger logEnv httpMgr pgpool lockedStandAloneEvents = do
|
||||
standAloneScheduledEvents <-
|
||||
runExceptT $
|
||||
liftIO . runExceptT $
|
||||
Q.runTx pgpool (Q.ReadCommitted, Just Q.ReadWrite) getOneOffScheduledEvents
|
||||
case standAloneScheduledEvents of
|
||||
Right standAloneScheduledEvents' -> do
|
||||
@ -410,8 +411,8 @@ processStandAloneEvents logger logEnv httpMgr pgpool lockedStandAloneEvents = do
|
||||
headerConf
|
||||
comment )
|
||||
-> do
|
||||
webhookInfo <- runExceptT $ resolveWebhook webhookConf
|
||||
headerInfo <- runExceptT $ getHeaderInfosFromConf headerConf
|
||||
webhookInfo <- liftIO . runExceptT $ resolveWebhook env webhookConf
|
||||
headerInfo <- liftIO . runExceptT $ getHeaderInfosFromConf env headerConf
|
||||
|
||||
case webhookInfo of
|
||||
Right webhookInfo' -> do
|
||||
@ -440,22 +441,23 @@ processStandAloneEvents logger logEnv httpMgr pgpool lockedStandAloneEvents = do
|
||||
|
||||
Left standAloneScheduledEventsErr -> logInternalError standAloneScheduledEventsErr
|
||||
where
|
||||
logInternalError err = L.unLogger logger $ ScheduledTriggerInternalErr err
|
||||
logInternalError err = liftIO . L.unLogger logger $ ScheduledTriggerInternalErr err
|
||||
|
||||
processScheduledTriggers
|
||||
:: HasVersion
|
||||
=> L.Logger L.Hasura
|
||||
:: (HasVersion, MonadIO m)
|
||||
=> Env.Environment
|
||||
-> L.Logger L.Hasura
|
||||
-> LogEnvHeaders
|
||||
-> HTTP.Manager
|
||||
-> Q.PGPool
|
||||
-> IO SchemaCache
|
||||
-> LockedEventsCtx
|
||||
-> IO void
|
||||
processScheduledTriggers logger logEnv httpMgr pgpool getSC LockedEventsCtx {..} =
|
||||
-> m void
|
||||
processScheduledTriggers env logger logEnv httpMgr pgpool getSC LockedEventsCtx {..} =
|
||||
forever $ do
|
||||
processCronEvents logger logEnv httpMgr pgpool getSC leCronEvents
|
||||
processStandAloneEvents logger logEnv httpMgr pgpool leStandAloneEvents
|
||||
sleep (minutes 1)
|
||||
processStandAloneEvents env logger logEnv httpMgr pgpool leStandAloneEvents
|
||||
liftIO $ sleep (minutes 1)
|
||||
|
||||
processScheduledEvent ::
|
||||
( MonadReader r m
|
||||
|
@ -14,7 +14,7 @@ module Hasura.GraphQL.Execute
|
||||
, EP.initPlanCache
|
||||
, EP.clearPlanCache
|
||||
, EP.dumpPlanCache
|
||||
|
||||
, EQ.PreparedSql(..)
|
||||
, ExecutionCtx(..)
|
||||
|
||||
, MonadGQLExecutionCheck(..)
|
||||
@ -27,6 +27,7 @@ import Data.Text.Conversions
|
||||
|
||||
import qualified Data.Aeson as J
|
||||
import qualified Data.CaseInsensitive as CI
|
||||
import qualified Data.Environment as Env
|
||||
import qualified Data.HashMap.Strict as Map
|
||||
|
||||
import qualified Data.Text as T
|
||||
@ -120,13 +121,6 @@ getExecPlanPartial userInfo sc queryType req = do
|
||||
where
|
||||
roleName = _uiRole userInfo
|
||||
|
||||
-- checkQueryInAllowlist =
|
||||
-- -- only for non-admin roles
|
||||
-- when (roleName /= adminRoleName) $ do
|
||||
-- let notInAllowlist =
|
||||
-- not $ _isQueryInAllowlist (_grQuery req) (scAllowlist sc)
|
||||
-- when notInAllowlist $ modifyQErr modErr $ throw400 ValidationFailed "query is not allowed"
|
||||
|
||||
contextMap =
|
||||
case queryType of
|
||||
ET.QueryHasura -> scGQLContext sc
|
||||
@ -172,10 +166,10 @@ getExecPlanPartial userInfo sc queryType req = do
|
||||
"in the document when operationName is not specified"
|
||||
|
||||
-- The graphql query is resolved into a sequence of execution operations
|
||||
data ResolvedExecutionPlan
|
||||
= QueryExecutionPlan (EPr.ExecutionPlan (LazyRespTx, EQ.GeneratedSqlMap) EPr.RemoteCall (G.Name, J.Value))
|
||||
data ResolvedExecutionPlan m
|
||||
= QueryExecutionPlan (EPr.ExecutionPlan (m EncJSON, EQ.GeneratedSqlMap) EPr.RemoteCall (G.Name, J.Value))
|
||||
-- ^ query execution; remote schemas and introspection possible
|
||||
| MutationExecutionPlan (EPr.ExecutionPlan (LazyRespTx, HTTP.ResponseHeaders) EPr.RemoteCall (G.Name, J.Value))
|
||||
| MutationExecutionPlan (EPr.ExecutionPlan (m EncJSON, HTTP.ResponseHeaders) EPr.RemoteCall (G.Name, J.Value))
|
||||
-- ^ mutation execution; only __typename introspection supported
|
||||
| SubscriptionExecutionPlan (EPr.ExecutionPlan EL.LiveQueryPlan Void Void)
|
||||
-- ^ live query execution; remote schemas and introspection not supported
|
||||
@ -212,8 +206,15 @@ checkQueryInAllowlist enableAL userInfo req sc =
|
||||
unGQLExecDoc q
|
||||
|
||||
getResolvedExecPlan
|
||||
:: forall m . (HasVersion, MonadError QErr m, MonadIO m)
|
||||
=> PGExecCtx
|
||||
:: forall m tx
|
||||
. ( HasVersion
|
||||
, MonadError QErr m
|
||||
, MonadIO m
|
||||
, MonadIO tx
|
||||
, MonadTx tx
|
||||
)
|
||||
=> Env.Environment
|
||||
-> PGExecCtx
|
||||
-> EP.PlanCache
|
||||
-> UserInfo
|
||||
-> SQLGenCtx
|
||||
@ -223,8 +224,8 @@ getResolvedExecPlan
|
||||
-> HTTP.Manager
|
||||
-> [HTTP.Header]
|
||||
-> (GQLReqUnparsed, GQLReqParsed)
|
||||
-> m (Telem.CacheHit, ResolvedExecutionPlan)
|
||||
getResolvedExecPlan pgExecCtx planCache userInfo sqlGenCtx
|
||||
-> m (Telem.CacheHit,ResolvedExecutionPlan tx)
|
||||
getResolvedExecPlan env pgExecCtx planCache userInfo sqlGenCtx
|
||||
sc scVer queryType httpManager reqHeaders (reqUnparsed, reqParsed) = do
|
||||
|
||||
planM <- liftIO $ EP.getPlan scVer (_uiRole userInfo) opNameM queryStr
|
||||
@ -234,7 +235,7 @@ getResolvedExecPlan pgExecCtx planCache userInfo sqlGenCtx
|
||||
-- plans are only for queries and subscriptions
|
||||
Just plan -> (Telem.Hit,) <$> case plan of
|
||||
EP.RPQuery queryPlan -> do
|
||||
(tx, genSql) <- EQ.queryOpFromPlan httpManager reqHeaders userInfo queryVars queryPlan
|
||||
-- (tx, genSql) <- EQ.queryOpFromPlan env httpManager reqHeaders userInfo queryVars queryPlan
|
||||
return $ QueryExecutionPlan _ -- tx (Just genSql)
|
||||
EP.RPSubs subsPlan ->
|
||||
return $ SubscriptionExecutionPlan _ -- <$> EL.reuseLiveQueryPlan pgExecCtx usrVars queryVars subsPlan
|
||||
@ -244,7 +245,7 @@ getResolvedExecPlan pgExecCtx planCache userInfo sqlGenCtx
|
||||
-- addPlanToCache plan =
|
||||
-- liftIO $ EP.addPlan scVer (userRole userInfo)
|
||||
-- opNameM queryStr plan planCache
|
||||
noExistingPlan :: m ResolvedExecutionPlan
|
||||
noExistingPlan :: m (ResolvedExecutionPlan tx)
|
||||
noExistingPlan = do
|
||||
-- GraphQL requests may incorporate fragments which insert a pre-defined
|
||||
-- part of a GraphQL query. Here we make sure to remember those
|
||||
@ -260,13 +261,13 @@ getResolvedExecPlan pgExecCtx planCache userInfo sqlGenCtx
|
||||
-- (Here the above fragment inlining is actually executed.)
|
||||
inlinedSelSet <- EI.inlineSelectionSet fragments selSet
|
||||
(execPlan, plan, _unprepared) <-
|
||||
EQ.convertQuerySelSet gCtx userInfo httpManager reqHeaders inlinedSelSet varDefs (_grVariables reqUnparsed)
|
||||
EQ.convertQuerySelSet env gCtx userInfo httpManager reqHeaders inlinedSelSet varDefs (_grVariables reqUnparsed)
|
||||
-- traverse_ (addPlanToCache . EP.RPQuery) plan
|
||||
return $ QueryExecutionPlan $ execPlan
|
||||
G.TypedOperationDefinition G.OperationTypeMutation _ varDefs _ selSet -> do
|
||||
-- (Here the above fragment inlining is actually executed.)
|
||||
inlinedSelSet <- EI.inlineSelectionSet fragments selSet
|
||||
queryTx <- EM.convertMutationSelectionSet gCtx sqlGenCtx userInfo httpManager reqHeaders
|
||||
queryTx <- EM.convertMutationSelectionSet env gCtx sqlGenCtx userInfo httpManager reqHeaders
|
||||
inlinedSelSet varDefs (_grVariables reqUnparsed)
|
||||
-- traverse_ (addPlanToCache . EP.RPQuery) plan
|
||||
return $ MutationExecutionPlan $ queryTx
|
||||
@ -315,6 +316,10 @@ getResolvedExecPlan pgExecCtx planCache userInfo sqlGenCtx
|
||||
-- ]
|
||||
-- }
|
||||
-- }
|
||||
-- Parse as query to check correctness
|
||||
(_execPlan :: EPr.ExecutionPlan (tx EncJSON, EQ.GeneratedSqlMap) EPr.RemoteCall (G.Name,J.Value)
|
||||
, _plan, unpreparedAST) <-
|
||||
EQ.convertQuerySelSet env gCtx userInfo httpManager reqHeaders inlinedSelSet varDefs (_grVariables reqUnparsed)
|
||||
case NE.nonEmpty inlinedSelSet of
|
||||
Nothing -> throw500 "empty selset for subscription"
|
||||
Just (_ :| rst) ->
|
||||
@ -322,9 +327,6 @@ getResolvedExecPlan pgExecCtx planCache userInfo sqlGenCtx
|
||||
in
|
||||
unless (multipleAllowed || null rst) $
|
||||
throw400 ValidationFailed $ "subscriptions must select one top level field"
|
||||
-- Parse as query to check correctness
|
||||
(_execPlan, _plan, unpreparedAST) <-
|
||||
EQ.convertQuerySelSet gCtx userInfo httpManager reqHeaders inlinedSelSet varDefs (_grVariables reqUnparsed)
|
||||
validSubscriptionAST <- for unpreparedAST validateSubscriptionRootField
|
||||
(lqOp, plan) <- EL.buildLiveQueryPlan pgExecCtx userInfo validSubscriptionAST
|
||||
-- getSubsOpM pgExecCtx userInfo inlinedSelSet
|
||||
@ -351,7 +353,8 @@ execRemoteGQ
|
||||
, MonadReader ExecutionCtx m
|
||||
, MonadQueryLog m
|
||||
)
|
||||
=> RequestId
|
||||
=> Env.Environment
|
||||
-> RequestId
|
||||
-> UserInfo
|
||||
-> [HTTP.Header]
|
||||
-> GQLReqUnparsed
|
||||
@ -359,12 +362,12 @@ execRemoteGQ
|
||||
-> G.TypedOperationDefinition G.NoFragments G.Name
|
||||
-> m (DiffTime, HttpResponse EncJSON)
|
||||
-- ^ Also returns time spent in http request, for telemetry.
|
||||
execRemoteGQ reqId userInfo reqHdrs q rsi opDef = do
|
||||
execRemoteGQ env reqId userInfo reqHdrs q rsi opDef = do
|
||||
execCtx <- ask
|
||||
let logger = _ecxLogger execCtx
|
||||
manager = _ecxHttpManager execCtx
|
||||
opType = G._todType opDef
|
||||
logQueryLog logger q Nothing reqId
|
||||
(time, respHdrs, resp) <- execRemoteGQ' manager userInfo reqHdrs q rsi opType
|
||||
(time, respHdrs, resp) <- execRemoteGQ' env manager userInfo reqHdrs q rsi opType
|
||||
let !httpResp = HttpResponse (encJFromLBS resp) respHdrs
|
||||
return (time, httpResp)
|
||||
|
@ -30,7 +30,7 @@ instance J.ToJSON LiveQueriesOptions where
|
||||
newtype BatchSize = BatchSize { unBatchSize :: Int }
|
||||
deriving (Show, Eq, J.ToJSON)
|
||||
|
||||
-- TODO this is treated as milliseconds in fromEnv and as seconds in ToJSON.
|
||||
-- TODO this is treated as milliseconds in fromEnv and as seconds in ToJSON.
|
||||
-- ideally this would have e.g. ... unRefetchInterval :: Milliseconds
|
||||
newtype RefetchInterval = RefetchInterval { unRefetchInterval :: DiffTime }
|
||||
deriving (Show, Eq, J.ToJSON)
|
||||
|
@ -30,6 +30,7 @@ import qualified Data.Aeson.Casing as J
|
||||
import qualified Data.Aeson.Extended as J
|
||||
import qualified Data.Aeson.TH as J
|
||||
import qualified Data.ByteString as B
|
||||
import qualified Data.Environment as E
|
||||
import qualified Data.HashMap.Strict as Map
|
||||
import qualified Data.HashMap.Strict.InsOrd as OMap
|
||||
import qualified Data.Sequence as Seq
|
||||
|
@ -35,6 +35,8 @@ module Hasura.GraphQL.Execute.LiveQuery.Poll (
|
||||
, LiveQueryMetadata(..)
|
||||
) where
|
||||
|
||||
import Data.List.Split (chunksOf)
|
||||
import GHC.AssertNF
|
||||
import Hasura.Prelude
|
||||
|
||||
import qualified Control.Concurrent.Async as A
|
||||
@ -54,8 +56,6 @@ import qualified Database.PG.Query as Q
|
||||
import qualified ListT
|
||||
import qualified StmContainers.Map as STMMap
|
||||
|
||||
import Data.List.Split (chunksOf)
|
||||
import GHC.AssertNF
|
||||
import Control.Lens
|
||||
|
||||
import qualified Hasura.GraphQL.Execute.LiveQuery.TMap as TMap
|
||||
@ -101,7 +101,6 @@ data Subscriber
|
||||
data LiveQueryMetadata
|
||||
= LiveQueryMetadata
|
||||
{ _lqmExecutionTime :: !Clock.DiffTime
|
||||
-- ^ Time spent waiting on the generated query to execute on postgres or the remote.
|
||||
}
|
||||
|
||||
data LiveQueryResponse
|
||||
@ -257,10 +256,10 @@ data Poller
|
||||
|
||||
data PollerIOState
|
||||
= PollerIOState
|
||||
{ _pThread :: !Immortal.Thread
|
||||
{ _pThread :: !Immortal.Thread
|
||||
-- ^ a handle on the poller’s worker thread that can be used to
|
||||
-- 'Immortal.stop' it if all its cohorts stop listening
|
||||
, _pId :: !PollerId
|
||||
, _pId :: !PollerId
|
||||
}
|
||||
|
||||
data PollerKey
|
||||
@ -440,7 +439,6 @@ pollQuery logger pollerId lqOpts pgExecCtx pgQuery cohortMap = do
|
||||
, _pdTotalTime = totalTime
|
||||
}
|
||||
where
|
||||
|
||||
LiveQueriesOptions batchSize _ = lqOpts
|
||||
|
||||
getCohortSnapshot (cohortVars, handlerC) = do
|
||||
|
@ -61,6 +61,7 @@ data LiveQueryId
|
||||
, _lqiSubscriber :: !SubscriberId
|
||||
} deriving Show
|
||||
|
||||
|
||||
addLiveQuery
|
||||
:: L.Logger L.Hasura
|
||||
-> SubscriberMetadata
|
||||
@ -123,6 +124,7 @@ addLiveQuery logger subscriberMetadata lqState plan onResultAction = do
|
||||
|
||||
newPoller = Poller <$> TMap.new <*> STM.newEmptyTMVar
|
||||
|
||||
|
||||
removeLiveQuery
|
||||
:: L.Logger L.Hasura
|
||||
-> LiveQueriesState
|
||||
|
@@ -3,6 +3,7 @@ module Hasura.GraphQL.Execute.Mutation where
 import Hasura.Prelude

 import qualified Data.Aeson as J
+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as Map
 import qualified Data.HashMap.Strict.InsOrd as OMap
 import qualified Data.IntMap as IntMap
@@ -34,37 +35,40 @@ import qualified Language.GraphQL.Draft.Syntax as G

 convertDelete
   :: (HasVersion, MonadIO m)
-  => SessionVariables
+  => Env.Environment
+  -> SessionVariables
   -> RQL.MutationRemoteJoinCtx
   -> RQL.AnnDelG UnpreparedValue
   -> Bool
   -> m RespTx
-convertDelete usrVars rjCtx deleteOperation stringifyNum = do
-  pure $ RQL.execDeleteQuery stringifyNum (Just rjCtx) (preparedDelete, planVariablesSequence usrVars planningState)
+convertDelete env usrVars rjCtx deleteOperation stringifyNum = do
+  pure $ RQL.execDeleteQuery env stringifyNum (Just rjCtx) (preparedDelete, planVariablesSequence usrVars planningState)
   where (preparedDelete, planningState) = runIdentity $ runPlan $ RQL.traverseAnnDel prepareWithPlan deleteOperation

 convertUpdate
   :: (HasVersion, MonadIO m)
-  => SessionVariables
+  => Env.Environment
+  -> SessionVariables
   -> RQL.MutationRemoteJoinCtx
   -> RQL.AnnUpdG UnpreparedValue
   -> Bool
   -> m RespTx
-convertUpdate usrVars rjCtx updateOperation stringifyNum = do
+convertUpdate env usrVars rjCtx updateOperation stringifyNum = do
   pure $ if null $ RQL.uqp1OpExps updateOperation
          then pure $ buildEmptyMutResp $ RQL.uqp1Output preparedUpdate
-         else RQL.execUpdateQuery stringifyNum (Just rjCtx) (preparedUpdate, Seq.empty)
+         else RQL.execUpdateQuery env stringifyNum (Just rjCtx) (preparedUpdate, Seq.empty)
   where preparedUpdate = runIdentity $ RQL.traverseAnnUpd (pure . unpreparedToTextSQL) updateOperation

 convertInsert
   :: (HasVersion, MonadIO m)
-  => SessionVariables
+  => Env.Environment
+  -> SessionVariables
   -> RQL.MutationRemoteJoinCtx
   -> AnnMultiInsert UnpreparedValue
   -> Bool
   -> m RespTx
-convertInsert usrVars rjCtx insertOperation stringifyNum = do
-  pure $ convertToSQLTransaction preparedInsert rjCtx Seq.empty stringifyNum
+convertInsert env usrVars rjCtx insertOperation stringifyNum = do
+  pure $ convertToSQLTransaction env preparedInsert rjCtx Seq.empty stringifyNum
   where preparedInsert = fmapAnnInsert unpreparedToTextSQL insertOperation

 planVariablesSequence :: SessionVariables -> PlanningSt -> Seq.Seq Q.PrepArg
@@ -75,18 +79,19 @@ convertMutationRootField
      , MonadIO m
      , MonadError QErr m
      )
-  => UserInfo
+  => Env.Environment
+  -> UserInfo
   -> HTTP.Manager
   -> HTTP.RequestHeaders
   -> Bool
   -> MutationRootField UnpreparedValue
   -> m (Either (LazyRespTx, HTTP.ResponseHeaders) RemoteField)
-convertMutationRootField userInfo manager reqHeaders stringifyNum = \case
-  RFDB (MDBInsert s) -> noResponseHeaders =<< convertInsert userSession rjCtx s stringifyNum
-  RFDB (MDBUpdate s) -> noResponseHeaders =<< convertUpdate userSession rjCtx s stringifyNum
-  RFDB (MDBDelete s) -> noResponseHeaders =<< convertDelete userSession rjCtx s stringifyNum
+convertMutationRootField env userInfo manager reqHeaders stringifyNum = \case
+  RFDB (MDBInsert s) -> noResponseHeaders =<< convertInsert env userSession rjCtx s stringifyNum
+  RFDB (MDBUpdate s) -> noResponseHeaders =<< convertUpdate env userSession rjCtx s stringifyNum
+  RFDB (MDBDelete s) -> noResponseHeaders =<< convertDelete env userSession rjCtx s stringifyNum
   RFRemote remote -> pure $ Right remote
-  RFAction (AMSync s) -> Left <$> first liftTx <$> resolveActionExecution userInfo s actionExecContext
+  RFAction (AMSync s) -> Left <$> first liftTx <$> resolveActionExecution env userInfo s actionExecContext
   RFAction (AMAsync s) -> noResponseHeaders =<< resolveActionMutationAsync s reqHeaders userSession
   RFRaw s -> noResponseHeaders $ pure $ encJFromJValue s
   where
@@ -99,8 +104,13 @@ convertMutationRootField userInfo manager reqHeaders stringifyNum = \case
     rjCtx = (manager, reqHeaders, userInfo)

 convertMutationSelectionSet
-  :: (HasVersion, MonadIO m, MonadError QErr m)
-  => GQLContext
+  :: ( HasVersion
+     , MonadIO m
+     , MonadError QErr m
+     , MonadTx tx
+     )
+  => Env.Environment
+  -> GQLContext
   -> SQLGenCtx
   -> UserInfo
   -> HTTP.Manager
@@ -108,8 +118,8 @@ convertMutationSelectionSet
   -> G.SelectionSet G.NoFragments G.Name
   -> [G.VariableDefinition]
   -> Maybe GH.VariableValues
-  -> m (ExecutionPlan (LazyRespTx, HTTP.ResponseHeaders) RemoteCall (G.Name, J.Value))
-convertMutationSelectionSet gqlContext sqlGenCtx userInfo manager reqHeaders fields varDefs varValsM = do
+  -> m (ExecutionPlan (tx EncJSON, HTTP.ResponseHeaders) RemoteCall (G.Name, J.Value))
+convertMutationSelectionSet env gqlContext sqlGenCtx userInfo manager reqHeaders fields varDefs varValsM = do
   mutationParser <- onNothing (gqlMutationParser gqlContext) $
     throw400 ValidationFailed "no mutations exist"
   -- Parse the GraphQL query into the RQL AST
@@ -119,13 +129,13 @@ convertMutationSelectionSet gqlContext sqlGenCtx userInfo manager reqHeaders fie
     >>= (mutationParser >>> (`onLeft` reportParseErrors))

   -- Transform the RQL AST into a prepared SQL query
-  txs <- for unpreparedQueries $ convertMutationRootField userInfo manager reqHeaders (stringifyNum sqlGenCtx)
+  txs <- for unpreparedQueries $ convertMutationRootField env userInfo manager reqHeaders (stringifyNum sqlGenCtx)
   let txList = OMap.toList txs
   case (mapMaybe takeTx txList, mapMaybe takeRemote txList) of
     (dbPlans, []) -> do
       let allHeaders = concatMap (snd . snd) dbPlans
           combinedTx = toSingleTx $ map (G.unName *** fst) dbPlans
-      pure $ ExecStepDB (combinedTx, allHeaders)
+      pure $ ExecStepDB (liftTx $ lazyTxToQTx combinedTx, allHeaders)
     ([], remotes@(firstRemote:_)) -> do
       let (remoteOperation, varValsM') =
             buildTypedOperation
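Every converter in this file gains a leading Env.Environment parameter, and each call site threads the value through untouched. A standalone toy (none of these names are from the commit) showing the shape of the refactor, where an explicit parameter replaces ambient System.Environment lookups:

    import qualified Data.Map.Strict as M

    -- Toy illustration: the environment is an ordinary value, so a
    -- function that needs it takes it as an argument instead of doing IO.
    newtype Environment = Environment (M.Map String String)

    resolveWebhookUrl :: Environment -> String -> Either String String
    resolveWebhookUrl (Environment m) var =
      maybe (Left ("environment variable not set: " <> var)) Right (M.lookup var m)

    main :: IO ()
    main = do
      let env = Environment (M.fromList [("WEBHOOK_URL", "https://example.com/hook")])
      print (resolveWebhookUrl env "WEBHOOK_URL")

One payoff of this shape is testability: a test can construct an Environment from a plain map instead of mutating the real process environment.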
@@ -16,13 +16,15 @@ import qualified Data.Aeson as J
 import qualified Data.Aeson.Casing as J
 import qualified Data.Aeson.TH as J

-import Hasura.RQL.Types
-import Hasura.Session

 import qualified Hasura.Cache as Cache
 import qualified Hasura.GraphQL.Execute.LiveQuery as LQ
 import qualified Hasura.GraphQL.Execute.Query as EQ
 import qualified Hasura.GraphQL.Execute.Types as ET
 import qualified Hasura.GraphQL.Transport.HTTP.Protocol as GH
+import Hasura.RQL.Types
+import Hasura.Session


 data PlanId
   = PlanId
@@ -49,13 +51,13 @@ newtype PlanCache
   = PlanCache {_unPlanCache :: Cache.Cache PlanId ReusablePlan}

 data ReusablePlan
-  = RPQuery !EQ.ReusableQueryPlan
+  = RPQuery !EQ.ReusableQueryPlan -- TODO (if we do query plan caching) [QueryRootFldUnresolved]
   | RPSubs !LQ.ReusableLiveQueryPlan

 instance J.ToJSON ReusablePlan where
   toJSON = \case
     RPQuery queryPlan -> J.toJSON queryPlan
     RPSubs subsPlan -> J.toJSON subsPlan

 newtype PlanCacheOptions
   = PlanCacheOptions { unPlanCacheSize :: Maybe Cache.CacheSize }
@@ -12,6 +12,7 @@ module Hasura.GraphQL.Execute.Query
 import qualified Data.Aeson as J
 import qualified Data.ByteString as B
 import qualified Data.ByteString.Lazy as LBS
+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as Map
 import qualified Data.HashMap.Strict.InsOrd as OMap
 import qualified Data.IntMap as IntMap
@@ -28,6 +29,7 @@ import Hasura.Server.Version (HasVersion)
 import qualified Hasura.SQL.DML as S

 import Hasura.EncJSON
+import Hasura.Db
 import Hasura.GraphQL.Context
 import Hasura.GraphQL.Execute.Prepare
 import Hasura.GraphQL.Execute.Resolve
@@ -114,13 +116,18 @@ withPlan usrVars (PGPlan q reqVars prepMap remoteJoins) annVars = do

 -- turn the current plan into a transaction
 mkCurPlanTx
-  :: (HasVersion, MonadError QErr m)
-  => HTTP.Manager
+  :: ( HasVersion
+     , MonadError QErr m
+     , MonadIO tx
+     , MonadTx tx
+     )
+  => Env.Environment
+  -> HTTP.Manager
   -> [HTTP.Header]
   -> UserInfo
   -> FieldPlans
-  -> m (LazyRespTx, GeneratedSqlMap)
-mkCurPlanTx manager reqHdrs userInfo fldPlans = do
+  -> m (tx EncJSON, GeneratedSqlMap)
+mkCurPlanTx env manager reqHdrs userInfo fldPlans = do
   -- generate the SQL and prepared vars or the bytestring
   resolved <- forM fldPlans $ \(alias, fldPlan) -> do
     fldResp <- case fldPlan of
@@ -131,7 +138,7 @@ mkCurPlanTx manager reqHdrs userInfo fldPlans = do
       RFPActionQuery tx -> pure $ RRActionQuery tx
     return (alias, fldResp)

-  return (mkLazyRespTx manager reqHdrs userInfo resolved, mkGeneratedSqlMap resolved)
+  pure (mkLazyRespTx env manager reqHdrs userInfo resolved, mkGeneratedSqlMap resolved)

 getVarArgNum :: (MonadState PlanningSt m) => G.Name -> m Int
 getVarArgNum var = do
@@ -211,19 +218,20 @@ parseGraphQLQuery gqlContext varDefs varValsM fields =
     throwError (err400 ValidationFailed peMessage){ qePath = pePath }

 convertQuerySelSet
-  :: forall m. (HasVersion, MonadError QErr m, MonadIO m)
-  => GQLContext
+  :: forall m tx . (HasVersion, MonadError QErr m, MonadIO m, MonadIO tx, MonadTx tx)
+  => Env.Environment
+  -> GQLContext
   -> UserInfo
   -> HTTP.Manager
   -> HTTP.RequestHeaders
   -> G.SelectionSet G.NoFragments G.Name
   -> [G.VariableDefinition]
   -> Maybe GH.VariableValues
-  -> m ( ExecutionPlan (LazyRespTx, GeneratedSqlMap) RemoteCall (G.Name, J.Value)
+  -> m ( ExecutionPlan (tx EncJSON, GeneratedSqlMap) RemoteCall (G.Name, J.Value)
       , Maybe ReusableQueryPlan
       , InsOrdHashMap G.Name (QueryRootField UnpreparedValue)
       )
-convertQuerySelSet gqlContext userInfo manager reqHeaders fields varDefs varValsM = do
+convertQuerySelSet env gqlContext userInfo manager reqHeaders fields varDefs varValsM = do
   -- Parse the GraphQL query into the RQL AST
   (unpreparedQueries, _reusability) <- parseGraphQLQuery gqlContext varDefs varValsM fields

@@ -261,7 +269,7 @@ convertQuerySelSet gqlContext userInfo manager reqHeaders fields varDefs varVals

   executionPlan <- case (dbPlans, remoteFields) of
-    (dbs, Seq.Empty) -> ExecStepDB <$> mkCurPlanTx manager reqHeaders userInfo (toList dbs)
+    (dbs, Seq.Empty) -> ExecStepDB <$> mkCurPlanTx env manager reqHeaders userInfo (toList dbs)
     (Seq.Empty, remotes@(firstRemote Seq.:<| _)) -> do
       let (remoteOperation, varValsM) =
             buildTypedOperation
@@ -281,20 +289,25 @@ convertQuerySelSet gqlContext userInfo manager reqHeaders fields varDefs varVals
       :: ActionQuery UnpreparedValue -> StateT PlanningSt m ActionQueryPlan
     convertActionQuery = \case
       AQQuery s -> (AQPQuery . fst) <$>
-        lift (resolveActionExecution userInfo s $ ActionExecContext manager reqHeaders usrVars)
+        lift (resolveActionExecution env userInfo s $ ActionExecContext manager reqHeaders usrVars)
       AQAsync s -> AQPAsyncQuery <$>
         DS.traverseAnnSimpleSelect prepareWithPlan (resolveAsyncActionQuery userInfo s)

 -- use the existing plan and new variables to create a pg query
 queryOpFromPlan
-  :: (HasVersion, MonadError QErr m)
-  => HTTP.Manager
+  :: ( HasVersion
+     , MonadError QErr m
+     , MonadIO tx
+     , MonadTx tx
+     )
+  => Env.Environment
+  -> HTTP.Manager
   -> [HTTP.Header]
   -> UserInfo
   -> Maybe GH.VariableValues
   -> ReusableQueryPlan
-  -> m (LazyRespTx, GeneratedSqlMap)
-queryOpFromPlan manager reqHdrs userInfo varValsM (ReusableQueryPlan varTypes fldPlans) = do
+  -> m (tx EncJSON, GeneratedSqlMap)
+queryOpFromPlan env manager reqHdrs userInfo varValsM (ReusableQueryPlan varTypes fldPlans) = do
   validatedVars <- _validateVariablesForReuse varTypes varValsM
   -- generate the SQL and prepared vars or the bytestring
   resolved <- forM fldPlans $ \(alias, fldPlan) ->
@@ -302,7 +315,7 @@ queryOpFromPlan manager reqHdrs userInfo varValsM (ReusableQueryPlan varTypes fl
       RFPRaw resp -> return $ RRRaw resp
       RFPPostgres pgPlan -> RRSql <$> withPlan (_uiSession userInfo) pgPlan validatedVars

-  return (mkLazyRespTx manager reqHdrs userInfo resolved, mkGeneratedSqlMap resolved)
+  pure (mkLazyRespTx env manager reqHdrs userInfo resolved, mkGeneratedSqlMap resolved)

 data PreparedSql
   = PreparedSql
@@ -334,19 +347,28 @@ data ResolvedQuery
 -- prepared statement
 type GeneratedSqlMap = [(G.Name, Maybe PreparedSql)]

-mkLazyRespTx :: HasVersion
-  => HTTP.Manager -> [HTTP.Header] -> UserInfo -> [(G.Name, ResolvedQuery)] -> LazyRespTx
-mkLazyRespTx manager reqHdrs userInfo resolved =
-  fmap encJFromAssocList $ forM resolved $ \(alias, node) -> do
+mkLazyRespTx
+  :: ( HasVersion
+     , MonadIO tx
+     , MonadTx tx
+     )
+  => Env.Environment
+  -> HTTP.Manager
+  -> [HTTP.Header]
+  -> UserInfo
+  -> [(G.Name, ResolvedQuery)]
+  -> tx EncJSON
+mkLazyRespTx env manager reqHdrs userInfo resolved =
+  encJFromAssocList <$> forM resolved \(alias, node) -> do
     resp <- case node of
       RRRaw bs -> return $ encJFromBS bs
       RRSql (PreparedSql q args maybeRemoteJoins) -> do
         let prepArgs = map fst args
         case maybeRemoteJoins of
           Nothing -> liftTx $ asSingleRowJsonResp q (map fst args)
           Just remoteJoins ->
-            executeQueryWithRemoteJoins manager reqHdrs userInfo q prepArgs remoteJoins
-      RRActionQuery tx -> tx
+            executeQueryWithRemoteJoins env manager reqHdrs userInfo q prepArgs remoteJoins
+      RRActionQuery tx' -> liftTx $ lazyTxToQTx tx'
     return (G.unName alias, resp)

 mkGeneratedSqlMap :: [(G.Name, ResolvedQuery)] -> GeneratedSqlMap
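A second refactor rides along in this file: plan builders now return tx EncJSON for any tx satisfying MonadIO and MonadTx, instead of a concrete LazyRespTx, so the caller picks the executor. A self-contained sketch of that polymorphic-return pattern, using a simplified stand-in for the engine's MonadTx class (names here are illustrative):

    {-# LANGUAGE GeneralizedNewtypeDeriving #-}

    -- Simplified stand-in for the engine's MonadTx.
    class Monad tx => MonadTxLike tx where
      execSql :: String -> tx ()

    newtype DemoTx a = DemoTx (IO a)
      deriving (Functor, Applicative, Monad)

    instance MonadTxLike DemoTx where
      execSql stmt = DemoTx (putStrLn ("exec: " <> stmt))

    -- Polymorphic "plan to transaction", mirroring mkCurPlanTx's new
    -- shape: the same builder runs in any suitable transaction monad.
    planToTx :: MonadTxLike tx => [String] -> tx ()
    planToTx = mapM_ execSql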
@@ -6,6 +6,7 @@ module Hasura.GraphQL.Explain
 import qualified Data.Aeson as J
 import qualified Data.Aeson.Casing as J
 import qualified Data.Aeson.TH as J
+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as Map
 import qualified Data.HashMap.Strict.InsOrd as OMap
 import qualified Database.PG.Query as Q
@@ -14,10 +14,11 @@ import Hasura.Prelude

 import qualified Data.Aeson as J
 import qualified Data.ByteString.Lazy as BL
+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as Map
 import qualified Data.Text as T
-import qualified Language.GraphQL.Draft.Parser as G
 import qualified Language.GraphQL.Draft.Syntax as G
+import qualified Language.GraphQL.Draft.Parser as G
 import qualified Network.HTTP.Client as HTTP
 import qualified Network.HTTP.Types as N
 import qualified Network.Wreq as Wreq
@@ -37,12 +38,13 @@ introspectionQuery = $(embedStringFile "src-rsr/introspection.json")
 fetchRemoteSchema
   :: forall m
    . (HasVersion, MonadIO m, MonadUnique m, MonadError QErr m)
-  => HTTP.Manager
+  => Env.Environment
+  -> HTTP.Manager
   -> RemoteSchemaName
   -> RemoteSchemaInfo
   -> m RemoteSchemaCtx
-fetchRemoteSchema manager schemaName schemaInfo@(RemoteSchemaInfo url headerConf _ timeout) = do
-  headers <- makeHeadersFromConf headerConf
+fetchRemoteSchema env manager schemaName schemaInfo@(RemoteSchemaInfo url headerConf _ timeout) = do
+  headers <- makeHeadersFromConf env headerConf
   let hdrsWithDefaults = addDefaultHeaders headers

   initReqE <- liftIO $ try $ HTTP.parseRequest (show url)
@@ -379,17 +381,18 @@ execRemoteGQ'
      , MonadIO m
      , MonadError QErr m
      )
-  => HTTP.Manager
+  => Env.Environment
+  -> HTTP.Manager
   -> UserInfo
   -> [N.Header]
   -> GQLReqUnparsed
   -> RemoteSchemaInfo
   -> G.OperationType
   -> m (DiffTime, [N.Header], BL.ByteString)
-execRemoteGQ' manager userInfo reqHdrs q rsi opType = do
+execRemoteGQ' env manager userInfo reqHdrs q rsi opType = do
   when (opType == G.OperationTypeSubscription) $
     throw400 NotSupported "subscription to remote server is not supported"
-  confHdrs <- makeHeadersFromConf hdrConf
+  confHdrs <- makeHeadersFromConf env hdrConf
   let clientHdrs = bool [] (mkClientHeadersForward reqHdrs) fwdClientHdrs
   -- filter out duplicate headers
   -- priority: conf headers > resolved userinfo vars > client headers
@@ -407,7 +410,6 @@ execRemoteGQ' manager userInfo reqHdrs q rsi opType = do
         , HTTP.requestBody = HTTP.RequestBodyLBS (J.encode q)
         , HTTP.responseTimeout = HTTP.responseTimeoutMicro (timeout * 1000000)
         }
-
   (time, res) <- withElapsedTime $ liftIO $ try $ HTTP.httpLbs req manager
   resp <- either httpThrow return res
   pure (time, mkSetCookieHeaders resp, resp ^. Wreq.responseBody)
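makeHeadersFromConf now takes the environment so header values configured as "from env" can be resolved purely against the snapshot. A hedged sketch of such a resolver; HeaderConf's real shape is not shown in this diff, so the constructors below are assumptions for illustration:

    import qualified Data.Map.Strict as M

    newtype Environment = Environment (M.Map String String)

    -- A header is either a literal value or the name of an env var.
    data HeaderConf = HeaderValue String String | HeaderFromEnv String String

    makeHeader :: Environment -> HeaderConf -> Either String (String, String)
    makeHeader _ (HeaderValue name value) = Right (name, value)
    makeHeader (Environment m) (HeaderFromEnv name var) =
      case M.lookup var m of
        Just value -> Right (name, value)
        Nothing    -> Left ("header " <> name <> ": env var not set: " <> var)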
@@ -20,12 +20,14 @@ module Hasura.GraphQL.Resolve

 import Data.Has

+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as Map
 import qualified Database.PG.Query as Q
 import qualified Language.GraphQL.Draft.Syntax as G
 import qualified Network.HTTP.Client as HTTP
 import qualified Network.HTTP.Types as HTTP

+import Hasura.EncJSON
 import Hasura.GraphQL.Resolve.Context
 import Hasura.Prelude
 import Hasura.RQL.Types
@@ -105,10 +107,11 @@ queryFldToPGAST
      , HasVersion
      , MonadIO m
      )
-  => V.Field
+  => Env.Environment
+  -> V.Field
   -> RA.QueryActionExecuter
   -> m QueryRootFldUnresolved
-queryFldToPGAST fld actionExecuter = do
+queryFldToPGAST env fld actionExecuter = do
   opCtx <- getOpCtx $ V._fName fld
   userInfo <- asks getter
   case opCtx of
@@ -147,8 +150,9 @@ queryFldToPGAST fld actionExecuter = do
           f = case jsonAggType of
             DS.JASMultipleRows -> QRFActionExecuteList
             DS.JASSingleObject -> QRFActionExecuteObject
-      f <$> actionExecuter (RA.resolveActionQuery fld ctx (_uiSession userInfo))
-    QCSelectConnection pk ctx ->
+      f <$> actionExecuter (RA.resolveActionQuery env fld ctx (_uiSession userInfo))
+    QCSelectConnection pk ctx -> do
       validateHdrs userInfo (_socHeaders ctx)
       QRFConnection <$> RS.convertConnectionSelect pk ctx fld
     QCFuncConnection pk ctx ->
       QRFConnection <$> RS.convertConnectionFuncQuery pk ctx fld
@@ -167,34 +171,37 @@ mutFldToTx
      , Has HTTP.Manager r
      , Has [HTTP.Header] r
      , MonadIO m
+     , MonadIO tx
+     , MonadTx tx
      )
-  => V.Field
-  -> m (RespTx, HTTP.ResponseHeaders)
-mutFldToTx fld = do
+  => Env.Environment
+  -> V.Field
+  -> m (tx EncJSON, HTTP.ResponseHeaders)
+mutFldToTx env fld = do
   userInfo <- asks getter
   opCtx <- getOpCtx $ V._fName fld
   let noRespHeaders = fmap (,[])
   case opCtx of
     MCInsert ctx -> do
       validateHdrs userInfo (_iocHeaders ctx)
-      noRespHeaders $ RI.convertInsert (userRole userInfo) (_iocTable ctx) fld
+      noRespHeaders $ RI.convertInsert env rjCtx roleName (_iocTable ctx) fld
     MCInsertOne ctx -> do
       validateHdrs userInfo (_iocHeaders ctx)
-      noRespHeaders $ RI.convertInsertOne (userRole userInfo) (_iocTable ctx) fld
+      noRespHeaders $ RI.convertInsertOne env rjCtx roleName (_iocTable ctx) fld
     MCUpdate ctx -> do
       validateHdrs userInfo (_uocHeaders ctx)
-      noRespHeaders $ RM.convertUpdate ctx fld
+      noRespHeaders $ RM.convertUpdate env ctx rjCtx fld
     MCUpdateByPk ctx -> do
       validateHdrs userInfo (_uocHeaders ctx)
-      noRespHeaders $ RM.convertUpdateByPk ctx fld
+      noRespHeaders $ RM.convertUpdateByPk env ctx rjCtx fld
     MCDelete ctx -> do
      validateHdrs userInfo (_docHeaders ctx)
-      noRespHeaders $ RM.convertDelete ctx fld
+      noRespHeaders $ RM.convertDelete env ctx rjCtx fld
     MCDeleteByPk ctx -> do
       validateHdrs userInfo (_docHeaders ctx)
-      noRespHeaders $ RM.convertDeleteByPk ctx fld
+      noRespHeaders $ RM.convertDeleteByPk env ctx rjCtx fld
     MCAction ctx ->
-      RA.resolveActionMutation fld ctx (userVars userInfo)
+      RA.resolveActionMutation env fld ctx userInfo

 getOpCtx
   :: ( MonadReusability m
@@ -14,29 +14,33 @@ module Hasura.GraphQL.Resolve.Action

 import Hasura.Prelude

-import qualified Control.Concurrent.Async as A
 import qualified Data.Aeson as J
 import qualified Data.Aeson.Casing as J
 import qualified Data.Aeson.TH as J
 import qualified Data.ByteString.Lazy as BL
 import qualified Data.CaseInsensitive as CI
 import qualified Data.HashMap.Strict as Map
 import qualified Data.Text as T
 import qualified Data.UUID as UUID
 import qualified Database.PG.Query as Q
 import qualified Language.GraphQL.Draft.Syntax as G
 import qualified Network.HTTP.Client as HTTP
 import qualified Network.HTTP.Types as HTTP
 import qualified Network.Wreq as Wreq

 import Control.Concurrent (threadDelay)
 import Control.Exception (try)
 import Control.Lens
 import Data.IORef

 import qualified Hasura.RQL.DML.RemoteJoin as RJ
 import qualified Hasura.RQL.DML.Select as RS
 -- import qualified Hasura.GraphQL.Resolve.Select as GRS
+import Control.Monad.Trans.Control (MonadBaseControl)
+
+import qualified Control.Concurrent.Async.Lifted.Safe as LA
+import qualified Data.Environment as Env

 import Hasura.EncJSON
 import Hasura.GraphQL.Execute.Prepare
@@ -125,14 +129,15 @@ $(J.deriveToJSON (J.aesonDrop 4 J.snakeCase) ''ActionInternalError)
 --     , Has HTTP.Manager r
 --     , Has [HTTP.Header] r
 --     )
---  => Field
+--  => Env.Environment
+--  -> Field
 --  -> ActionMutationExecutionContext
 --  -> UserVars
 --  -> m (RespTx, HTTP.ResponseHeaders)
--- resolveActionMutation field executionContext sessionVariables =
+-- resolveActionMutation env field executionContext sessionVariables =
 --   case executionContext of
 --     ActionMutationSyncWebhook executionContextSync ->
---       resolveActionMutationSync field executionContextSync sessionVariables
+--       resolveActionMutationSync env field executionContextSync sessionVariables
 --     ActionMutationAsync ->
 --       (,[]) <$> resolveActionMutationAsync field sessionVariables

@@ -142,14 +147,15 @@ resolveActionExecution
      , MonadError QErr m
      , MonadIO m
      )
-  => UserInfo
+  => Env.Environment
+  -> UserInfo
   -> AnnActionExecution UnpreparedValue
   -> ActionExecContext
   -> m (RespTx, HTTP.ResponseHeaders)
-resolveActionExecution userInfo annAction execContext = do
+resolveActionExecution env userInfo annAction execContext = do
   let actionContext = ActionContext actionName
       handlerPayload = ActionWebhookPayload actionContext sessionVariables inputPayload
-  (webhookRes, respHeaders) <- callWebhook manager outputType outputFields reqHeaders confHeaders
+  (webhookRes, respHeaders) <- callWebhook env manager outputType outputFields reqHeaders confHeaders
                                forwardClientHeaders resolvedWebhook handlerPayload
   let webhookResponseExpression = RS.AEInput $ UVLiteral $
         toTxtValue $ WithScalarType PGJSONB $ PGValJSONB $ Q.JSONB $ J.toJSON webhookRes
@@ -163,7 +169,7 @@ resolveActionExecution userInfo annAction execContext = do
         Just remoteJoins ->
           let query = Q.fromBuilder $ toSQL $
                 RS.mkSQLSelect jsonAggType astResolvedWithoutRemoteJoins
-          in RJ.executeQueryWithRemoteJoins manager reqHeaders userInfo query [] remoteJoins
+          in RJ.executeQueryWithRemoteJoins env manager reqHeaders userInfo query [] remoteJoins
         Nothing ->
           asSingleRowJsonResp (Q.fromBuilder $ toSQL $ RS.mkSQLSelect jsonAggType astResolved) []
   where
@@ -200,17 +206,18 @@ restrictActionExecuter errMsg _ =
 --     , Has OrdByCtx r
 --     , Has SQLGenCtx r
 --     )
---  => Field
+--  => Env.Environment
+--  -> Field
 --  -> ActionExecutionContext
 --  -> SessionVariables
 --  -> HTTP.Manager
 --  -> [HTTP.Header]
 --  -> m (RS.AnnSimpleSelG UnresolvedVal)
--- resolveActionQuery field executionContext sessionVariables httpManager reqHeaders = do
+-- resolveActionQuery env field executionContext sessionVariables httpManager reqHeaders = do
 --   let inputArgs = J.toJSON $ fmap annInpValueToJson $ _fArguments field
 --       actionContext = ActionContext actionName
 --       handlerPayload = ActionWebhookPayload actionContext sessionVariables inputArgs
---   (webhookRes, _) <- callWebhook httpManager outputType outputFields reqHeaders confHeaders
+--   (webhookRes, _) <- callWebhook env httpManager outputType outputFields reqHeaders confHeaders
 --     forwardClientHeaders resolvedWebhook handlerPayload
 --   let webhookResponseExpression = RS.AEInput $ UVSQL $
 --         toTxtValue $ WithScalarType PGJSONB $ PGValJSONB $ Q.JSONB $ J.toJSON webhookRes
@@ -238,13 +245,14 @@ table provides the action response. See Note [Resolving async action query/subsc

 -- | Resolve asynchronous action mutation which returns only the action uuid
 resolveActionMutationAsync
-  :: (MonadError QErr m)
+  :: ( MonadError QErr m
+     , MonadTx tx)
   => AnnActionMutationAsync
   -> [HTTP.Header]
   -> SessionVariables
-  -> m RespTx
+  -> m (tx EncJSON)
 resolveActionMutationAsync annAction reqHeaders sessionVariables = do
-  pure $ do
+  pure $ liftTx do
    actionId <- runIdentity . Q.getRow <$> Q.withQE defaultTxErrorHandler [Q.sql|
     INSERT INTO
         "hdb_catalog"."hdb_action_log"
@@ -302,9 +310,9 @@ resolveAsyncActionQuery userInfo annAction =
     actionLogTable = QualifiedObject (SchemaName "hdb_catalog") (TableName "hdb_action_log")

     -- TODO (from master):- Avoid using PGColumnInfo
-    mkAnnFldFromPGCol column columnType =
+    mkAnnFldFromPGCol column' columnType =
       flip RS.mkAnnColumnField Nothing $
-        PGColumnInfo (unsafePGCol column) (G.unsafeMkName column) 0 (PGColumnScalar columnType) True Nothing
+        PGColumnInfo (unsafePGCol column') (G.unsafeMkName column') 0 (PGColumnScalar columnType) True Nothing

     tableBoolExpression =
       let actionIdColumnInfo = PGColumnInfo (unsafePGCol "id") $$(G.litName "id")
@@ -334,23 +342,29 @@ data ActionLogItem
 -- | Process async actions from hdb_catalog.hdb_action_log table. This functions is executed in a background thread.
 -- See Note [Async action architecture] above
 asyncActionsProcessor
-  :: HasVersion
-  => IORef (RebuildableSchemaCache Run, SchemaCacheVer)
+  :: forall m void
+   . ( HasVersion
+     , MonadIO m
+     , MonadBaseControl IO m
+     , LA.Forall (LA.Pure m)
+     )
+  => Env.Environment
+  -> IORef (RebuildableSchemaCache Run, SchemaCacheVer)
   -> Q.PGPool
   -> HTTP.Manager
-  -> IO void
-asyncActionsProcessor cacheRef pgPool httpManager = forever $ do
-  asyncInvocations <- getUndeliveredEvents
-  actionCache <- scActions . lastBuiltSchemaCache . fst <$> readIORef cacheRef
-  A.mapConcurrently_ (callHandler actionCache) asyncInvocations
-  threadDelay (1 * 1000 * 1000)
+  -> m void
+asyncActionsProcessor env cacheRef pgPool httpManager = forever $ do
+  asyncInvocations <- liftIO getUndeliveredEvents
+  actionCache <- scActions . lastBuiltSchemaCache . fst <$> liftIO (readIORef cacheRef)
+  LA.mapConcurrently_ (callHandler actionCache) asyncInvocations
+  liftIO $ threadDelay (1 * 1000 * 1000)
   where
     runTx :: (Monoid a) => Q.TxE QErr a -> IO a
     runTx q = do
       res <- runExceptT $ Q.runTx' pgPool q
       either mempty return res

-    callHandler :: ActionCache -> ActionLogItem -> IO ()
+    callHandler :: ActionCache -> ActionLogItem -> m ()
     callHandler actionCache actionLogItem = do
       let ActionLogItem actionId actionName reqHeaders
             sessionVariables inputPayload = actionLogItem
@@ -365,10 +379,10 @@ asyncActionsProcessor cacheRef pgPool httpManager = forever $ do
           outputType = _adOutputType definition
           actionContext = ActionContext actionName
       eitherRes <- runExceptT $
-        callWebhook httpManager outputType outputFields reqHeaders confHeaders
+        callWebhook env httpManager outputType outputFields reqHeaders confHeaders
           forwardClientHeaders webhookUrl $
           ActionWebhookPayload actionContext sessionVariables inputPayload
-      case eitherRes of
+      liftIO $ case eitherRes of
        Left e -> setError actionId e
        Right (responsePayload, _) -> setCompleted actionId $ J.toJSON responsePayload
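asyncActionsProcessor is generalized here from plain IO to any m with MonadIO, MonadBaseControl IO m, and LA.Forall (LA.Pure m), with the concurrent fan-out moving from Control.Concurrent.Async to lifted-async's Safe module. A reduced sketch of the same loop shape, with a toy work source and handler standing in for the real catalog polling:

    {-# LANGUAGE FlexibleContexts #-}

    import qualified Control.Concurrent.Async.Lifted.Safe as LA
    import Control.Concurrent (threadDelay)
    import Control.Monad (forever)
    import Control.Monad.Base (liftBase)
    import Control.Monad.Trans.Control (MonadBaseControl)

    -- Toy processor loop: poll, fan out concurrently, sleep, repeat.
    processor
      :: (MonadBaseControl IO m, LA.Forall (LA.Pure m))
      => m [Int] -> (Int -> m ()) -> m void
    processor getWork handle = forever $ do
      work <- getWork
      LA.mapConcurrently_ handle work
      liftBase $ threadDelay (1 * 1000 * 1000)  -- one second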
@@ -423,7 +437,8 @@ asyncActionsProcessor cacheRef pgPool httpManager = forever $ do

 callWebhook
   :: forall m. (HasVersion, MonadIO m, MonadError QErr m)
-  => HTTP.Manager
+  => Env.Environment
+  -> HTTP.Manager
   -> GraphQLType
   -> ActionOutputFields
   -> [HTTP.Header]
@@ -432,18 +447,23 @@ callWebhook
   -> ResolvedWebhook
   -> ActionWebhookPayload
   -> m (ActionWebhookResponse, HTTP.ResponseHeaders)
-callWebhook manager outputType outputFields reqHeaders confHeaders
+callWebhook env manager outputType outputFields reqHeaders confHeaders
   forwardClientHeaders resolvedWebhook actionWebhookPayload = do
-  resolvedConfHeaders <- makeHeadersFromConf confHeaders
+  resolvedConfHeaders <- makeHeadersFromConf env confHeaders
   let clientHeaders = if forwardClientHeaders then mkClientHeadersForward reqHeaders else []
       contentType = ("Content-Type", "application/json")
-      options = wreqOptions manager $
-        -- Using HashMap to avoid duplicate headers between configuration headers
-        -- and client headers where configuration headers are preferred
-        contentType : (Map.toList . Map.fromList) (resolvedConfHeaders <> clientHeaders)
+      -- Using HashMap to avoid duplicate headers between configuration headers
+      -- and client headers where configuration headers are preferred
+      hdrs = contentType : (Map.toList . Map.fromList) (resolvedConfHeaders <> clientHeaders)
       postPayload = J.toJSON actionWebhookPayload
       url = unResolvedWebhook resolvedWebhook
-  httpResponse <- liftIO $ try $ Wreq.postWith options (T.unpack url) postPayload
+  httpResponse <- do
+    initReq <- liftIO $ HTTP.parseRequest (T.unpack url)
+    let req = initReq { HTTP.method = "POST"
+                      , HTTP.requestHeaders = addDefaultHeaders hdrs
+                      , HTTP.requestBody = HTTP.RequestBodyLBS (J.encode postPayload)
+                      }
+    liftIO . try $ HTTP.httpLbs req manager
   let requestInfo = ActionRequestInfo url postPayload $
         confHeaders <> toHeadersConf clientHeaders
   case httpResponse of
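Besides the env threading, this hunk swaps Wreq.postWith for a hand-built http-client request, which lets addDefaultHeaders participate in the outgoing headers. A standalone sketch of the equivalent POST with plain http-client; the URL and payload below are placeholders:

    {-# LANGUAGE OverloadedStrings #-}

    import qualified Data.Aeson as J
    import qualified Network.HTTP.Client as HTTP
    import qualified Network.HTTP.Client.TLS as HTTP

    main :: IO ()
    main = do
      manager <- HTTP.newManager HTTP.tlsManagerSettings
      initReq <- HTTP.parseRequest "https://example.com/hook"  -- placeholder URL
      let req = initReq { HTTP.method = "POST"
                        , HTTP.requestHeaders = [("Content-Type", "application/json")]
                        , HTTP.requestBody = HTTP.RequestBodyLBS (J.encode ())
                        }
      resp <- HTTP.httpLbs req manager
      print (HTTP.responseStatus resp)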
@@ -28,10 +28,8 @@ module Hasura.GraphQL.Resolve.InputValue

 import Hasura.Prelude

-import qualified Text.Builder as TB
-
+import qualified Text.Builder as TB
 import qualified Language.GraphQL.Draft.Syntax as G

 import qualified Hasura.RQL.Types as RQL

 import Hasura.GraphQL.Resolve.Context
@@ -26,6 +26,7 @@ import qualified Data.Sequence as Seq
 import qualified Data.Text as T
 import qualified Database.PG.Query as Q
 import qualified Language.GraphQL.Draft.Syntax as G
+import qualified Data.Environment as Env

 import qualified Hasura.GraphQL.Parser as P
 import qualified Hasura.RQL.DML.Delete.Types as RQL
@@ -591,26 +592,28 @@ fmapAnnInsert f (annIns, mutationOutput) =

 convertToSQLTransaction
   :: (HasVersion, MonadTx m, MonadIO m)
-  => AnnMultiInsert S.SQLExp
+  => Env.Environment
+  -> AnnMultiInsert S.SQLExp
   -> RQL.MutationRemoteJoinCtx
   -> Seq.Seq Q.PrepArg
   -> Bool
   -> m EncJSON
-convertToSQLTransaction (annIns, mutationOutput) rjCtx planVars stringifyNum =
+convertToSQLTransaction env (annIns, mutationOutput) rjCtx planVars stringifyNum =
   if null $ _aiInsObj annIns
   then pure $ buildEmptyMutResp mutationOutput
-  else insertMultipleObjects annIns [] rjCtx mutationOutput planVars stringifyNum
+  else insertMultipleObjects env annIns [] rjCtx mutationOutput planVars stringifyNum

 insertMultipleObjects
   :: (HasVersion, MonadTx m, MonadIO m)
-  => MultiObjIns S.SQLExp
+  => Env.Environment
+  -> MultiObjIns S.SQLExp
   -> [(PGCol, S.SQLExp)]
   -> RQL.MutationRemoteJoinCtx
   -> RQL.MutationOutput
   -> Seq.Seq Q.PrepArg
   -> Bool
   -> m EncJSON
-insertMultipleObjects multiObjIns additionalColumns rjCtx mutationOutput planVars stringifyNum =
+insertMultipleObjects env multiObjIns additionalColumns rjCtx mutationOutput planVars stringifyNum =
   bool withoutRelsInsert withRelsInsert anyRelsToInsert
   where
     AnnIns insObjs table conflictClause checkCondition columnInfos defVals = multiObjIns
@@ -631,33 +634,34 @@ insertMultipleObjects multiObjIns additionalColumns rjCtx mutationOutput planVar
           checkCondition
           mutationOutput
           columnInfos
-      RQL.execInsertQuery stringifyNum (Just rjCtx) (insertQuery, planVars)
+      RQL.execInsertQuery env stringifyNum (Just rjCtx) (insertQuery, planVars)

     withRelsInsert = do
       insertRequests <- for insObjs \obj -> do
         let singleObj = AnnIns obj table conflictClause checkCondition columnInfos defVals
-        insertObject singleObj additionalColumns rjCtx planVars stringifyNum
+        insertObject env singleObj additionalColumns rjCtx planVars stringifyNum
       let affectedRows = sum $ map fst insertRequests
           columnValues = catMaybes $ map snd insertRequests
       selectExpr <- RQL.mkSelCTEFromColVals table columnInfos columnValues
       let (mutOutputRJ, remoteJoins) = RQL.getRemoteJoinsMutationOutput mutationOutput
           sqlQuery = Q.fromBuilder $ toSQL $
                      RQL.mkMutationOutputExp table columnInfos (Just affectedRows) selectExpr mutOutputRJ stringifyNum
-      RQL.executeMutationOutputQuery sqlQuery [] $ (,rjCtx) <$> remoteJoins
+      RQL.executeMutationOutputQuery env sqlQuery [] $ (,rjCtx) <$> remoteJoins

 insertObject
   :: (HasVersion, MonadTx m, MonadIO m)
-  => SingleObjIns S.SQLExp
+  => Env.Environment
+  -> SingleObjIns S.SQLExp
   -> [(PGCol, S.SQLExp)]
   -> RQL.MutationRemoteJoinCtx
   -> Seq.Seq Q.PrepArg
   -> Bool
   -> m (Int, Maybe (ColumnValues TxtEncodedPGVal))
-insertObject singleObjIns additionalColumns rjCtx planVars stringifyNum = do
+insertObject env singleObjIns additionalColumns rjCtx planVars stringifyNum = do
   validateInsert (map fst columns) (map _riRelInfo objectRels) (map fst additionalColumns)

   -- insert all object relations and fetch this insert dependent column values
-  objInsRes <- forM objectRels $ insertObjRel planVars rjCtx stringifyNum
+  objInsRes <- forM objectRels $ insertObjRel env planVars rjCtx stringifyNum

   -- prepare final insert columns
   let objRelAffRows = sum $ map fst objInsRes
@@ -683,7 +687,7 @@ insertObject singleObjIns additionalColumns rjCtx planVars stringifyNum = do
     withArrRels colValM = do
       colVal <- onNothing colValM $ throw400 NotSupported cannotInsArrRelErr
       arrDepColsWithVal <- fetchFromColVals colVal arrRelDepCols
-      arrInsARows <- forM arrayRels $ insertArrRel arrDepColsWithVal rjCtx planVars stringifyNum
+      arrInsARows <- forM arrayRels $ insertArrRel env arrDepColsWithVal rjCtx planVars stringifyNum
       return $ sum arrInsARows

     asSingleObject = \case
@@ -697,13 +701,14 @@ insertObject singleObjIns additionalColumns rjCtx planVars stringifyNum = do

 insertObjRel
   :: (HasVersion, MonadTx m, MonadIO m)
-  => Seq.Seq Q.PrepArg
+  => Env.Environment
+  -> Seq.Seq Q.PrepArg
   -> RQL.MutationRemoteJoinCtx
   -> Bool
   -> ObjRelIns S.SQLExp
   -> m (Int, [(PGCol, S.SQLExp)])
-insertObjRel planVars rjCtx stringifyNum objRelIns = do
-  (affRows, colValM) <- insertObject singleObjIns [] rjCtx planVars stringifyNum
+insertObjRel env planVars rjCtx stringifyNum objRelIns = do
+  (affRows, colValM) <- insertObject env singleObjIns [] rjCtx planVars stringifyNum
   colVal <- onNothing colValM $ throw400 NotSupported errMsg
   retColsWithVals <- fetchFromColVals colVal rColInfos
   let columns = flip mapMaybe (Map.toList mapCols) \(column, target) -> do
@@ -724,17 +729,18 @@ insertObjRel planVars rjCtx stringifyNum objRelIns = do

 insertArrRel
   :: (HasVersion, MonadTx m, MonadIO m)
-  => [(PGCol, S.SQLExp)]
+  => Env.Environment
+  -> [(PGCol, S.SQLExp)]
   -> RQL.MutationRemoteJoinCtx
   -> Seq.Seq Q.PrepArg
   -> Bool
   -> ArrRelIns S.SQLExp
   -> m Int
-insertArrRel resCols rjCtx planVars stringifyNum arrRelIns = do
+insertArrRel env resCols rjCtx planVars stringifyNum arrRelIns = do
   let additionalColumns = flip mapMaybe resCols \(column, value) -> do
         target <- Map.lookup column mapping
         Just (target, value)
-  resBS <- insertMultipleObjects multiObjIns additionalColumns rjCtx mutOutput planVars stringifyNum
+  resBS <- insertMultipleObjects env multiObjIns additionalColumns rjCtx mutOutput planVars stringifyNum
   resObj <- decodeEncJSON resBS
   onNothing (Map.lookup ("affected_rows" :: T.Text) resObj) $
     throw500 "affected_rows not returned in array rel insert"
@@ -25,6 +25,7 @@ import Hasura.Session

 import qualified Data.Aeson as J
 import qualified Data.HashMap.Strict as Map
+import qualified Data.Environment as Env
 import qualified Database.PG.Query as Q
 import qualified Hasura.GraphQL.Execute as E
 import qualified Hasura.GraphQL.Execute.Query as EQ
@@ -42,14 +43,15 @@ runGQ
      , E.MonadGQLExecutionCheck m
      , MonadQueryLog m
      )
-  => RequestId
+  => Env.Environment
+  -> RequestId
   -> UserInfo
   -> Wai.IpAddress
   -> [HTTP.Header]
   -> E.GraphQLQueryType
   -> GQLReqUnparsed
   -> m (HttpResponse EncJSON)
-runGQ reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
+runGQ env reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
   -- The response and misc telemetry data:
   let telemTransport = Telem.HTTP
   (telemTimeTot_DT, (telemCacheHit, telemLocality, (telemTimeIO_DT, telemQueryType, !resp))) <- withElapsedTime $ do
@@ -59,7 +61,7 @@ runGQ reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
     reqParsed <- E.checkGQLExecution userInfo (reqHeaders, ipAddress) enableAL sc reqUnparsed
                  >>= flip onLeft throwError

-    (telemCacheHit, execPlan) <- E.getResolvedExecPlan pgExecCtx planCache
+    (telemCacheHit, execPlan) <- E.getResolvedExecPlan env pgExecCtx planCache
       userInfo sqlGenCtx sc scVer queryType
       httpManager reqHeaders (reqUnparsed, reqParsed)
     case execPlan of
@@ -89,7 +91,7 @@ runGQ reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
         throw400 UnexpectedPayload "subscriptions are not supported over HTTP, use websockets instead"
 {-
     E.GExPHasura resolvedOp -> do
-      (telemTimeIO, telemQueryType, respHdrs, resp) <- runHasuraGQ reqId reqUnparsed userInfo resolvedOp
+      (telemTimeIO, telemQueryType, respHdrs, resp) <- runHasuraGQ reqId (reqUnparsed, reqParsed) userInfo resolvedOp
       return (telemCacheHit, Telem.Local, (telemTimeIO, telemQueryType, HttpResponse resp respHdrs))
     E.GExPRemote rsi opDef -> do
       let telemQueryType | G._todType opDef == G.OperationTypeMutation = Telem.Mutation
@@ -105,7 +107,7 @@ runGQ reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
     runRemoteGQ telemCacheHit rsi opDef = do
       let telemQueryType | G._todType opDef == G.OperationTypeMutation = Telem.Mutation
                          | otherwise = Telem.Query
-      (telemTimeIO, resp) <- E.execRemoteGQ reqId userInfo reqHeaders reqUnparsed rsi opDef
+      (telemTimeIO, resp) <- E.execRemoteGQ env reqId userInfo reqHeaders reqUnparsed rsi opDef
       return (telemCacheHit, Telem.Remote, (telemTimeIO, telemQueryType, resp))

 -- | Run (execute) a batched GraphQL query (see 'GQLBatchedReqs')
@@ -117,7 +119,8 @@ runGQBatched
      , E.MonadGQLExecutionCheck m
      , MonadQueryLog m
      )
-  => RequestId
+  => Env.Environment
+  -> RequestId
   -> ResponseInternalErrorsConfig
   -> UserInfo
   -> Wai.IpAddress
@@ -126,10 +129,10 @@ runGQBatched
   -> GQLBatchedReqs GQLQueryText
   -- ^ the batched request with unparsed GraphQL query
   -> m (HttpResponse EncJSON)
-runGQBatched reqId responseErrorsConfig userInfo ipAddress reqHdrs queryType query = do
+runGQBatched env reqId responseErrorsConfig userInfo ipAddress reqHdrs queryType query = do
   case query of
     GQLSingleRequest req ->
-      runGQ reqId userInfo ipAddress reqHdrs queryType req
+      runGQ env reqId userInfo ipAddress reqHdrs queryType req
     GQLBatchedReqs reqs -> do
       -- It's unclear what we should do if we receive multiple
       -- responses with distinct headers, so just do the simplest thing
@@ -140,7 +143,7 @@ runGQBatched reqId responseErrorsConfig userInfo ipAddress reqHdrs queryType que
             . encJFromList
             . map (either (encJFromJValue . encodeGQErr includeInternal) _hrBody)

-      removeHeaders <$> traverse (try . runGQ reqId userInfo ipAddress reqHdrs queryType) reqs
+      removeHeaders <$> traverse (try . runGQ env reqId userInfo ipAddress reqHdrs queryType) reqs
   where
     try = flip catchError (pure . Left) . fmap Right

@@ -201,21 +204,21 @@ runHasuraGQ
      , MonadQueryLog m
      )
   => RequestId
-  -> GQLReqUnparsed
+  -> (GQLReqUnparsed, GQLReqParsed)
   -> UserInfo
-  -> E.ExecOp
+  -> E.ExecOp (LazyTx QErr)
   -> m (DiffTime, Telem.QueryType, HTTP.ResponseHeaders, EncJSON)
   -- ^ Also return 'Mutation' when the operation was a mutation, and the time
   -- spent in the PG query; for telemetry.
-runHasuraGQ reqId query userInfo resolvedOp = do
+runHasuraGQ reqId (query, _queryParsed) userInfo resolvedOp = do
   (E.ExecutionCtx logger _ pgExecCtx _ _ _ _ _) <- ask
   logQuery' logger
-  (telemTimeIO, respE) <- withElapsedTime $ liftIO $ runExceptT $ case resolvedOp of
-    E.ExOpQuery tx _genSql -> do
+  (telemTimeIO, respE) <- withElapsedTime $ runExceptT $ case resolvedOp of
+    E.ExOpQuery tx genSql _asts -> do
      -- log the generated SQL and the graphql query
      -- L.unLogger logger $ QueryLog query genSql reqId
      logQueryLog logger query genSql reqId
      ([],) <$> runQueryTx pgExecCtx tx
    E.ExOpMutation respHeaders tx -> do
      logQueryLog logger query Nothing reqId
      (respHeaders,) <$> runLazyTx pgExecCtx Q.ReadWrite (withUserInfo userInfo tx)
    E.ExOpSubs _ ->
      throw400 UnexpectedPayload
@@ -37,7 +37,7 @@ newtype GQLExecDoc
   deriving (Ord, Show, Eq, Hashable)

 instance J.FromJSON GQLExecDoc where
-  parseJSON v = (GQLExecDoc . G.getExecutableDefinitions) <$> J.parseJSON v
+  parseJSON v = GQLExecDoc . G.getExecutableDefinitions <$> J.parseJSON v

 instance J.ToJSON GQLExecDoc where
   toJSON = J.toJSON . G.ExecutableDocument . unGQLExecDoc
@@ -6,6 +6,7 @@ module Hasura.GraphQL.Transport.WebSocket
   , createWSServerEnv
   , stopWSServerApp
   , WSServerEnv
+  , WSLog(..)
   ) where

 -- NOTE!:
@@ -33,6 +34,7 @@ import qualified Network.HTTP.Types as H
 import qualified Network.Wai.Extended as Wai
 import qualified Network.WebSockets as WS
 import qualified StmContainers.Map as STMMap
+import qualified Data.Environment as Env

 import Control.Concurrent.Extended (sleep)
 import Control.Exception.Lifted
@@ -49,13 +51,15 @@ import Hasura.RQL.Types
 import Hasura.Server.Auth (AuthMode, UserAuthentication,
                            resolveUserInfo)
 import Hasura.Server.Cors
-import Hasura.Server.Utils (RequestId, getRequestId)
+import Hasura.Server.Utils (RequestId,
+                            getRequestId)
 import Hasura.Server.Version (HasVersion)
 import Hasura.Session

 import qualified Hasura.GraphQL.Execute as E
 import qualified Hasura.GraphQL.Execute.LiveQuery as LQ
+import qualified Hasura.GraphQL.Execute.LiveQuery.Poll as LQ
 import qualified Hasura.GraphQL.Execute.Query as EQ
 import qualified Hasura.GraphQL.Transport.WebSocket.Server as WS
 import qualified Hasura.Logging as L
 import qualified Hasura.Server.Telemetry.Counters as Telem
@@ -218,7 +222,7 @@ data WSServerEnv

 onConn :: (MonadIO m)
        => L.Logger L.Hasura -> CorsPolicy -> WS.OnConnH m WSConnData
-onConn (L.Logger logger) corsPolicy wsId requestHead ipAdress = do
+onConn (L.Logger logger) corsPolicy wsId requestHead ipAddress = do
   res <- runExceptT $ do
     (errType, queryType) <- checkPath
     let reqHdrs = WS.requestHeaders requestHead
@@ -244,7 +248,7 @@ onConn (L.Logger logger) corsPolicy wsId requestHead ipAdress = do
     accept (hdrs, errType, queryType) = do
       logger $ mkWsInfoLog Nothing (WsConnInfo wsId Nothing Nothing) EAccepted
       connData <- liftIO $ WSConnData
-                  <$> STM.newTVarIO (CSNotInitialised hdrs ipAdress)
+                  <$> STM.newTVarIO (CSNotInitialised hdrs ipAddress)
                   <*> STMMap.newIO
                   <*> pure errType
                   <*> pure queryType
@@ -302,8 +306,8 @@ onConn (L.Logger logger) corsPolicy wsId requestHead ipAdress = do

 onStart
   :: forall m. (HasVersion, MonadIO m, E.MonadGQLExecutionCheck m, MonadQueryLog m)
-  => WSServerEnv -> WSConn -> StartMsg -> m ()
-onStart serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
+  => Env.Environment -> WSServerEnv -> WSConn -> StartMsg -> m ()
+onStart env serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
   timerTot <- startTimer
   opM <- liftIO $ STM.atomically $ STMMap.lookup opId opMap

@@ -327,7 +331,7 @@ onStart serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do

   reqParsedE <- lift $ E.checkGQLExecution userInfo (reqHdrs, ipAddress) enableAL sc q
   reqParsed <- either (withComplete . preExecErr requestId) return reqParsedE
-  execPlanE <- runExceptT $ E.getResolvedExecPlan pgExecCtx
+  execPlanE <- runExceptT $ E.getResolvedExecPlan env pgExecCtx
     planCache userInfo sqlGenCtx sc scVer queryType httpMgr reqHdrs (q, reqParsed)

   (telemCacheHit, execPlan) <- either (withComplete . preExecErr requestId) return execPlanE
@@ -401,15 +405,14 @@ onStart serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
       -> Telem.CacheHit -> RequestId -> GQLReqUnparsed -> UserInfo -> E.ExecOp
       -> ExceptT () m ()
     runHasuraGQ timerTot telemCacheHit reqId query userInfo = \case
-      E.ExOpQuery opTx genSql ->
+      E.ExOpQuery opTx genSql _asts ->
        execQueryOrMut Telem.Query genSql $ runQueryTx pgExecCtx opTx
      -- Response headers discarded over websockets
-      E.ExOpMutation _ opTx ->
+      E.ExOpMutation _ opTx -> do
        execQueryOrMut Telem.Mutation Nothing $
          runLazyTx pgExecCtx Q.ReadWrite $ withUserInfo userInfo opTx
      E.ExOpSubs lqOp -> do
        -- log the graphql query
        -- L.unLogger logger $ QueryLog query Nothing reqId
        logQueryLog logger query Nothing reqId
        let subscriberMetadata = LQ.mkSubscriberMetadata $ J.object
              [ "websocket_id" J..= WS.getWSId wsConn
@@ -428,6 +431,11 @@ onStart serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do

   where
     telemLocality = Telem.Local
+    execQueryOrMut
+      :: Telem.QueryType
+      -> Maybe EQ.GeneratedSqlMap
+      -> ExceptT QErr (ExceptT () m) EncJSON
+      -> ExceptT () m ()
     execQueryOrMut telemQueryType genSql action = do
       logOpEv ODStarted (Just reqId)
       -- log the generated SQL and the graphql query
@@ -459,7 +467,7 @@ onStart serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do

     -- if it's not a subscription, use HTTP to execute the query on the remote
     (runExceptT $ flip runReaderT execCtx $
-      E.execRemoteGQ reqId userInfo reqHdrs q rsi opDef) >>= \case
+      E.execRemoteGQ env reqId userInfo reqHdrs q rsi opDef) >>= \case
       Left err -> postExecErr reqId err
       Right (telemTimeIO_DT, !val) -> do
         -- Telemetry. NOTE: don't time network IO:
@@ -546,11 +554,17 @@ onStart serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
     catchAndIgnore m = void $ runExceptT m

 onMessage
-  :: (HasVersion, MonadIO m, UserAuthentication m, E.MonadGQLExecutionCheck m, MonadQueryLog m)
-  => AuthMode
+  :: ( HasVersion
+     , MonadIO m
+     , UserAuthentication m
+     , E.MonadGQLExecutionCheck m
+     , MonadQueryLog m
+     )
+  => Env.Environment
+  -> AuthMode
   -> WSServerEnv
   -> WSConn -> BL.ByteString -> m ()
-onMessage authMode serverEnv wsConn msgRaw =
+onMessage env authMode serverEnv wsConn msgRaw = do
   case J.eitherDecode msgRaw of
     Left e -> do
       let err = ConnErrMsg $ "parsing ClientMessage failed: " <> T.pack e
@@ -561,7 +575,7 @@ onMessage authMode serverEnv wsConn msgRaw =
       CMConnInit params -> onConnInit (_wseLogger serverEnv)
                            (_wseHManager serverEnv)
                            wsConn authMode params
-      CMStart startMsg -> onStart serverEnv wsConn startMsg
+      CMStart startMsg -> onStart env serverEnv wsConn startMsg
      CMStop stopMsg -> liftIO $ onStop serverEnv wsConn stopMsg
      -- The idea is cleanup will be handled by 'onClose', but...
      -- NOTE: we need to close the websocket connection when we receive the
@@ -571,6 +585,7 @@ onMessage authMode serverEnv wsConn msgRaw =
   where
     logger = _wseLogger serverEnv


 onStop :: WSServerEnv -> WSConn -> StopMsg -> IO ()
 onStop serverEnv wsConn (StopMsg opId) = do
   -- When a stop message is received for an operation, it may not be present in OpMap
@@ -642,7 +657,7 @@ onConnInit logger manager wsConn authMode connParamsM = do
   let headers = mkHeaders connState
   res <- resolveUserInfo logger manager headers authMode
   case res of
     Left e -> do
       let !initErr = CSInitError $ qeError e
       liftIO $ do
         -- TODO(PDV) disabled for now; printing odd errors: $assertNFHere initErr -- so we don't write thunks to mutable vars
@@ -651,8 +666,9 @@ onConnInit logger manager wsConn authMode connParamsM = do
       let connErr = ConnErrMsg $ qeError e
       logWSEvent logger wsConn $ EConnErr connErr
       sendMsg wsConn $ SMConnErr connErr

     Right (userInfo, expTimeM) -> do
       let !csInit = CSInitialised $ WsClientState userInfo expTimeM paramHeaders ipAddress
       liftIO $ do
         -- TODO(PDV) disabled for now; printing odd errors: $assertNFHere csInit -- so we don't write thunks to mutable vars
         STM.atomically $ STM.writeTVar (_wscUser $ WS.getData wsConn) csInit
@@ -710,11 +726,11 @@ createWSServerEnv
   -> Bool
   -> E.PlanCache
   -> m WSServerEnv
-createWSServerEnv logger pgExecCtx lqState getSchemaCache httpManager
+createWSServerEnv logger isPgCtx lqState getSchemaCache httpManager
   corsPolicy sqlGenCtx enableAL planCache = do
   wsServer <- liftIO $ STM.atomically $ WS.createWSServer logger
   return $
-    WSServerEnv logger pgExecCtx lqState getSchemaCache httpManager corsPolicy
+    WSServerEnv logger isPgCtx lqState getSchemaCache httpManager corsPolicy
     sqlGenCtx planCache wsServer enableAL

 createWSServerApp
@@ -723,22 +739,23 @@ createWSServerApp
      , MC.MonadBaseControl IO m
      , LA.Forall (LA.Pure m)
      , UserAuthentication m
-     , WS.MonadWSLog m
      , E.MonadGQLExecutionCheck m
+     , WS.MonadWSLog m
      , MonadQueryLog m
      )
-  => AuthMode
+  => Env.Environment
+  -> AuthMode
   -> WSServerEnv
   -> WS.HasuraServerApp m
-  -- ^ aka generalized 'WS.ServerApp'
-createWSServerApp authMode serverEnv = \ !ipAddress !pendingConn ->
+-- -- ^ aka generalized 'WS.ServerApp'
+createWSServerApp env authMode serverEnv = \ !ipAddress !pendingConn ->
   WS.createServerApp (_wseServer serverEnv) handlers ipAddress pendingConn
   where
    handlers =
      WS.WSHandlers
      -- Mask async exceptions during event processing to help maintain integrity of mutable vars:
      (\rid rh ip -> mask_ $ onConn (_wseLogger serverEnv) (_wseCorsPolicy serverEnv) rid rh ip)
-      (\conn bs -> mask_ $ onMessage authMode serverEnv conn bs)
+      (\conn bs -> mask_ $ onMessage env authMode serverEnv conn bs)
      (\conn -> mask_ $ onClose (_wseLogger serverEnv) (_wseLiveQMap serverEnv) conn)

 stopWSServerApp :: WSServerEnv -> IO ()
@@ -8,6 +8,7 @@ module Hasura.GraphQL.Transport.WebSocket.Protocol
   , ServerMsg(..)
   , ServerMsgType(..)
   , encodeServerMsg
+  , serverMsgType
   , DataMsg(..)
   , ErrorMsg(..)
   , ConnErrMsg(..)
@@ -115,6 +116,14 @@ instance Show ServerMsgType where
 instance J.ToJSON ServerMsgType where
   toJSON = J.toJSON . show

+serverMsgType :: ServerMsg -> ServerMsgType
+serverMsgType SMConnAck = SMT_GQL_CONNECTION_ACK
+serverMsgType SMConnKeepAlive = SMT_GQL_CONNECTION_KEEP_ALIVE
+serverMsgType (SMConnErr _) = SMT_GQL_CONNECTION_ERROR
+serverMsgType (SMData _) = SMT_GQL_DATA
+serverMsgType (SMErr _) = SMT_GQL_ERROR
+serverMsgType (SMComplete _) = SMT_GQL_COMPLETE
+
 encodeServerMsg :: ServerMsg -> BL.ByteString
 encodeServerMsg msg =
   encJToLBS $ encJFromAssocList $ case msg of
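The new serverMsgType is a total projection from a message to its type tag, so logging and metrics can record a message's kind without serializing its payload. A toy reconstruction of the same pattern (types here are stand-ins, not the module's real ones):

    data MsgType = AckT | DataT | ErrorT deriving Show
    data Msg = Ack | Data String | Error String

    msgType :: Msg -> MsgType
    msgType Ack       = AckT
    msgType (Data _)  = DataT
    msgType (Error _) = ErrorT

    main :: IO ()
    main = print (msgType (Data "{}"))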
@@ -3,7 +3,8 @@

 module Hasura.GraphQL.Transport.WebSocket.Server
   ( WSId(..)
-
+  , WSLog(..)
+  , WSEvent(..)
   , WSConn
   , getData
   , getWSId
@@ -17,6 +18,7 @@ module Hasura.GraphQL.Transport.WebSocket.Server
   , WSHandlers(..)

   , WSServer
+  , HasuraServerApp
   , WSEventInfo(..)
   , WSQueueResponse(..)
   , ServerMsgType(..)
@@ -26,9 +28,6 @@ module Hasura.GraphQL.Transport.WebSocket.Server
   , shutdown

   , MonadWSLog (..)
-  , HasuraServerApp
-  , WSEvent(..)
-  , WSLog(..)
   ) where

 import qualified Control.Concurrent.Async as A
@@ -225,6 +224,9 @@ type OnConnH m a = WSId -> WS.RequestHead -> IpAddress -> m (Either WS.Reject
 type OnCloseH m a = WSConn a -> m ()
 type OnMessageH m a = WSConn a -> BL.ByteString -> m ()

+-- | aka generalized 'WS.ServerApp' over @m@, which takes an IPAddress
+type HasuraServerApp m = IpAddress -> WS.PendingConnection -> m ()
+
 data WSHandlers m a
   = WSHandlers
   { _hOnConn :: OnConnH m a
@@ -232,16 +234,13 @@ data WSHandlers m a
   , _hOnClose :: OnCloseH m a
   }

--- | aka generalized 'WS.ServerApp' over @m@, which takes an IPAddress
-type HasuraServerApp m = IpAddress -> WS.PendingConnection -> m ()
-
 createServerApp
   :: (MonadIO m, MC.MonadBaseControl IO m, LA.Forall (LA.Pure m), MonadWSLog m)
   => WSServer a
-  -- user provided handlers
   -> WSHandlers m a
-  -- aka WS.ServerApp
+  -- ^ user provided handlers
   -> HasuraServerApp m
+  -- ^ aka WS.ServerApp
 {-# INLINE createServerApp #-}
 createServerApp (WSServer logger@(L.Logger writeLog) serverStatus) wsHandlers !ipAddress !pendingConn = do
   wsId <- WSId <$> liftIO UUID.nextRandom
@@ -261,7 +260,7 @@ createServerApp (WSServer logger@(L.Logger writeLog) serverStatus) wsHandlers !i
     -- least log properly and re-raise:
     logUnexpectedExceptions = handle $ \(e :: SomeException) -> do
       writeLog $ L.UnstructuredLog L.LevelError $ fromString $
-        "Unexpected exception raised in websocket. Please report this as a bug: "<>show e
+        "Unexpected exception raised in websocket. Please report this as a bug: " <> show e
       throwIO e

     shuttingDownReject =
@ -795,7 +795,7 @@ instance (MonadReusability m) => MonadReusability (StateT s m) where
markNotReusable = lift markNotReusable

newtype ReusabilityT m a = ReusabilityT { unReusabilityT :: StateT QueryReusability m a }
deriving (Functor, Applicative, Monad, MonadError e, MonadReader r, MonadIO)
deriving (Functor, Applicative, Monad, MonadError e, MonadReader r, MonadIO, MonadTrans)

instance (Monad m) => MonadReusability (ReusabilityT m) where
recordVariableUse varName varType = ReusabilityT $
@ -27,15 +27,14 @@ import Hasura.EncJSON
import Hasura.GraphQL.Utils
import Hasura.Prelude
import Hasura.RQL.Types
import Data.URL.Template
import Hasura.Session
import Hasura.SQL.Types

import qualified Data.Aeson as J
import qualified Data.Aeson.Casing as J
import qualified Data.Aeson.TH as J
import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as Map
import qualified Data.Text as T

import qualified Database.PG.Query as Q
import qualified Language.GraphQL.Draft.Syntax as G
@ -76,7 +75,7 @@ persistCreateAction (CreateAction actionName actionDefinition comment) = do
VALUES ($1, $2, $3)
|] (actionName, Q.AltJ actionDefinition, comment) True

{- Note [Postgres scalars in action input arguments]
{-| Note [Postgres scalars in action input arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It's very comfortable to be able to reference Postgres scalars in actions
input arguments. For example, see the following action mutation:
@ -95,14 +94,15 @@ referred scalars.
-}

resolveAction
:: (QErrM m, MonadIO m)
=> AnnotatedCustomTypes
:: QErrM m
=> Env.Environment
-> AnnotatedCustomTypes
-> ActionDefinitionInput
-> HashSet PGScalarType -- See Note [Postgres scalars in custom types]
-> m ( ResolvedActionDefinition
, AnnotatedObjectType
)
resolveAction AnnotatedCustomTypes{..} ActionDefinition{..} allPGScalars = do
resolveAction env AnnotatedCustomTypes{..} ActionDefinition{..} allPGScalars = do
resolvedArguments <- forM _adArguments $ \argumentDefinition -> do
forM argumentDefinition $ \argumentType -> do
let gType = unGraphQLType argumentType
@ -123,16 +123,12 @@ resolveAction AnnotatedCustomTypes{..} ActionDefinition{..} allPGScalars = do
outputObject <- onNothing (Map.lookup outputBaseType _actObjects) $
throw400 NotExists $ "the type: " <> showName outputBaseType
<> " is not an object type defined in custom types"
resolvedWebhook <- resolveWebhook _adHandler
resolvedWebhook <- resolveWebhook env _adHandler
pure ( ActionDefinition resolvedArguments _adOutputType _adType
_adHeaders _adForwardClientHeaders resolvedWebhook
, outputObject
)
where
resolveWebhook (InputWebhook urlTemplate) = do
eitherRenderedTemplate <- renderURLTemplate urlTemplate
either (throw400 Unexpected . T.pack) (pure . ResolvedWebhook) eitherRenderedTemplate

lookupPGScalar baseType = -- see Note [Postgres scalars in custom types]
fmap (flip ScalarTypeDefinition Nothing) $
find ((==) baseType) $ mapMaybe (G.mkName . toSQLTxt) $
@ -225,8 +221,9 @@ resolveAction
-> m ( ResolvedActionDefinition
, AnnotatedObjectType
, HashSet PGScalarType
) -- ^ see Note [Postgres scalars in action input arguments].
resolveAction customTypes allPGScalars actionDefinition = do
-- ^ see Note [Postgres scalars in action input arguments].
)
resolveAction env customTypes allPGScalars actionDefinition = do
let responseType = unGraphQLType $ _adOutputType actionDefinition
responseBaseType = G.getBaseType responseType

@ -253,7 +250,7 @@ resolveAction customTypes allPGScalars actionDefinition = do

-- Check if the response type is an object
outputObject <- getObjectTypeInfo responseBaseType
resolvedDef <- traverse resolveWebhook actionDefinition
resolvedDef <- traverse (resolveWebhook env) actionDefinition
pure (resolvedDef, outputObject, reusedPGScalars)
where
getNonObjectTypeInfo typeName =
@ -20,7 +20,6 @@ module Hasura.RQL.DDL.EventTrigger
) where

import Data.Aeson
import System.Environment (lookupEnv)

import Hasura.EncJSON
import Hasura.Prelude
@ -32,6 +31,7 @@ import Hasura.SQL.Types
import qualified Hasura.SQL.DML as S

import qualified Data.Text as T
import qualified Data.Environment as Env
import qualified Data.Text.Lazy as TL
import qualified Database.PG.Query as Q
import qualified Text.Shakespeare.Text as ST
@ -208,16 +208,19 @@ subTableP1 (CreateEventTriggerQuery name qt insert update delete enableManual re
SubCArray pgcols -> forM_ pgcols (assertPGCol (_tciFieldInfoMap ti) "")

subTableP2Setup
:: (QErrM m, MonadIO m)
=> QualifiedTable -> EventTriggerConf -> m (EventTriggerInfo, [SchemaDependency])
subTableP2Setup qt (EventTriggerConf name def webhook webhookFromEnv rconf mheaders) = do
:: QErrM m
=> Env.Environment
-> QualifiedTable
-> EventTriggerConf
-> m (EventTriggerInfo, [SchemaDependency])
subTableP2Setup env qt (EventTriggerConf name def webhook webhookFromEnv rconf mheaders) = do
webhookConf <- case (webhook, webhookFromEnv) of
(Just w, Nothing) -> return $ WCValue w
(Nothing, Just wEnv) -> return $ WCEnv wEnv
_ -> throw500 "expected webhook or webhook_from_env"
let headerConfs = fromMaybe [] mheaders
webhookInfo <- getWebhookInfoFromConf webhookConf
headerInfos <- getHeaderInfosFromConf headerConfs
webhookInfo <- getWebhookInfoFromConf env webhookConf
headerInfos <- getHeaderInfosFromConf env headerConfs
let eTrigInfo = EventTriggerInfo name def rconf webhookInfo headerInfos
tabDep = SchemaDependency (SOTable qt) DRParent
pure (eTrigInfo, tabDep:getTrigDefDeps qt def)
@ -310,30 +313,35 @@ runInvokeEventTrigger (InvokeEventTriggerQuery name payload) = do
_ -> throw400 NotSupported "manual mode is not enabled for event trigger"

getHeaderInfosFromConf
:: (QErrM m, MonadIO m)
=> [HeaderConf] -> m [EventHeaderInfo]
getHeaderInfosFromConf = mapM getHeader
:: QErrM m
=> Env.Environment
-> [HeaderConf]
-> m [EventHeaderInfo]
getHeaderInfosFromConf env = mapM getHeader
where
getHeader :: (QErrM m, MonadIO m) => HeaderConf -> m EventHeaderInfo
getHeader :: QErrM m => HeaderConf -> m EventHeaderInfo
getHeader hconf = case hconf of
(HeaderConf _ (HVValue val)) -> return $ EventHeaderInfo hconf val
(HeaderConf _ (HVEnv val)) -> do
envVal <- getEnv val
envVal <- getEnv env val
return $ EventHeaderInfo hconf envVal

getWebhookInfoFromConf
:: (QErrM m, MonadIO m) => WebhookConf -> m WebhookConfInfo
getWebhookInfoFromConf wc = case wc of
:: QErrM m
=> Env.Environment
-> WebhookConf
-> m WebhookConfInfo
getWebhookInfoFromConf env wc = case wc of
WCValue w -> return $ WebhookConfInfo wc w
WCEnv we -> do
envVal <- getEnv we
envVal <- getEnv env we
return $ WebhookConfInfo wc envVal

getEnv :: (QErrM m, MonadIO m) => T.Text -> m T.Text
getEnv env = do
mEnv <- liftIO $ lookupEnv (T.unpack env)
getEnv :: QErrM m => Env.Environment -> T.Text -> m T.Text
getEnv env k = do
let mEnv = Env.lookupEnv env (T.unpack k)
case mEnv of
Nothing -> throw400 NotFound $ "environment variable '" <> env <> "' not set"
Nothing -> throw400 NotFound $ "environment variable '" <> k <> "' not set"
Just envVal -> return (T.pack envVal)

getEventTriggerDef
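With the pure lookup in place, getEnv needs only QErrM rather than MonadIO, so env-var resolution can be exercised against a synthetic environment. A hedged usage sketch (mkEnvironment as in the illustrative module above; the runner is whatever QErrM instance the caller already has):

    -- e.g., with OverloadedStrings, in any monad m with a QErrM instance:
    -- getEnv (Env.mkEnvironment [("WEBHOOK_URL", "https://example.com/hook")]) "WEBHOOK_URL"
    --   should yield "https://example.com/hook" without touching real process state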
@ -6,10 +6,10 @@ import Hasura.Prelude
import Hasura.RQL.Instances ()
import Hasura.RQL.Types.Error
import Language.Haskell.TH.Syntax (Lift)
import System.Environment (lookupEnv)

import qualified Data.CaseInsensitive as CI
import qualified Data.Text as T
import qualified Data.Environment as Env
import qualified Network.HTTP.Types as HTTP


@ -46,15 +46,15 @@ instance ToJSON HeaderConf where

-- | Resolve configuration headers
makeHeadersFromConf
:: (MonadError QErr m, MonadIO m) => [HeaderConf] -> m [HTTP.Header]
makeHeadersFromConf = mapM getHeader
:: MonadError QErr m => Env.Environment -> [HeaderConf] -> m [HTTP.Header]
makeHeadersFromConf env = mapM getHeader
where
getHeader hconf =
((CI.mk . txtToBs) *** txtToBs) <$>
case hconf of
(HeaderConf name (HVValue val)) -> return (name, val)
(HeaderConf name (HVEnv val)) -> do
mEnv <- liftIO $ lookupEnv (T.unpack val)
let mEnv = Env.lookupEnv env (T.unpack val)
case mEnv of
Nothing -> throw400 NotFound $ "environment variable '" <> val <> "' not set"
Just envval -> pure (name, T.pack envval)
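Because makeHeadersFromConf now needs only MonadError QErr, it can run in a pure Except stack. A sketch under those assumptions (OverloadedStrings assumed; HeaderConf and HVEnv as in the diff above, mkEnvironment from the illustrative module earlier):

    resolvedHeaders :: Either QErr [HTTP.Header]
    resolvedHeaders = runExcept $ makeHeadersFromConf
      (Env.mkEnvironment [("AUTH_TOKEN", "secret")])
      [ HeaderConf "Authorization" (HVEnv "AUTH_TOKEN") ]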
@ -6,7 +6,6 @@ where
import Hasura.Prelude

import qualified Data.Aeson as J
import Data.List.Extended (duplicates)
import qualified Data.Text as T
import qualified Data.Vector as V
import qualified Language.GraphQL.Draft.Parser as G
@ -15,6 +14,8 @@ import qualified Language.Haskell.TH.Syntax as TH
import qualified Network.URI as N
import qualified System.Cron.Parser as Cr


import Data.List.Extended (duplicates)
import Data.Scientific
import System.Cron.Types
import Test.QuickCheck
@ -28,6 +28,8 @@ import Hasura.RQL.Types
import Hasura.Server.Version (HasVersion)
import Hasura.SQL.Types

import qualified Data.Environment as Env

runAddRemoteSchema
:: ( HasVersion
, QErrM m
@ -37,10 +39,12 @@ runAddRemoteSchema
, MonadUnique m
, HasHttpManager m
)
=> AddRemoteSchemaQuery -> m EncJSON
runAddRemoteSchema q = do
=> Env.Environment
-> AddRemoteSchemaQuery
-> m EncJSON
runAddRemoteSchema env q = do
addRemoteSchemaP1 name
addRemoteSchemaP2 q
addRemoteSchemaP2 env q
buildSchemaCacheFor $ MORemoteSchema name
pure successMsg
where
@ -57,16 +61,17 @@ addRemoteSchemaP1 name = do

addRemoteSchemaP2Setup
:: (HasVersion, QErrM m, MonadIO m, MonadUnique m, HasHttpManager m)
=> AddRemoteSchemaQuery -> m RemoteSchemaCtx
addRemoteSchemaP2Setup (AddRemoteSchemaQuery name def _) = do
=> Env.Environment
-> AddRemoteSchemaQuery -> m RemoteSchemaCtx
addRemoteSchemaP2Setup env (AddRemoteSchemaQuery name def _) = do
httpMgr <- askHttpManager
rsi <- validateRemoteSchemaDef def
fetchRemoteSchema httpMgr name rsi
rsi <- validateRemoteSchemaDef env def
fetchRemoteSchema env httpMgr name rsi

addRemoteSchemaP2
:: (HasVersion, MonadTx m, MonadIO m, MonadUnique m, HasHttpManager m) => AddRemoteSchemaQuery -> m ()
addRemoteSchemaP2 q = do
void $ addRemoteSchemaP2Setup q
:: (HasVersion, MonadTx m, MonadIO m, MonadUnique m, HasHttpManager m) => Env.Environment -> AddRemoteSchemaQuery -> m ()
addRemoteSchemaP2 env q = do
void $ addRemoteSchemaP2Setup env q
liftTx $ addRemoteSchemaToCatalog q

runRemoveRemoteSchema
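Note the pattern in addRemoteSchemaP2Setup: the environment is consumed twice, once by validateRemoteSchemaDef to resolve url_from_env and once by fetchRemoteSchema (plausibly for header resolution at request time; its definition is not in this excerpt). A compressed view of the data flow, using only signatures visible in this diff:

    -- rsi <- validateRemoteSchemaDef env def
    --        -- Env.Environment -> RemoteSchemaDef -> m RemoteSchemaInfo
    -- fetchRemoteSchema env httpMgr name rsi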
@ -18,6 +18,7 @@ import Hasura.Eventing.ScheduledTrigger
import qualified Database.PG.Query as Q
import qualified Data.Time.Clock as C
import qualified Data.HashMap.Strict as Map
import qualified Data.Environment as Env

-- | runCreateCronTrigger will update a existing cron trigger when the 'replace'
-- value is set to @true@ and when replace is @false@ a new cron trigger will
@ -61,11 +62,13 @@ addCronTriggerToCatalog CronTriggerMetadata {..} = liftTx $ do
insertCronEvents $ map (CronEventSeed ctName) scheduleTimes

resolveCronTrigger
:: (QErrM m, MonadIO m)
=> CatalogCronTrigger -> m CronTriggerInfo
resolveCronTrigger CatalogCronTrigger {..} = do
webhookInfo <- resolveWebhook _cctWebhookConf
headerInfo <- getHeaderInfosFromConf headers
:: (QErrM m)
=> Env.Environment
-> CatalogCronTrigger
-> m CronTriggerInfo
resolveCronTrigger env CatalogCronTrigger {..} = do
webhookInfo <- resolveWebhook env _cctWebhookConf
headerInfo <- getHeaderInfosFromConf env headers
pure $
CronTriggerInfo _cctName
_cctCronSchedule
@ -23,6 +23,7 @@ import Hasura.Prelude
import qualified Data.HashMap.Strict.Extended as M
import qualified Data.HashSet as HS
import qualified Data.Text as T
import qualified Data.Environment as Env
import qualified Database.PG.Query as Q

import Control.Arrow.Extended
@ -59,11 +60,12 @@ import Hasura.SQL.Types

buildRebuildableSchemaCache
:: (HasVersion, MonadIO m, MonadUnique m, MonadTx m, HasHttpManager m, HasSQLGenCtx m)
=> m (RebuildableSchemaCache m)
buildRebuildableSchemaCache = do
=> Env.Environment
-> m (RebuildableSchemaCache m)
buildRebuildableSchemaCache env = do
catalogMetadata <- liftTx fetchCatalogData
result <- flip runReaderT CatalogSync $
Inc.build buildSchemaCacheRule (catalogMetadata, initialInvalidationKeys)
Inc.build (buildSchemaCacheRule env) (catalogMetadata, initialInvalidationKeys)
pure $ RebuildableSchemaCache (Inc.result result) initialInvalidationKeys (Inc.rebuildRule result)

newtype CacheRWT m a
@ -113,8 +115,9 @@ buildSchemaCacheRule
:: ( HasVersion, ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
, MonadIO m, MonadUnique m, MonadTx m
, MonadReader BuildReason m, HasHttpManager m, HasSQLGenCtx m )
=> (CatalogMetadata, InvalidationKeys) `arr` SchemaCache
buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
=> Env.Environment
-> (CatalogMetadata, InvalidationKeys) `arr` SchemaCache
buildSchemaCacheRule env = proc (catalogMetadata, invalidationKeys) -> do
invalidationKeysDep <- Inc.newDependency -< invalidationKeys

-- Step 1: Process metadata and collect dependency information.
@ -318,7 +321,7 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do

buildTableEventTriggers
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
, Inc.ArrowCache m arr, MonadIO m, MonadTx m, MonadReader BuildReason m, HasSQLGenCtx m )
, Inc.ArrowCache m arr, MonadTx m, MonadReader BuildReason m, HasSQLGenCtx m )
=> (TableCoreInfo, [CatalogEventTrigger]) `arr` EventTriggerInfoMap
buildTableEventTriggers = buildInfoMap _cetName mkEventTriggerMetadataObject buildEventTrigger
where
@ -330,7 +333,7 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
(| withRecordInconsistency (
(| modifyErrA (do
etc <- bindErrorA -< decodeValue configuration
(info, dependencies) <- bindErrorA -< subTableP2Setup qt etc
(info, dependencies) <- bindErrorA -< subTableP2Setup env qt etc
let tableColumns = M.mapMaybe (^? _FIColumn) (_tciFieldInfoMap tableInfo)
recreateViewIfNeeded -< (qt, tableColumns, trn, etcDefinition etc)
recordDependencies -< (metadataObject, schemaObjectId, dependencies)
@ -345,6 +348,45 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
liftTx $ delTriggerQ triggerName -- executes DROP IF EXISTS.. sql
mkAllTriggersQ triggerName tableName (M.elems tableColumns) triggerDefinition

buildCronTriggers
:: ( ArrowChoice arr
, Inc.ArrowDistribute arr
, ArrowWriter (Seq CollectedInfo) arr
, Inc.ArrowCache m arr
, MonadTx m)
=> ((),[CatalogCronTrigger])
`arr` HashMap TriggerName CronTriggerInfo
buildCronTriggers = buildInfoMap _cctName mkCronTriggerMetadataObject buildCronTrigger
where
buildCronTrigger = proc (_,cronTrigger) -> do
let triggerName = triggerNameToTxt $ _cctName cronTrigger
addCronTriggerContext e = "in cron trigger " <> triggerName <> ": " <> e
(| withRecordInconsistency (
(| modifyErrA (bindErrorA -< resolveCronTrigger env cronTrigger)
|) addCronTriggerContext)
|) (mkCronTriggerMetadataObject cronTrigger)

buildActions
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
, ArrowWriter (Seq CollectedInfo) arr)
=> ( (AnnotatedCustomTypes, HashSet PGScalarType)
, [ActionMetadata]
) `arr` HashMap ActionName ActionInfo
buildActions = buildInfoMap _amName mkActionMetadataObject buildAction
where
buildAction = proc ((resolvedCustomTypes, pgScalars), action) -> do
let ActionMetadata name comment def actionPermissions = action
addActionContext e = "in action " <> name <<> "; " <> e
(| withRecordInconsistency (
(| modifyErrA (do
(resolvedDef, outObject) <- liftEitherA <<< bindA -<
runExceptT $ resolveAction env resolvedCustomTypes def pgScalars
let permissionInfos = map (ActionPermissionInfo . _apmRole) actionPermissions
permissionMap = mapFromL _apiRole permissionInfos
returnA -< ActionInfo name outObject resolvedDef permissionMap comment)
|) addActionContext)
|) (mkActionMetadataObject action)

buildRemoteSchemas
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
, Inc.ArrowCache m arr , MonadIO m, MonadUnique m, HasHttpManager m )
@ -359,48 +401,9 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
buildRemoteSchema = Inc.cache proc (invalidationKeys, remoteSchema) -> do
Inc.dependOn -< Inc.selectKeyD (_arsqName remoteSchema) invalidationKeys
(| withRecordInconsistency (liftEitherA <<< bindA -<
runExceptT $ addRemoteSchemaP2Setup remoteSchema)
runExceptT $ addRemoteSchemaP2Setup env remoteSchema)
|) (mkRemoteSchemaMetadataObject remoteSchema)

buildActions
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
, ArrowWriter (Seq CollectedInfo) arr, MonadIO m )
=> ( (AnnotatedCustomTypes, HashSet PGScalarType)
, [ActionMetadata]
) `arr` HashMap ActionName ActionInfo
buildActions = buildInfoMap _amName mkActionMetadataObject buildAction
where
buildAction = proc ((resolvedCustomTypes, pgScalars), action) -> do
let ActionMetadata name comment def actionPermissions = action
addActionContext e = "in action " <> name <<> "; " <> e
(| withRecordInconsistency (
(| modifyErrA (do
(resolvedDef, outObject) <- liftEitherA <<< bindA -<
runExceptT $ resolveAction resolvedCustomTypes def pgScalars
let permissionInfos = map (ActionPermissionInfo . _apmRole) actionPermissions
permissionMap = mapFromL _apiRole permissionInfos
returnA -< ActionInfo name outObject resolvedDef permissionMap comment)
|) addActionContext)
|) (mkActionMetadataObject action)

buildCronTriggers
:: ( ArrowChoice arr
, Inc.ArrowDistribute arr
, ArrowWriter (Seq CollectedInfo) arr
, Inc.ArrowCache m arr
, MonadIO m
, MonadTx m)
=> ((),[CatalogCronTrigger])
`arr` HashMap TriggerName CronTriggerInfo
buildCronTriggers = buildInfoMap _cctName mkCronTriggerMetadataObject buildCronTrigger
where
buildCronTrigger = proc (_,cronTrigger) -> do
let triggerName = triggerNameToTxt $ _cctName cronTrigger
addCronTriggerContext e = "in cron trigger " <> triggerName <> ": " <> e
(| withRecordInconsistency (
(| modifyErrA (bindErrorA -< resolveCronTrigger cronTrigger)
|) addCronTriggerContext)
|) (mkCronTriggerMetadataObject cronTrigger)


-- | @'withMetadataCheck' cascade action@ runs @action@ and checks if the schema changed as a
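buildSchemaCacheRule takes env as a plain function argument before yielding the arrow rule, so the environment is captured in a closure and every nested builder (buildTableEventTriggers, buildCronTriggers, buildActions, buildRemoteSchemas) can use it without it being part of the incremental arrow input. A minimal sketch of that shape, with illustrative names only:

    -- ruleWithEnv :: Env.Environment -> (Input `arr` Output)
    -- ruleWithEnv env = proc input -> do
    --   ...                          -- env is in scope here, closed over,
    --   returnA -< useEnv env input  -- not a dependency the cache tracks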
@ -12,6 +12,8 @@ import Data.Aeson
import Instances.TH.Lift ()

import qualified Data.Sequence as DS
import qualified Data.Environment as Env


import Hasura.EncJSON
import Hasura.Prelude
@ -105,13 +107,18 @@ validateDeleteQ =
runDMLP1T . validateDeleteQWith sessVarFromCurrentSetting binRHSBuilder

execDeleteQuery
:: (HasVersion, MonadTx m, MonadIO m)
=> Bool
::
( HasVersion
, MonadTx m
, MonadIO m
)
=> Env.Environment
-> Bool
-> Maybe MutationRemoteJoinCtx
-> (AnnDel, DS.Seq Q.PrepArg)
-> m EncJSON
execDeleteQuery strfyNum remoteJoinCtx (u, p) =
runMutation $ mkMutation remoteJoinCtx (dqp1Table u) (deleteCTE, p)
execDeleteQuery env strfyNum remoteJoinCtx (u, p) =
runMutation env $ mkMutation remoteJoinCtx (dqp1Table u) (deleteCTE, p)
(dqp1Output u) (dqp1AllCols u) strfyNum
where
deleteCTE = mkDeleteCTE u
@ -120,7 +127,9 @@ runDelete
:: ( HasVersion, QErrM m, UserInfoM m, CacheRM m
, MonadTx m, HasSQLGenCtx m, MonadIO m
)
=> DeleteQuery -> m EncJSON
runDelete q = do
=> Env.Environment
-> DeleteQuery
-> m EncJSON
runDelete env q = do
strfyNum <- stringifyNum <$> askSQLGenCtx
validateDeleteQ q >>= execDeleteQuery strfyNum Nothing
validateDeleteQ q >>= execDeleteQuery env strfyNum Nothing
@ -30,6 +30,7 @@ import Hasura.Server.Version (HasVersion)
import Hasura.Session
import Hasura.SQL.Types

import qualified Data.Environment as Env

mkInsertCTE :: InsertQueryP1 -> S.CTE
mkInsertCTE (InsertQueryP1 tn cols vals conflict (insCheck, updCheck) _ _) =
@ -240,13 +241,17 @@ convInsQ =
binRHSBuilder

execInsertQuery
:: (HasVersion, MonadTx m, MonadIO m)
=> Bool
:: ( HasVersion
, MonadTx m
, MonadIO m
)
=> Env.Environment
-> Bool
-> Maybe MutationRemoteJoinCtx
-> (InsertQueryP1, DS.Seq Q.PrepArg)
-> m EncJSON
execInsertQuery strfyNum remoteJoinCtx (u, p) =
runMutation
execInsertQuery env strfyNum remoteJoinCtx (u, p) =
runMutation env
$ mkMutation remoteJoinCtx (iqp1Table u) (insertCTE, p)
(iqp1Output u) (iqp1AllCols u) strfyNum
where
@ -329,8 +334,8 @@ runInsert
:: ( HasVersion, QErrM m, UserInfoM m
, CacheRM m, MonadTx m, HasSQLGenCtx m, MonadIO m
)
=> InsertQuery -> m EncJSON
runInsert q = do
=> Env.Environment -> InsertQuery -> m EncJSON
runInsert env q = do
res <- convInsQ q
strfyNum <- stringifyNum <$> askSQLGenCtx
execInsertQuery strfyNum Nothing res
execInsertQuery env strfyNum Nothing res
@ -11,11 +11,12 @@ where

import Hasura.Prelude

import qualified Data.HashMap.Strict as Map
import qualified Data.Sequence as DS
import qualified Database.PG.Query as Q
import qualified Network.HTTP.Client as HTTP
import qualified Network.HTTP.Types as N
import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as Map
import qualified Data.Sequence as DS
import qualified Database.PG.Query as Q
import qualified Network.HTTP.Client as HTTP
import qualified Network.HTTP.Types as N

import qualified Hasura.SQL.DML as S

@ -58,17 +59,29 @@ mkMutation ctx table query output' allCols strfyNum =
in Mutation table query output allCols remoteJoinsCtx strfyNum

runMutation
:: (HasVersion, MonadTx m, MonadIO m)
=> Mutation -> m EncJSON
runMutation mut =
bool (mutateAndReturn mut) (mutateAndSel mut) $
::
( HasVersion
, MonadTx m
, MonadIO m
)
=> Env.Environment
-> Mutation
-> m EncJSON
runMutation env mut =
bool (mutateAndReturn env mut) (mutateAndSel env mut) $
hasNestedFld $ _mOutput mut

mutateAndReturn
:: (HasVersion, MonadTx m, MonadIO m)
=> Mutation -> m EncJSON
mutateAndReturn (Mutation qt (cte, p) mutationOutput allCols remoteJoins strfyNum) =
executeMutationOutputQuery sqlQuery (toList p) remoteJoins
::
( HasVersion
, MonadTx m
, MonadIO m
)
=> Env.Environment
-> Mutation
-> m EncJSON
mutateAndReturn env (Mutation qt (cte, p) mutationOutput allCols remoteJoins strfyNum) =
executeMutationOutputQuery env sqlQuery (toList p) remoteJoins
where
sqlQuery = Q.fromBuilder $ toSQL $
mkMutationOutputExp qt allCols Nothing cte mutationOutput strfyNum
@ -88,29 +101,40 @@ conditions **might** see some degradation.
-}

mutateAndSel
:: (HasVersion, MonadTx m, MonadIO m)
=> Mutation -> m EncJSON
mutateAndSel (Mutation qt q mutationOutput allCols remoteJoins strfyNum) = do
::
( HasVersion
, MonadTx m
, MonadIO m
)
=> Env.Environment
-> Mutation
-> m EncJSON
mutateAndSel env (Mutation qt q mutationOutput allCols remoteJoins strfyNum) = do
-- Perform mutation and fetch unique columns
MutateResp _ columnVals <- liftTx $ mutateAndFetchCols qt allCols q strfyNum
selCTE <- mkSelCTEFromColVals qt allCols columnVals
let selWith = mkMutationOutputExp qt allCols Nothing selCTE mutationOutput strfyNum
-- Perform select query and fetch returning fields
executeMutationOutputQuery (Q.fromBuilder $ toSQL selWith) [] remoteJoins
executeMutationOutputQuery env (Q.fromBuilder $ toSQL selWith) [] remoteJoins

executeMutationOutputQuery
:: (HasVersion, MonadTx m, MonadIO m)
=> Q.Query -- ^ SQL query
::
( HasVersion
, MonadTx m
, MonadIO m
)
=> Env.Environment
-> Q.Query -- ^ SQL query
-> [Q.PrepArg] -- ^ Prepared params
-> Maybe (RemoteJoins, MutationRemoteJoinCtx) -- ^ Remote joins context
-> m EncJSON
executeMutationOutputQuery query prepArgs = \case
executeMutationOutputQuery env query prepArgs = \case
Nothing ->
runIdentity . Q.getRow
-- See Note [Prepared statements in Mutations]
<$> liftTx (Q.rawQE dmlTxErrorHandler query prepArgs False)
Just (remoteJoins, (httpManager, reqHeaders, userInfo)) ->
executeQueryWithRemoteJoins httpManager reqHeaders userInfo query prepArgs remoteJoins
executeQueryWithRemoteJoins env httpManager reqHeaders userInfo query prepArgs remoteJoins

mutateAndFetchCols
:: QualifiedTable
@ -30,6 +30,7 @@ import qualified Hasura.SQL.DML as S

import qualified Data.Aeson as A
import qualified Data.Aeson.Ordered as AO
import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as Map
import qualified Data.HashMap.Strict.Extended as Map
import qualified Data.HashMap.Strict.InsOrd as OMap
@ -44,15 +45,19 @@ import qualified Network.HTTP.Types as N

-- | Executes given query and fetch response JSON from Postgres. Substitutes remote relationship fields.
executeQueryWithRemoteJoins
:: (HasVersion, MonadTx m, MonadIO m)
=> HTTP.Manager
:: ( HasVersion
, MonadTx m
, MonadIO m
)
=> Env.Environment
-> HTTP.Manager
-> [N.Header]
-> UserInfo
-> Q.Query
-> [Q.PrepArg]
-> RemoteJoins
-> m EncJSON
executeQueryWithRemoteJoins manager reqHdrs userInfo q prepArgs rjs = do
executeQueryWithRemoteJoins env manager reqHdrs userInfo q prepArgs rjs = do
-- Step 1: Perform the query on database and fetch the response
pgRes <- runIdentity . Q.getRow <$> liftTx (Q.rawQE dmlTxErrorHandler q prepArgs True)
jsonRes <- either (throw500 . T.pack) pure $ AO.eitherDecode pgRes
@ -60,7 +65,7 @@ executeQueryWithRemoteJoins manager reqHdrs userInfo q prepArgs rjs = do
compositeJson <- traverseQueryResponseJSON rjMap jsonRes
let remoteJoins = collectRemoteFields compositeJson
-- Step 3: Make queries to remote server and fetch graphql response
remoteServerResp <- fetchRemoteJoinFields manager reqHdrs userInfo remoteJoins
remoteServerResp <- fetchRemoteJoinFields env manager reqHdrs userInfo remoteJoins
-- Step 4: Replace remote fields in composite json with remote join values
AO.toEncJSON <$> replaceRemoteFields compositeJson remoteServerResp
where
@ -406,19 +411,20 @@ fetchRemoteJoinFields
, MonadError QErr m
, MonadIO m
)
=> HTTP.Manager
=> Env.Environment
-> HTTP.Manager
-> [N.Header]
-> UserInfo
-> [RemoteJoinField]
-> m AO.Object
fetchRemoteJoinFields manager reqHdrs userInfo remoteJoins = do
fetchRemoteJoinFields env manager reqHdrs userInfo remoteJoins = do
results <- forM (Map.toList remoteSchemaBatch) $ \(rsi, batch) -> do
let batchList = toList batch
gqlReq = fieldsToRequest G.OperationTypeQuery
(map _rjfField batchList)
gqlReqUnparsed = (GQLQueryText . G.renderExecutableDoc . G.ExecutableDocument . unGQLExecDoc) <$> gqlReq
-- NOTE: discard remote headers (for now):
(_, _, respBody) <- execRemoteGQ' manager userInfo reqHdrs gqlReqUnparsed rsi G.OperationTypeQuery
(_, _, respBody) <- execRemoteGQ' env manager userInfo reqHdrs gqlReqUnparsed rsi G.OperationTypeQuery
case AO.eitherDecode respBody of
Left e -> throw500 $ "Remote server response is not valid JSON: " <> T.pack e
Right r -> do
@ -6,6 +6,7 @@ module Hasura.RQL.DML.Select.Internal
)
where

import Instances.TH.Lift ()
import Control.Lens hiding (op)
import Control.Monad.Writer.Strict

@ -28,6 +28,7 @@ import Hasura.SQL.Types

import qualified Database.PG.Query as Q
import qualified Hasura.SQL.DML as S
import qualified Data.Environment as Env


-- NOTE: This function can be improved, because we use
@ -76,7 +77,6 @@ mkUpdateCTE (AnnUpd tn opExps (permFltr, wc) chk _ columnsInfo) =
tableFltrExpr = toSQLBoolExp (S.QualTable tn) $ andAnnBoolExps permFltr wc
checkExpr = toSQLBoolExp (S.QualTable tn) chk


expandOperator :: [PGColumnInfo] -> (PGCol, UpdOpExpG S.SQLExp) -> S.SetExpItem
expandOperator infos (column, op) = S.SetExpItem $ (column,) $ case op of
UpdSet e -> e
@ -97,18 +97,6 @@ expandOperator infos (column, op) = S.SetExpItem $ (column,) $ case op of
Just (PGColumnScalar s) -> S.mkTypeAnn $ PGTypeScalar s
_ -> S.numericTypeAnn

execUpdateQuery
:: (HasVersion, MonadTx m, MonadIO m)
=> Bool
-> Maybe MutationRemoteJoinCtx
-> (AnnUpd, DS.Seq Q.PrepArg)
-> m EncJSON
execUpdateQuery strfyNum remoteJoinCtx (u, p) =
runMutation $ mkMutation remoteJoinCtx (uqp1Table u) (updateCTE, p)
(uqp1Output u) (uqp1AllCols u) strfyNum
where
updateCTE = mkUpdateCTE u

convInc
:: (QErrM m)
=> (PGColumnType -> Value -> m S.SQLExp)
@ -259,11 +247,28 @@ validateUpdateQuery
validateUpdateQuery =
runDMLP1T . validateUpdateQueryWith sessVarFromCurrentSetting binRHSBuilder

execUpdateQuery
::
( HasVersion
, MonadTx m
, MonadIO m
)
=> Env.Environment
-> Bool
-> Maybe MutationRemoteJoinCtx
-> (AnnUpd, DS.Seq Q.PrepArg)
-> m EncJSON
execUpdateQuery env strfyNum remoteJoinCtx (u, p) =
runMutation env $ mkMutation remoteJoinCtx (uqp1Table u) (updateCTE, p)
(uqp1Output u) (uqp1AllCols u) strfyNum
where
updateCTE = mkUpdateCTE u

runUpdate
:: ( HasVersion, QErrM m, UserInfoM m, CacheRM m
, MonadTx m, HasSQLGenCtx m, MonadIO m
)
=> UpdateQuery -> m EncJSON
runUpdate q = do
=> Env.Environment -> UpdateQuery -> m EncJSON
runUpdate env q = do
strfyNum <- stringifyNum <$> askSQLGenCtx
validateUpdateQuery q >>= execUpdateQuery strfyNum Nothing
validateUpdateQuery q >>= execUpdateQuery env strfyNum Nothing
@ -44,7 +44,6 @@ import Hasura.Prelude
import Hasura.Session
import Hasura.SQL.Types


import Hasura.Db as R
import Hasura.RQL.Types.Action as R
import Hasura.RQL.Types.BoolExp as R
@ -63,6 +63,7 @@ import Language.Haskell.TH.Syntax (Lift, Q, TExp)

import qualified Data.HashMap.Strict as HM
import qualified Data.Text as T
import qualified Data.Environment as Env
import qualified Database.PG.Query as Q
import qualified Language.GraphQL.Draft.Syntax as G
import qualified Language.Haskell.TH.Syntax as TH
@ -304,8 +305,8 @@ instance FromJSON InputWebhook where
Left e -> fail $ "Parsing URL template failed: " ++ e
Right v -> pure $ InputWebhook v

resolveWebhook :: (QErrM m,MonadIO m) => InputWebhook -> m ResolvedWebhook
resolveWebhook (InputWebhook urlTemplate) = do
eitherRenderedTemplate <- renderURLTemplate urlTemplate
resolveWebhook :: QErrM m => Env.Environment -> InputWebhook -> m ResolvedWebhook
resolveWebhook env (InputWebhook urlTemplate) = do
let eitherRenderedTemplate = renderURLTemplate env urlTemplate
either (throw400 Unexpected . T.pack)
(pure . ResolvedWebhook) eitherRenderedTemplate
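resolveWebhook drops both MonadIO and the monadic bind around template rendering, which implies renderURLTemplate itself became a pure function of the environment. Its new signature is not shown in this excerpt; a plausible shape, stated only as an assumption:

    -- renderURLTemplate :: Env.Environment -> URLTemplate -> Either String Text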
@ -3,7 +3,6 @@ module Hasura.RQL.Types.RemoteSchema where
import Hasura.Prelude
import Hasura.RQL.Types.Common (NonEmptyText)
import Language.Haskell.TH.Syntax (Lift)
import System.Environment (lookupEnv)

import qualified Data.Aeson as J
import qualified Data.Aeson.Casing as J
@ -11,6 +10,7 @@ import qualified Data.Aeson.TH as J
import qualified Data.Text as T
import qualified Database.PG.Query as Q
import qualified Network.URI.Extended as N
import qualified Data.Environment as Env

import Hasura.Incremental (Cacheable)
import Hasura.RQL.DDL.Headers (HeaderConf (..))
@ -77,27 +77,26 @@ newtype RemoteSchemaNameQuery

$(J.deriveJSON (J.aesonDrop 5 J.snakeCase) ''RemoteSchemaNameQuery)

getUrlFromEnv :: (MonadIO m, MonadError QErr m) => Text -> m N.URI
getUrlFromEnv urlFromEnv = do
mEnv <- liftIO . lookupEnv $ T.unpack urlFromEnv
env <- maybe (throw400 InvalidParams $ envNotFoundMsg urlFromEnv) return
mEnv
maybe (throw400 InvalidParams $ invalidUri env) return $ N.parseURI env
getUrlFromEnv :: (MonadIO m, MonadError QErr m) => Env.Environment -> Text -> m N.URI
getUrlFromEnv env urlFromEnv = do
let mEnv = Env.lookupEnv env $ T.unpack urlFromEnv
uri <- maybe (throw400 InvalidParams $ envNotFoundMsg urlFromEnv) return mEnv
maybe (throw400 InvalidParams $ invalidUri uri) return $ N.parseURI uri
where
invalidUri uri = "not a valid URI: " <> T.pack uri
envNotFoundMsg e =
"environment variable '" <> e <> "' not set"
invalidUri x = "not a valid URI: " <> T.pack x
envNotFoundMsg e = "environment variable '" <> e <> "' not set"

validateRemoteSchemaDef
:: (MonadError QErr m, MonadIO m)
=> RemoteSchemaDef
=> Env.Environment
-> RemoteSchemaDef
-> m RemoteSchemaInfo
validateRemoteSchemaDef (RemoteSchemaDef mUrl mUrlEnv hdrC fwdHdrs mTimeout) =
validateRemoteSchemaDef env (RemoteSchemaDef mUrl mUrlEnv hdrC fwdHdrs mTimeout) =
case (mUrl, mUrlEnv) of
(Just url, Nothing) ->
return $ RemoteSchemaInfo url hdrs fwdHdrs timeout
(Nothing, Just urlEnv) -> do
url <- getUrlFromEnv urlEnv
url <- getUrlFromEnv env urlEnv
return $ RemoteSchemaInfo url hdrs fwdHdrs timeout
(Nothing, Nothing) ->
throw400 InvalidParams "both `url` and `url_from_env` can't be empty"
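getUrlFromEnv keeps MonadIO in its context even though the lookup itself is now pure; only URI parsing and error reporting remain. A hedged usage sketch (runner stack and variable name assumed, API as in the diff above):

    checkUrl :: IO ()
    checkUrl = do
      let env = Env.mkEnvironment [("REMOTE_URL", "https://graphql.example.com")]
      result <- runExceptT (getUrlFromEnv env "REMOTE_URL")
      case result of
        Left _err -> putStrLn "lookup or parse failed"
        Right uri -> print uri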
@ -139,7 +139,6 @@ import Hasura.RQL.Types.Table
import Hasura.Session
import Hasura.SQL.Types


import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
@ -9,6 +9,7 @@ import Data.Aeson.Casing
import Data.Aeson.TH
import Data.Time (UTCTime)

import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as HM
import qualified Data.Text as T
import qualified Database.PG.Query as Q
@ -41,7 +42,6 @@ import Hasura.Server.Utils
import Hasura.Server.Version (HasVersion)
import Hasura.Session


data RQLQueryV1
= RQAddExistingTableOrView !TrackTable
| RQTrackTable !TrackTable
@ -191,12 +191,12 @@ recordSchemaUpdate instanceId invalidations =

runQuery
:: (HasVersion, MonadIO m, MonadError QErr m)
=> PGExecCtx -> InstanceId
=> Env.Environment -> PGExecCtx -> InstanceId
-> UserInfo -> RebuildableSchemaCache Run -> HTTP.Manager
-> SQLGenCtx -> SystemDefined -> RQLQuery -> m (EncJSON, RebuildableSchemaCache Run)
runQuery pgExecCtx instanceId userInfo sc hMgr sqlGenCtx systemDefined query = do
runQuery env pgExecCtx instanceId userInfo sc hMgr sqlGenCtx systemDefined query = do
accessMode <- getQueryAccessMode query
resE <- runQueryM query
resE <- runQueryM env query
& runHasSystemDefinedT systemDefined
& runCacheRWT sc
& peelRun runCtx pgExecCtx accessMode
@ -221,85 +221,85 @@ runQuery pgExecCtx instanceId userInfo sc hMgr sqlGenCtx systemDefined query = d
-- by hand.
queryModifiesSchemaCache :: RQLQuery -> Bool
queryModifiesSchemaCache (RQV1 qi) = case qi of
RQAddExistingTableOrView _ -> True
RQTrackTable _ -> True
RQUntrackTable _ -> True
RQTrackFunction _ -> True
RQUntrackFunction _ -> True
RQSetTableIsEnum _ -> True
RQAddExistingTableOrView _ -> True
RQTrackTable _ -> True
RQUntrackTable _ -> True
RQTrackFunction _ -> True
RQUntrackFunction _ -> True
RQSetTableIsEnum _ -> True

RQCreateObjectRelationship _ -> True
RQCreateArrayRelationship _ -> True
RQDropRelationship _ -> True
RQSetRelationshipComment _ -> False
RQRenameRelationship _ -> True
RQCreateObjectRelationship _ -> True
RQCreateArrayRelationship _ -> True
RQDropRelationship _ -> True
RQSetRelationshipComment _ -> False
RQRenameRelationship _ -> True

RQAddComputedField _ -> True
RQDropComputedField _ -> True
RQAddComputedField _ -> True
RQDropComputedField _ -> True

RQCreateRemoteRelationship _ -> True
RQUpdateRemoteRelationship _ -> True
RQDeleteRemoteRelationship _ -> True
RQCreateRemoteRelationship _ -> True
RQUpdateRemoteRelationship _ -> True
RQDeleteRemoteRelationship _ -> True

RQCreateInsertPermission _ -> True
RQCreateSelectPermission _ -> True
RQCreateUpdatePermission _ -> True
RQCreateDeletePermission _ -> True
RQCreateInsertPermission _ -> True
RQCreateSelectPermission _ -> True
RQCreateUpdatePermission _ -> True
RQCreateDeletePermission _ -> True

RQDropInsertPermission _ -> True
RQDropSelectPermission _ -> True
RQDropUpdatePermission _ -> True
RQDropDeletePermission _ -> True
RQSetPermissionComment _ -> False
RQDropInsertPermission _ -> True
RQDropSelectPermission _ -> True
RQDropUpdatePermission _ -> True
RQDropDeletePermission _ -> True
RQSetPermissionComment _ -> False

RQGetInconsistentMetadata _ -> False
RQDropInconsistentMetadata _ -> True
RQGetInconsistentMetadata _ -> False
RQDropInconsistentMetadata _ -> True

RQInsert _ -> False
RQSelect _ -> False
RQUpdate _ -> False
RQDelete _ -> False
RQCount _ -> False
RQInsert _ -> False
RQSelect _ -> False
RQUpdate _ -> False
RQDelete _ -> False
RQCount _ -> False

RQAddRemoteSchema _ -> True
RQRemoveRemoteSchema _ -> True
RQReloadRemoteSchema _ -> True
RQIntrospectRemoteSchema _ -> False
RQAddRemoteSchema _ -> True
RQRemoveRemoteSchema _ -> True
RQReloadRemoteSchema _ -> True
RQIntrospectRemoteSchema _ -> False

RQCreateEventTrigger _ -> True
RQDeleteEventTrigger _ -> True
RQRedeliverEvent _ -> False
RQInvokeEventTrigger _ -> False
RQCreateEventTrigger _ -> True
RQDeleteEventTrigger _ -> True
RQRedeliverEvent _ -> False
RQInvokeEventTrigger _ -> False

RQCreateCronTrigger _ -> True
RQDeleteCronTrigger _ -> True
RQCreateCronTrigger _ -> True
RQDeleteCronTrigger _ -> True

RQCreateScheduledEvent _ -> False
RQCreateScheduledEvent _ -> False

RQCreateQueryCollection _ -> True
RQDropQueryCollection _ -> True
RQAddQueryToCollection _ -> True
RQDropQueryFromCollection _ -> True
RQAddCollectionToAllowlist _ -> True
RQDropCollectionFromAllowlist _ -> True
RQCreateQueryCollection _ -> True
RQDropQueryCollection _ -> True
RQAddQueryToCollection _ -> True
RQDropQueryFromCollection _ -> True
RQAddCollectionToAllowlist _ -> True
RQDropCollectionFromAllowlist _ -> True

RQRunSql q -> isSchemaCacheBuildRequiredRunSQL q
RQRunSql q -> isSchemaCacheBuildRequiredRunSQL q

RQReplaceMetadata _ -> True
RQExportMetadata _ -> False
RQClearMetadata _ -> True
RQReloadMetadata _ -> True
RQReplaceMetadata _ -> True
RQExportMetadata _ -> False
RQClearMetadata _ -> True
RQReloadMetadata _ -> True

RQCreateAction _ -> True
RQDropAction _ -> True
RQUpdateAction _ -> True
RQCreateActionPermission _ -> True
RQDropActionPermission _ -> True
RQCreateAction _ -> True
RQDropAction _ -> True
RQUpdateAction _ -> True
RQCreateActionPermission _ -> True
RQDropActionPermission _ -> True

RQDumpInternalState _ -> False
RQSetCustomTypes _ -> True
RQDumpInternalState _ -> False
RQSetCustomTypes _ -> True

RQBulk qs -> any queryModifiesSchemaCache qs
RQBulk qs -> any queryModifiesSchemaCache qs

queryModifiesSchemaCache (RQV2 qi) = case qi of
RQV2TrackTable _ -> True
@ -346,9 +346,10 @@ runQueryM
, MonadIO m, MonadUnique m, HasHttpManager m, HasSQLGenCtx m
, HasSystemDefined m
)
=> RQLQuery
=> Env.Environment
-> RQLQuery
-> m EncJSON
runQueryM rq = withPathK "args" $ case rq of
runQueryM env rq = withPathK "args" $ case rq of
RQV1 q -> runQueryV1M q
RQV2 q -> runQueryV2M q
where
@ -384,13 +385,13 @@ runQueryM rq = withPathK "args" $ case rq of
RQGetInconsistentMetadata q -> runGetInconsistentMetadata q
RQDropInconsistentMetadata q -> runDropInconsistentMetadata q

RQInsert q -> runInsert q
RQInsert q -> runInsert env q
RQSelect q -> runSelect q
RQUpdate q -> runUpdate q
RQDelete q -> runDelete q
RQUpdate q -> runUpdate env q
RQDelete q -> runDelete env q
RQCount q -> runCount q

RQAddRemoteSchema q -> runAddRemoteSchema q
RQAddRemoteSchema q -> runAddRemoteSchema env q
RQRemoveRemoteSchema q -> runRemoveRemoteSchema q
RQReloadRemoteSchema q -> runReloadRemoteSchema q
RQIntrospectRemoteSchema q -> runIntrospectRemoteSchema q
@ -433,7 +434,7 @@ runQueryM rq = withPathK "args" $ case rq of

RQSetCustomTypes q -> runSetCustomTypes q

RQBulk qs -> encJFromList <$> indexedMapM runQueryM qs
RQBulk qs -> encJFromList <$> indexedMapM (runQueryM env) qs

runQueryV2M = \case
RQV2TrackTable q -> runTrackTableV2Q q
@ -444,86 +445,86 @@ runQueryM rq = withPathK "args" $ case rq of
requiresAdmin :: RQLQuery -> Bool
requiresAdmin = \case
RQV1 q -> case q of
RQAddExistingTableOrView _ -> True
RQTrackTable _ -> True
RQUntrackTable _ -> True
RQSetTableIsEnum _ -> True
RQAddExistingTableOrView _ -> True
RQTrackTable _ -> True
RQUntrackTable _ -> True
RQSetTableIsEnum _ -> True

RQTrackFunction _ -> True
RQUntrackFunction _ -> True
RQTrackFunction _ -> True
RQUntrackFunction _ -> True

RQCreateObjectRelationship _ -> True
RQCreateArrayRelationship _ -> True
RQDropRelationship _ -> True
RQSetRelationshipComment _ -> True
RQRenameRelationship _ -> True
RQCreateObjectRelationship _ -> True
RQCreateArrayRelationship _ -> True
RQDropRelationship _ -> True
RQSetRelationshipComment _ -> True
RQRenameRelationship _ -> True

RQAddComputedField _ -> True
RQDropComputedField _ -> True
RQAddComputedField _ -> True
RQDropComputedField _ -> True

RQCreateRemoteRelationship _ -> True
RQUpdateRemoteRelationship _ -> True
RQDeleteRemoteRelationship _ -> True
RQCreateRemoteRelationship _ -> True
RQUpdateRemoteRelationship _ -> True
RQDeleteRemoteRelationship _ -> True

RQCreateInsertPermission _ -> True
RQCreateSelectPermission _ -> True
RQCreateUpdatePermission _ -> True
RQCreateDeletePermission _ -> True
RQCreateInsertPermission _ -> True
RQCreateSelectPermission _ -> True
RQCreateUpdatePermission _ -> True
RQCreateDeletePermission _ -> True

RQDropInsertPermission _ -> True
RQDropSelectPermission _ -> True
RQDropUpdatePermission _ -> True
RQDropDeletePermission _ -> True
RQSetPermissionComment _ -> True
RQDropInsertPermission _ -> True
RQDropSelectPermission _ -> True
RQDropUpdatePermission _ -> True
RQDropDeletePermission _ -> True
RQSetPermissionComment _ -> True

RQGetInconsistentMetadata _ -> True
RQDropInconsistentMetadata _ -> True
RQGetInconsistentMetadata _ -> True
RQDropInconsistentMetadata _ -> True

RQInsert _ -> False
RQSelect _ -> False
RQUpdate _ -> False
RQDelete _ -> False
RQCount _ -> False
RQInsert _ -> False
RQSelect _ -> False
RQUpdate _ -> False
RQDelete _ -> False
RQCount _ -> False

RQAddRemoteSchema _ -> True
RQRemoveRemoteSchema _ -> True
RQReloadRemoteSchema _ -> True
RQIntrospectRemoteSchema _ -> True
RQAddRemoteSchema _ -> True
RQRemoveRemoteSchema _ -> True
RQReloadRemoteSchema _ -> True
RQIntrospectRemoteSchema _ -> True

RQCreateEventTrigger _ -> True
RQDeleteEventTrigger _ -> True
RQRedeliverEvent _ -> True
RQInvokeEventTrigger _ -> True
RQCreateEventTrigger _ -> True
RQDeleteEventTrigger _ -> True
RQRedeliverEvent _ -> True
RQInvokeEventTrigger _ -> True

RQCreateCronTrigger _ -> True
RQDeleteCronTrigger _ -> True
RQCreateCronTrigger _ -> True
RQDeleteCronTrigger _ -> True

RQCreateScheduledEvent _ -> True
RQCreateScheduledEvent _ -> True

RQCreateQueryCollection _ -> True
RQDropQueryCollection _ -> True
RQAddQueryToCollection _ -> True
RQDropQueryFromCollection _ -> True
RQAddCollectionToAllowlist _ -> True
RQDropCollectionFromAllowlist _ -> True
RQCreateQueryCollection _ -> True
RQDropQueryCollection _ -> True
RQAddQueryToCollection _ -> True
RQDropQueryFromCollection _ -> True
RQAddCollectionToAllowlist _ -> True
RQDropCollectionFromAllowlist _ -> True

RQReplaceMetadata _ -> True
RQClearMetadata _ -> True
RQExportMetadata _ -> True
RQReloadMetadata _ -> True
RQReplaceMetadata _ -> True
RQClearMetadata _ -> True
RQExportMetadata _ -> True
RQReloadMetadata _ -> True

RQCreateAction _ -> True
RQDropAction _ -> True
RQUpdateAction _ -> True
RQCreateActionPermission _ -> True
RQDropActionPermission _ -> True
RQCreateAction _ -> True
RQDropAction _ -> True
RQUpdateAction _ -> True
RQCreateActionPermission _ -> True
RQDropActionPermission _ -> True

RQDumpInternalState _ -> True
RQSetCustomTypes _ -> True
RQDumpInternalState _ -> True
RQSetCustomTypes _ -> True

RQRunSql _ -> True
RQRunSql _ -> True

RQBulk qs -> any requiresAdmin qs
RQBulk qs -> any requiresAdmin qs

RQV2 q -> case q of
RQV2TrackTable _ -> True
@ -3,10 +3,24 @@
|
||||
module Hasura.Server.App where
|
||||
|
||||
import Hasura.Prelude hiding (get, put)
|
||||
import Control.Concurrent.MVar.Lifted
|
||||
import Control.Exception (IOException, try)
|
||||
import Control.Monad.Trans.Control (MonadBaseControl)
|
||||
|
||||
import Control.Monad.Stateless
|
||||
import Data.Aeson hiding (json)
|
||||
import Data.Int (Int64)
|
||||
import Data.IORef
|
||||
import Data.Time.Clock (UTCTime)
|
||||
import Data.Time.Clock.POSIX (getPOSIXTime)
|
||||
import Network.Mime (defaultMimeLookup)
|
||||
import System.FilePath (joinPath, takeFileName)
|
||||
import Web.Spock.Core ((<//>))
|
||||
|
||||
import qualified Control.Concurrent.Async.Lifted.Safe as LA
|
||||
import qualified Data.ByteString.Lazy as BL
|
||||
import qualified Data.CaseInsensitive as CI
|
||||
import qualified Data.Environment as Env
|
||||
import qualified Data.HashMap.Strict as M
|
||||
import qualified Data.HashSet as S
|
||||
import qualified Data.Text as T
|
||||
@ -20,21 +34,6 @@ import qualified System.Metrics.Json as EKG
|
||||
import qualified Text.Mustache as M
|
||||
import qualified Web.Spock.Core as Spock
|
||||
|
||||
import Control.Concurrent.MVar.Lifted
|
||||
import Control.Exception (IOException, try)
|
||||
import Control.Monad.Stateless
|
||||
import Control.Monad.Trans.Control (MonadBaseControl)
|
||||
import Control.Monad.Unique
|
||||
import Data.Aeson hiding (json)
|
||||
import Data.Int (Int64)
|
||||
import Data.IORef
|
||||
import Data.Time.Clock (UTCTime)
|
||||
import Data.Time.Clock.POSIX (getPOSIXTime)
|
||||
import Network.Mime (defaultMimeLookup)
|
||||
import System.Exit (exitFailure)
|
||||
import System.FilePath (joinPath, takeFileName)
|
||||
import Web.Spock.Core ((<//>))
|
||||
|
||||
import qualified Hasura.GraphQL.Execute as E
|
||||
import qualified Hasura.GraphQL.Execute.LiveQuery as EL
|
||||
import qualified Hasura.GraphQL.Explain as GE
|
||||
@ -104,6 +103,7 @@ data ServerCtx
|
||||
, scEnableAllowlist :: !Bool
|
||||
, scEkgStore :: !EKG.Store
|
||||
, scResponseInternalErrorsConfig :: !ResponseInternalErrorsConfig
|
||||
, scEnvironment :: !Env.Environment
|
||||
}
|
||||
|
||||
data HandlerCtx
|
||||
@ -144,7 +144,7 @@ withSCUpdate
|
||||
:: (MonadIO m, MonadBaseControl IO m)
|
||||
=> SchemaCacheRef -> L.Logger L.Hasura -> m (a, RebuildableSchemaCache Run) -> m a
|
||||
withSCUpdate scr logger action = do
|
||||
withMVarMasked lk $ \()-> do
|
||||
withMVarMasked lk $ \() -> do
|
||||
(!res, !newSC) <- action
|
||||
liftIO $ do
|
||||
-- update schemacache in IO reference
|
||||
@ -197,10 +197,10 @@ onlyAdmin = do
|
||||
|
||||
buildQCtx :: (MonadIO m) => Handler m QCtx
|
||||
buildQCtx = do
|
||||
scRef <- scCacheRef . hcServerCtx <$> ask
|
||||
userInfo <- asks hcUser
|
||||
cache <- getSCFromRef scRef
|
||||
sqlGenCtx <- scSQLGenCtx . hcServerCtx <$> ask
|
||||
scRef <- asks (scCacheRef . hcServerCtx)
|
||||
userInfo <- asks hcUser
|
||||
cache <- getSCFromRef scRef
|
||||
sqlGenCtx <- asks (scSQLGenCtx . hcServerCtx)
|
||||
return $ QCtx userInfo cache sqlGenCtx
|
||||
|
||||
setHeader :: MonadIO m => HTTP.Header -> Spock.ActionT m ()
@ -208,7 +208,7 @@ setHeader (headerName, headerValue) =
Spock.setHeader (bsToTxt $ CI.original headerName) (bsToTxt headerValue)

-- | Typeclass representing the metadata API authorization effect
class MetadataApiAuthorization m where
class Monad m => MetadataApiAuthorization m where
authorizeMetadataApi :: HasVersion => RQLQuery -> UserInfo -> Handler m ()

-- | The config API (/v1alpha1/config) handler
@ -242,7 +242,7 @@ mkSpockAction serverCtx qErrEncoder qErrModifier apiHandler = do

userInfoE <- fmap fst <$> lift (resolveUserInfo logger manager headers authMode)
userInfo <- either (logErrorAndResp Nothing requestId req (Left reqBody) False headers . qErrModifier)
return userInfoE
return userInfoE

let handlerState = HandlerCtx serverCtx userInfo headers requestId ipAddress
includeInternal = shouldIncludeInternal (_uiRole userInfo) $
@ -265,7 +265,7 @@ mkSpockAction serverCtx qErrEncoder qErrModifier apiHandler = do
-- log and return result
case modResult of
Left err -> let jErr = maybe (Left reqBody) (Right . toJSON) q
in logErrorAndResp (Just userInfo) requestId req jErr includeInternal headers err
in logErrorAndResp (Just userInfo) requestId req jErr includeInternal headers err
Right res -> logSuccessAndResp (Just userInfo) requestId req (fmap toJSON q) res (Just (ioWaitTime, serviceTime)) headers

where
@ -304,50 +304,55 @@ mkSpockAction serverCtx qErrEncoder qErrModifier apiHandler = do
mapM_ setHeader allRespHeaders
Spock.lazyBytes compressedResp


v1QueryHandler
:: (HasVersion, MonadIO m, MonadUnique m, MonadBaseControl IO m, MetadataApiAuthorization m)
=> RQLQuery -> Handler m (HttpResponse EncJSON)
:: (HasVersion, MonadIO m, MonadBaseControl IO m, MetadataApiAuthorization m)
=> RQLQuery
-> Handler m (HttpResponse EncJSON)
v1QueryHandler query = do
userInfo <- asks hcUser
authorizeMetadataApi query userInfo
scRef <- scCacheRef . hcServerCtx <$> ask
logger <- scLogger . hcServerCtx <$> ask
res <- bool (fst <$> dbAction) (withSCUpdate scRef logger dbAction) $
queryModifiesSchemaCache query
scRef <- asks (scCacheRef . hcServerCtx)
logger <- asks (scLogger . hcServerCtx)
res <- bool (fst <$> dbAction) (withSCUpdate scRef logger dbAction) $ queryModifiesSchemaCache query
return $ HttpResponse res []
where
-- Hit postgres
dbAction = do
userInfo <- asks hcUser
scRef <- scCacheRef . hcServerCtx <$> ask
userInfo <- asks hcUser
scRef <- asks (scCacheRef . hcServerCtx)
schemaCache <- fmap fst $ liftIO $ readIORef $ _scrCache scRef
httpMgr <- scManager . hcServerCtx <$> ask
sqlGenCtx <- scSQLGenCtx . hcServerCtx <$> ask
pgExecCtx <- scPGExecCtx . hcServerCtx <$> ask
instanceId <- scInstanceId . hcServerCtx <$> ask
runQuery pgExecCtx instanceId userInfo schemaCache httpMgr sqlGenCtx (SystemDefined False) query
httpMgr <- asks (scManager . hcServerCtx)
sqlGenCtx <- asks (scSQLGenCtx . hcServerCtx)
pgExecCtx <- asks (scPGExecCtx . hcServerCtx)
instanceId <- asks (scInstanceId . hcServerCtx)
env <- asks (scEnvironment . hcServerCtx)
runQuery env pgExecCtx instanceId userInfo schemaCache httpMgr sqlGenCtx (SystemDefined False) query

v1Alpha1GQHandler
:: (HasVersion, MonadIO m, E.MonadGQLExecutionCheck m, MonadQueryLog m)
=> E.GraphQLQueryType -> GH.GQLBatchedReqs GH.GQLQueryText -> Handler m (HttpResponse EncJSON)
v1Alpha1GQHandler queryType query = do
userInfo <- asks hcUser
reqHeaders <- asks hcReqHeaders
ipAddress <- asks hcSourceIpAddress
requestId <- asks hcRequestId
manager <- scManager . hcServerCtx <$> ask
scRef <- scCacheRef . hcServerCtx <$> ask
(sc, scVer) <- liftIO $ readIORef $ _scrCache scRef
pgExecCtx <- scPGExecCtx . hcServerCtx <$> ask
sqlGenCtx <- scSQLGenCtx . hcServerCtx <$> ask
planCache <- scPlanCache . hcServerCtx <$> ask
enableAL <- scEnableAllowlist . hcServerCtx <$> ask
logger <- scLogger . hcServerCtx <$> ask
userInfo <- asks hcUser
reqHeaders <- asks hcReqHeaders
ipAddress <- asks hcSourceIpAddress
requestId <- asks hcRequestId
manager <- asks (scManager . hcServerCtx)
scRef <- asks (scCacheRef . hcServerCtx)
(sc, scVer) <- liftIO $ readIORef $ _scrCache scRef
pgExecCtx <- asks (scPGExecCtx . hcServerCtx)
sqlGenCtx <- asks (scSQLGenCtx . hcServerCtx)
planCache <- asks (scPlanCache . hcServerCtx)
enableAL <- asks (scEnableAllowlist . hcServerCtx)
logger <- asks (scLogger . hcServerCtx)
responseErrorsConfig <- asks (scResponseInternalErrorsConfig . hcServerCtx)
env <- asks (scEnvironment . hcServerCtx)

let execCtx = E.ExecutionCtx logger sqlGenCtx pgExecCtx planCache
(lastBuiltSchemaCache sc) scVer manager enableAL

flip runReaderT execCtx $
GH.runGQBatched requestId responseErrorsConfig userInfo ipAddress reqHeaders queryType query
GH.runGQBatched env requestId responseErrorsConfig userInfo ipAddress reqHeaders queryType query

v1GQHandler
:: (HasVersion, MonadIO m, E.MonadGQLExecutionCheck m, MonadQueryLog m)
@ -357,12 +362,17 @@ v1GQHandler = v1Alpha1GQHandler E.QueryHasura

v1GQRelayHandler
:: (HasVersion, MonadIO m, E.MonadGQLExecutionCheck m, MonadQueryLog m)
=> GH.GQLBatchedReqs GH.GQLQueryText -> Handler m (HttpResponse EncJSON)
=> GH.GQLBatchedReqs GH.GQLQueryText
-> Handler m (HttpResponse EncJSON)
v1GQRelayHandler = v1Alpha1GQHandler E.QueryRelay

gqlExplainHandler
:: (HasVersion, MonadIO m)
=> GE.GQLExplain -> Handler m (HttpResponse EncJSON)
:: forall m
. ( HasVersion
, MonadIO m
)
=> GE.GQLExplain
-> Handler m (HttpResponse EncJSON)
gqlExplainHandler query = do
onlyAdmin
scRef <- scCacheRef . hcServerCtx <$> ask
@ -375,7 +385,7 @@ gqlExplainHandler query = do
v1Alpha1PGDumpHandler :: (MonadIO m) => PGD.PGDumpReqBody -> Handler m APIResp
v1Alpha1PGDumpHandler b = do
onlyAdmin
ci <- scConnInfo . hcServerCtx <$> ask
ci <- asks (scConnInfo . hcServerCtx)
output <- PGD.execPGDump b ci
return $ RawResp $ HttpResponse output [sqlHeader]

@ -438,7 +448,7 @@ queryParsers =
return $ f q

legacyQueryHandler
:: (HasVersion, MonadIO m, MonadUnique m, MonadBaseControl IO m, MetadataApiAuthorization m)
:: (HasVersion, MonadIO m, MonadBaseControl IO m, MetadataApiAuthorization m)
=> TableName -> T.Text -> Object
-> Handler m (HttpResponse EncJSON)
legacyQueryHandler tn queryType req =
@ -460,58 +470,67 @@ configApiGetHandler serverCtx@ServerCtx{..} consoleAssetsDir =
(EL._lqsOptions $ scLQState) consoleAssetsDir
return $ JSONResp $ HttpResponse (encJFromJValue res) []

initErrExit :: QErr -> IO a
initErrExit e = do
putStrLn $
"failed to build schema-cache because of inconsistent metadata: "
<> (show e)
exitFailure

data HasuraApp
= HasuraApp
{ _hapApplication :: !Wai.Application
, _hapSchemaRef :: !SchemaCacheRef
, _hapCacheBuildTime :: !(Maybe UTCTime)
, _hapShutdown :: !(IO ())
{ _hapApplication :: !Wai.Application
, _hapSchemaRef :: !SchemaCacheRef
, _hapCacheBuildTime :: !(Maybe UTCTime)
, _hapShutdownWsServer :: !(IO ())
}

-- TODO: Put Env into ServerCtx?

mkWaiApp
:: forall m.
( HasVersion
, MonadIO m
, MonadUnique m
-- , MonadUnique m
, MonadStateless IO m
, LA.Forall (LA.Pure m)
, ConsoleRenderer m
, HttpLog m
, MonadQueryLog m
, UserAuthentication m
, MetadataApiAuthorization m
, E.MonadGQLExecutionCheck m
, MonadConfigApiHandler m
, MonadQueryLog m
, WS.MonadWSLog m
)
=> Q.TxIsolation
=> Env.Environment
-- ^ Set of environment variables for reference in UIs
-> Q.TxIsolation
-- ^ postgres transaction isolation to be used in the entire app
-> L.Logger L.Hasura
-- ^ a 'L.Hasura' specific logger
-> SQLGenCtx
-> Bool
-- ^ is AllowList enabled - TODO: change this boolean to sumtype
-> Q.PGPool
-> Maybe PGExecCtx
-> Q.ConnInfo
-- ^ postgres connection parameters
-> HTTP.Manager
-- ^ HTTP manager so that we can re-use sessions
-> AuthMode
-- ^ 'AuthMode' in which the application should operate
-> CorsConfig
-> Bool
-- ^ is console enabled - TODO: better type
-> Maybe Text
-- ^ filepath to the console static assets directory - TODO: better type
-> Bool
-- ^ is telemetry enabled
-> InstanceId
-- ^ each application, when run, gets an 'InstanceId'. this is used at various places including
-- schema syncing and telemetry
-> S.HashSet API
-- ^ set of the enabled 'API's
-> EL.LiveQueriesOptions
-> E.PlanCacheOptions
-> ResponseInternalErrorsConfig
-> (RebuildableSchemaCache Run, Maybe UTCTime)
-> m HasuraApp
mkWaiApp isoLevel logger sqlGenCtx enableAL pool pgExecCtxCustom ci httpManager mode corsCfg enableConsole consoleAssetsDir
mkWaiApp env isoLevel logger sqlGenCtx enableAL pool pgExecCtxCustom ci httpManager mode corsCfg enableConsole consoleAssetsDir
enableTelemetry instanceId apis lqOpts planCacheOptions responseErrorsConfig (schemaCache, cacheBuiltTime) = do

(planCache, schemaCacheRef) <- initialiseCache
@ -540,6 +559,7 @@ mkWaiApp isoLevel logger sqlGenCtx enableAL pool pgExecCtxCustom ci httpManager
, scLQState = lqState
, scEnableAllowlist = enableAL
, scEkgStore = ekgStore
, scEnvironment = env
, scResponseInternalErrorsConfig = responseErrorsConfig
}

@ -551,7 +571,7 @@ mkWaiApp isoLevel logger sqlGenCtx enableAL pool pgExecCtxCustom ci httpManager
Spock.spockAsApp $ Spock.spockT lowerIO $
httpApp corsCfg serverCtx enableConsole consoleAssetsDir enableTelemetry

let wsServerApp = WS.createWSServerApp mode wsServerEnv
let wsServerApp = WS.createWSServerApp env mode wsServerEnv -- TODO: Lyndon: Can we pass environment through wsServerEnv?
stopWSServer = WS.stopWSServerApp wsServerEnv

waiApp <- liftWithStateless $ \lowerIO ->
@ -570,10 +590,11 @@ mkWaiApp isoLevel logger sqlGenCtx enableAL pool pgExecCtxCustom ci httpManager
let cacheRef = SchemaCacheRef cacheLock cacheCell (E.clearPlanCache planCache)
pure (planCache, cacheRef)


httpApp
:: ( HasVersion
, MonadIO m
, MonadUnique m
-- , MonadUnique m
, MonadBaseControl IO m
, ConsoleRenderer m
, HttpLog m
@ -626,7 +647,6 @@ httpApp corsCfg serverCtx enableConsole consoleAssetsDir enableTelemetry = do
mkAPIRespHandler $ legacyQueryHandler (TableName tableName) queryType

when enablePGDump $

Spock.post "v1alpha1/pg_dump" $ spockAction encodeQErr id $
mkPostHandler v1Alpha1PGDumpHandler

@ -640,7 +660,7 @@ httpApp corsCfg serverCtx enableConsole consoleAssetsDir enableTelemetry = do
mkPostHandler $ mkAPIRespHandler v1GQHandler

Spock.post "v1beta1/relay" $ spockAction GH.encodeGQErr allMod200 $
mkPostHandler $ mkAPIRespHandler v1GQRelayHandler
mkPostHandler $ mkAPIRespHandler $ v1GQRelayHandler

when (isDeveloperAPIEnabled serverCtx) $ do
Spock.get "dev/ekg" $ spockAction encodeQErr id $
@ -679,18 +699,13 @@ httpApp corsCfg serverCtx enableConsole consoleAssetsDir enableTelemetry = do
-> (QErr -> QErr) -> APIHandler m a -> Spock.ActionT m ()
spockAction = mkSpockAction serverCtx


-- all graphql errors should be of type 200
allMod200 qe = qe { qeStatus = HTTP.status200 }

gqlExplainAction =
spockAction encodeQErr id $ mkPostHandler $
mkAPIRespHandler gqlExplainHandler

enableGraphQL = isGraphQLEnabled serverCtx
enableMetadata = isMetadataEnabled serverCtx
enablePGDump = isPGDumpEnabled serverCtx
enableConfig = isConfigEnabled serverCtx
allMod200 qe = qe { qeStatus = HTTP.status200 }
gqlExplainAction = spockAction encodeQErr id $ mkPostHandler $ mkAPIRespHandler gqlExplainHandler
enableGraphQL = isGraphQLEnabled serverCtx
enableMetadata = isMetadataEnabled serverCtx
enablePGDump = isPGDumpEnabled serverCtx
enableConfig = isConfigEnabled serverCtx

serveApiConsole = do
-- redirect / to /console

@ -1,4 +1,4 @@
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE DerivingStrategies #-}

module Hasura.Server.Auth
( getUserInfo
@ -24,22 +24,24 @@ module Hasura.Server.Auth
, getUserInfoWithExpTime_
) where

import Control.Concurrent.Extended (forkImmortal)
import Data.IORef (newIORef)
import Data.Time.Clock (UTCTime)
import Hasura.Server.Version (HasVersion)
import qualified Control.Concurrent.Async.Lifted.Safe as LA
import Control.Concurrent.Extended (forkImmortal)
import Control.Monad.Trans.Control (MonadBaseControl)
import Data.IORef (newIORef)
import Data.Time.Clock (UTCTime)
import Hasura.Server.Version (HasVersion)

import qualified Crypto.Hash as Crypto
import qualified Data.Text as T
import qualified Data.Text.Encoding as T
import qualified Network.HTTP.Client as H
import qualified Network.HTTP.Types as N
import qualified Crypto.Hash as Crypto
import qualified Data.Text as T
import qualified Data.Text.Encoding as T
import qualified Network.HTTP.Client as H
import qualified Network.HTTP.Types as N

import Hasura.Logging
import Hasura.Prelude
import Hasura.RQL.Types

import Hasura.Server.Auth.JWT hiding (processJwt_)
import Hasura.Server.Auth.JWT hiding (processJwt_)
import Hasura.Server.Auth.WebHook
import Hasura.Server.Utils
import Hasura.Session
@ -63,10 +65,10 @@ class (Monad m) => UserAuthentication m where
--
-- Although this exists only in memory we store only a hash of the admin secret
-- primarily in order to:
-- --
-- -- - prevent theoretical timing attacks from a naive `==` check
-- -- - prevent misuse or inadvertent leaking of the secret
-- --
--
-- - prevent theoretical timing attacks from a naive `==` check
-- - prevent misuse or inadvertent leaking of the secret
--
newtype AdminSecretHash = AdminSecretHash (Crypto.Digest Crypto.SHA512)
deriving (Ord, Eq)

@ -99,7 +101,8 @@ data AuthMode
setupAuthMode
:: ( HasVersion
, MonadIO m
, MonadError T.Text m
, MonadBaseControl IO m
, LA.Forall (LA.Pure m)
)
=> Maybe AdminSecretHash
-> Maybe AuthHook
@ -107,7 +110,7 @@ setupAuthMode
-> Maybe RoleName
-> H.Manager
-> Logger Hasura
-> m AuthMode
-> ExceptT Text m AuthMode
setupAuthMode mAdminSecretHash mWebHook mJwtSecret mUnAuthRole httpManager logger =
case (mAdminSecretHash, mWebHook, mJwtSecret) of
(Just hash, Nothing, Nothing) -> return $ AMAdminSecret hash mUnAuthRole
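With the return type changed from m AuthMode to ExceptT Text m AuthMode (so the JWK refresh thread can be forked and linked in the base monad, per the commit note about linking the async thread), callers now unwrap the error explicitly. A sketch of the calling pattern, with illustrative names and String in place of Text:

    import Control.Monad.Except (ExceptT, runExceptT)

    -- Run an ExceptT action and handle the Left case explicitly; a real
    -- caller would log via its Logger rather than calling 'fail'.
    runOrDie :: ExceptT String IO a -> IO a
    runOrDie action = runExceptT action >>= either fail pure
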
@ -139,7 +142,15 @@ setupAuthMode mAdminSecretHash mWebHook mJwtSecret mUnAuthRole httpManager logge

-- | Given the 'JWTConfig' (the user input of JWT configuration), create
-- the 'JWTCtx' (the runtime JWT config used)
mkJwtCtx :: (HasVersion, MonadIO m, MonadError T.Text m) => JWTConfig -> m JWTCtx
-- mkJwtCtx :: HasVersion => JWTConfig -> m JWTCtx
mkJwtCtx
:: ( HasVersion
, MonadIO m
, MonadBaseControl IO m
, LA.Forall (LA.Pure m)
)
=> JWTConfig
-> ExceptT T.Text m JWTCtx
mkJwtCtx JWTConfig{..} = do
jwkRef <- case jcKeyOrUrl of
Left jwk -> liftIO $ newIORef (JWKSet [jwk])
@ -155,7 +166,7 @@ setupAuthMode mAdminSecretHash mWebHook mJwtSecret mUnAuthRole httpManager logge
case maybeExpiry of
Nothing -> return ref
Just time -> do
void $ liftIO $ forkImmortal "jwkRefreshCtrl" logger $
void . lift $ forkImmortal "jwkRefreshCtrl" logger $
jwkRefreshCtrl logger httpManager url ref (convertDuration time)
return ref

@ -171,7 +182,7 @@ setupAuthMode mAdminSecretHash mWebHook mJwtSecret mUnAuthRole httpManager logge
JFEExpiryParseError _ _ -> return Nothing

getUserInfo
:: (HasVersion, MonadIO m, MonadError QErr m)
:: (HasVersion, MonadIO m, MonadBaseControl IO m, MonadError QErr m)
=> Logger Hasura
-> H.Manager
-> [N.Header]
@ -181,7 +192,7 @@ getUserInfo l m r a = fst <$> getUserInfoWithExpTime l m r a

-- | Authenticate the request using the headers and the configured 'AuthMode'.
getUserInfoWithExpTime
:: forall m. (HasVersion, MonadIO m, MonadError QErr m)
:: forall m. (HasVersion, MonadIO m, MonadBaseControl IO m, MonadError QErr m)
=> Logger Hasura
-> H.Manager
-> [N.Header]

@ -17,8 +17,9 @@ module Hasura.Server.Auth.JWT
, defaultRoleClaim
) where

import Control.Exception (try)
import Control.Exception.Lifted (try)
import Control.Lens
import Control.Monad.Trans.Control (MonadBaseControl)
import Control.Monad.Trans.Maybe
import Data.IORef (IORef, readIORef, writeIORef)

@ -126,21 +127,22 @@ defaultClaimNs = "https://hasura.io/jwt/claims"

-- | An action that refreshes the JWK at intervals in an infinite loop.
jwkRefreshCtrl
:: HasVersion
:: (HasVersion, MonadIO m, MonadBaseControl IO m)
=> Logger Hasura
-> HTTP.Manager
-> URI
-> IORef Jose.JWKSet
-> DiffTime
-> IO void
jwkRefreshCtrl logger manager url ref time = liftIO $ do
C.sleep time
forever $ do
-> m void
jwkRefreshCtrl logger manager url ref time = do
liftIO $ C.sleep time
forever do
res <- runExceptT $ updateJwkRef logger manager url ref
mTime <- either (const $ logNotice >> return Nothing) return res
-- if can't parse time from header, defaults to 1 min
-- let delay = maybe (minutes 1) fromUnits mTime
let delay = maybe (minutes 1) (convertDuration) mTime
C.sleep delay
liftIO $ C.sleep delay
where
logNotice = do
let err = JwkRefreshLog LevelInfo (Just "retrying again in 60 secs") Nothing
@ -150,6 +152,7 @@ jwkRefreshCtrl logger manager url ref time = liftIO $ do
updateJwkRef
:: ( HasVersion
, MonadIO m
, MonadBaseControl IO m
, MonadError JwkFetchError m
)
=> Logger Hasura
@ -158,11 +161,13 @@ updateJwkRef
-> IORef Jose.JWKSet
-> m (Maybe NominalDiffTime)
updateJwkRef (Logger logger) manager url jwkRef = do
let options = wreqOptions manager []
urlT = T.pack $ show url
let urlT = T.pack $ show url
infoMsg = "refreshing JWK from endpoint: " <> urlT
liftIO $ logger $ JwkRefreshLog LevelInfo (Just infoMsg) Nothing
res <- liftIO $ try $ Wreq.getWith options $ show url
res <- try do
initReq <- liftIO $ HTTP.parseRequest $ show url
let req = initReq { HTTP.requestHeaders = addDefaultHeaders (HTTP.requestHeaders initReq) }
liftIO $ HTTP.httpLbs req manager
resp <- either logAndThrowHttp return res
let status = resp ^. Wreq.responseStatus
respBody = resp ^. Wreq.responseBody
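The fetch above moves from wreq to bare http-client so that default headers can be attached to the parsed request, and 'try' is now the lifted variant from Control.Exception.Lifted. The same pattern in plain IO, self-contained and without Hasura's addDefaultHeaders:

    import qualified Control.Exception as E
    import qualified Data.ByteString.Lazy as BL
    import qualified Network.HTTP.Client as HTTP
    import qualified Network.HTTP.Client.TLS as HTTP

    -- Build the request, run it, and surface HttpException as an Either
    -- instead of letting it propagate.
    fetchUrl :: String -> IO (Either HTTP.HttpException BL.ByteString)
    fetchUrl url = do
      manager <- HTTP.newManager HTTP.tlsManagerSettings
      req     <- HTTP.parseRequest url
      E.try (HTTP.responseBody <$> HTTP.httpLbs req manager)
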
@ -311,9 +316,9 @@ processAuthZHeader jwtCtx@JWTCtx{jcxClaimNs, jcxClaimsFormat} authzHeader = do
ClaimNsPath path -> parseIValueJsonValue $ executeJSONPath path (J.toJSON $ claims ^. Jose.unregisteredClaims)

hasuraClaimsV <- maybe claimsNotFound return mHasuraClaims

-- return hasura claims value as an object. parse from string possibly
(, expTimeM) <$> parseObjectFromString hasuraClaimsV

where
parseAuthzHeader = do
let tokenParts = BLC.words authzHeader

@ -5,13 +5,12 @@ module Hasura.Server.Auth.JWT.Logging
where

import Data.Aeson
import Network.URI (URI)

import Hasura.HTTP
import Hasura.Logging (EngineLogType (..), Hasura, InternalLogTypes (..),
LogLevel (..), ToEngineLog (..))
import Hasura.Prelude
import Hasura.Server.Logging ()
import Network.URI (URI)

import qualified Data.ByteString.Lazy as BL
import qualified Data.Text as T

@ -5,20 +5,21 @@ module Hasura.Server.Auth.WebHook
, userInfoFromAuthHook
) where

import Control.Exception (try)
import Control.Exception.Lifted (try)
import Control.Lens
import Control.Monad.Trans.Control (MonadBaseControl)
import Control.Monad.Trans.Maybe
import Data.Aeson
import Data.Time.Clock (UTCTime, addUTCTime, getCurrentTime)
import Hasura.Server.Version (HasVersion)
import Data.Time.Clock (UTCTime, addUTCTime, getCurrentTime)
import Hasura.Server.Version (HasVersion)

import qualified Data.Aeson as J
import qualified Data.ByteString.Lazy as BL
import qualified Data.HashMap.Strict as Map
import qualified Data.Text as T
import qualified Network.HTTP.Client as H
import qualified Network.HTTP.Types as N
import qualified Network.Wreq as Wreq
import qualified Data.Aeson as J
import qualified Data.ByteString.Lazy as BL
import qualified Data.HashMap.Strict as Map
import qualified Data.Text as T
import qualified Network.HTTP.Client as H
import qualified Network.HTTP.Types as N
import qualified Network.Wreq as Wreq

import Data.Parser.CacheControl
import Data.Parser.Expires
@ -58,31 +59,38 @@ hookMethod authHook = case ahType authHook of
-- UserInfo parsed from the response, plus an expiration time if one
-- was returned.
userInfoFromAuthHook
:: (HasVersion, MonadIO m, MonadError QErr m)
:: forall m
. (HasVersion, MonadIO m, MonadBaseControl IO m, MonadError QErr m)
=> Logger Hasura
-> H.Manager
-> AuthHook
-> [N.Header]
-> m (UserInfo, Maybe UTCTime)
userInfoFromAuthHook logger manager hook reqHeaders = do
resp <- (`onLeft` logAndThrow) =<< liftIO (try performHTTPRequest)
resp <- (`onLeft` logAndThrow) =<< try performHTTPRequest
let status = resp ^. Wreq.responseStatus
respBody = resp ^. Wreq.responseBody
mkUserInfoFromResp logger (ahUrl hook) (hookMethod hook) status respBody
where
performHTTPRequest :: m (Wreq.Response BL.ByteString)
performHTTPRequest = do
let url = T.unpack $ ahUrl hook
mkOptions = wreqOptions manager
case ahType hook of
AHTGet -> do
let isCommonHeader = (`elem` commonClientHeadersIgnored)
filteredHeaders = filter (not . isCommonHeader . fst) reqHeaders
Wreq.getWith (mkOptions filteredHeaders) url
AHTPost -> do
let contentType = ("Content-Type", "application/json")
headersPayload = J.toJSON $ Map.fromList $ hdrsToText reqHeaders
Wreq.postWith (mkOptions [contentType]) url $ object ["headers" J..= headersPayload]
req <- liftIO $ H.parseRequest url
liftIO do
case ahType hook of
AHTGet -> do
let isCommonHeader = (`elem` commonClientHeadersIgnored)
filteredHeaders = filter (not . isCommonHeader . fst) reqHeaders
H.httpLbs (req { H.requestHeaders = addDefaultHeaders filteredHeaders }) manager
AHTPost -> do
let contentType = ("Content-Type", "application/json")
headersPayload = J.toJSON $ Map.fromList $ hdrsToText reqHeaders
H.httpLbs (req { H.method = "POST"
, H.requestHeaders = addDefaultHeaders [contentType]
, H.requestBody = H.RequestBodyLBS . J.encode $ object ["headers" J..= headersPayload]
}) manager

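The POST branch above builds the webhook request by hand instead of going through wreq. The request surgery in isolation, as a hedged sketch ('payload' stands in for the headers object built above):

    {-# LANGUAGE OverloadedStrings #-}
    import qualified Data.Aeson as J
    import qualified Network.HTTP.Client as HTTP

    -- Turn a parsed request into a JSON POST by overriding the method,
    -- headers and body.
    mkJsonPost :: HTTP.Request -> J.Value -> HTTP.Request
    mkJsonPost req payload = req
      { HTTP.method         = "POST"
      , HTTP.requestHeaders = [("Content-Type", "application/json")]
      , HTTP.requestBody    = HTTP.RequestBodyLBS (J.encode payload)
      }
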
logAndThrow :: H.HttpException -> m a
logAndThrow err = do
unLogger logger $
WebHookLog LevelError Nothing (ahUrl hook) (hookMethod hook)

@ -17,6 +17,7 @@ import Hasura.Prelude
import Hasura.Server.Utils (fmapL)

import Control.Applicative (optional)
import Data.Aeson

import qualified Data.Aeson as J
import qualified Data.Aeson.Casing as J
@ -33,7 +34,7 @@ data DomainParts =
, wdPort :: !(Maybe Int)
} deriving (Show, Eq, Generic, Hashable)

$(J.deriveToJSON (J.aesonDrop 2 J.snakeCase) ''DomainParts)
$(J.deriveJSON (J.aesonDrop 2 J.snakeCase) ''DomainParts)

data Domains
= Domains
@ -41,7 +42,7 @@ data Domains
, dmWildcards :: !(Set.HashSet DomainParts)
} deriving (Show, Eq)

$(J.deriveToJSON (J.aesonDrop 2 J.snakeCase) ''Domains)
$(J.deriveJSON (J.aesonDrop 2 J.snakeCase) ''Domains)

data CorsConfig
= CCAllowAll
@ -62,6 +63,16 @@ instance J.ToJSON CorsConfig where
, "allowed_origins" J..= origs
]

instance J.FromJSON CorsConfig where
parseJSON = J.withObject "cors config" \o -> do
let parseAllowAll "*" = pure CCAllowAll
parseAllowAll _ = fail "unexpected string"
o .: "disabled" >>= \case
True -> CCDisabled <$> o .: "ws_read_cookie"
False -> o .: "allowed_origins" >>= \v ->
J.withText "origins" parseAllowAll v
<|> CCAllowedOrigins <$> J.parseJSON v

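Reading the new FromJSON instance against the ToJSON instance above it, the accepted shapes are roughly the following (illustrative values; "fqdns"/"wildcards" are the field names the derived Domains instance would expect after aesonDrop 2 snakeCase):

    -- {"disabled": false, "ws_read_cookie": null, "allowed_origins": "*"}
    --   parses to CCAllowAll
    -- {"disabled": true, "ws_read_cookie": true}
    --   parses to CCDisabled True
    -- {"disabled": false, "ws_read_cookie": null,
    --  "allowed_origins": {"fqdns": ["https://example.com"], "wildcards": []}}
    --   parses to CCAllowedOrigins (Domains ...)
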
isCorsDisabled :: CorsConfig -> Bool
isCorsDisabled = \case
CCDisabled _ -> True

@ -165,11 +165,16 @@ mkServeOptions rso = do
| adminInternalErrors -> InternalErrorsAdminOnly
| otherwise -> InternalErrorsDisabled

eventsHttpPoolSize <- withEnv (rsoEventsHttpPoolSize rso) (fst eventsHttpPoolSizeEnv)
eventsFetchInterval <- withEnv (rsoEventsFetchInterval rso) (fst eventsFetchIntervalEnv)
logHeadersFromEnv <- withEnvBool (rsoLogHeadersFromEnv rso) (fst logHeadersFromEnvEnv)

return $ ServeOptions port host connParams txIso adminScrt authHook jwtSecret
unAuthRole corsCfg enableConsole consoleAssetsDir
enableTelemetry strfyNum enabledAPIs lqOpts enableAL
enabledLogs serverLogLevel planCacheOptions
internalErrorsConfig
internalErrorsConfig eventsHttpPoolSize eventsFetchInterval
logHeadersFromEnv
where
#ifdef DeveloperAPIs
defaultAPIs = [METADATA,GRAPHQL,PGDUMP,CONFIG,DEVELOPER]
@ -218,7 +223,6 @@ mkServeOptions rso = do
mxBatchSizeM <- withEnv (rsoMxBatchSize rso) $ fst mxBatchSizeEnv
return $ LQ.mkLiveQueriesOptions mxBatchSizeM mxRefetchIntM


mkExamplesDoc :: [[String]] -> PP.Doc
mkExamplesDoc exampleLines =
PP.text "Examples: " PP.<$> PP.indent 2 (PP.vsep examples)
@ -312,15 +316,25 @@ serveCmdFooter =
, adminInternalErrorsEnv
]

eventEnvs =
[ ( "HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE"
, "Max event threads"
)
, ( "HASURA_GRAPHQL_EVENTS_FETCH_INTERVAL"
, "Interval in milliseconds to sleep before trying to fetch events again after a "
<> "fetch returned no events from postgres."
)
]
eventEnvs = [ eventsHttpPoolSizeEnv, eventsFetchIntervalEnv ]

eventsHttpPoolSizeEnv :: (String, String)
eventsHttpPoolSizeEnv =
( "HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE"
, "Max event threads"
)

eventsFetchIntervalEnv :: (String, String)
eventsFetchIntervalEnv =
( "HASURA_GRAPHQL_EVENTS_FETCH_INTERVAL"
, "Interval in milliseconds to sleep before trying to fetch events again after a fetch returned no events from postgres."
)

logHeadersFromEnvEnv :: (String, String)
logHeadersFromEnvEnv =
( "HASURA_GRAPHQL_LOG_HEADERS_FROM_ENV"
, "Log headers sent instead of logging referenced environment variables."
)

retriesNumEnv :: (String, String)
retriesNumEnv =
@ -785,6 +799,28 @@ parseGraphqlAdminInternalErrors = optional $
help (snd adminInternalErrorsEnv)
)

parseGraphqlEventsHttpPoolSize :: Parser (Maybe Int)
parseGraphqlEventsHttpPoolSize = optional $
option (eitherReader fromEnv)
( long "events-http-pool-size" <>
metavar (fst eventsHttpPoolSizeEnv) <>
help (snd eventsHttpPoolSizeEnv)
)

parseGraphqlEventsFetchInterval :: Parser (Maybe Milliseconds)
parseGraphqlEventsFetchInterval = optional $
option (eitherReader readEither)
( long "events-fetch-interval" <>
metavar (fst eventsFetchIntervalEnv) <>
help (snd eventsFetchIntervalEnv)
)

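The two parsers above follow the usual optparse-applicative shape. Stripped of the RawServeOptions plumbing, the interval option alone would look like this sketch ('auto' replaces the project's FromEnv-based reader):

    import Options.Applicative

    fetchIntervalParser :: Parser (Maybe Int)
    fetchIntervalParser = optional $
      option auto
        ( long "events-fetch-interval"
       <> metavar "HASURA_GRAPHQL_EVENTS_FETCH_INTERVAL"
       <> help "Interval in milliseconds to sleep before fetching events again."
        )
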
parseLogHeadersFromEnv :: Parser Bool
parseLogHeadersFromEnv =
switch ( long "log-headers-from-env" <>
help (snd logHeadersFromEnvEnv)
)

mxRefetchDelayEnv :: (String, String)
mxRefetchDelayEnv =
( "HASURA_GRAPHQL_LIVE_QUERIES_MULTIPLEXED_REFETCH_INTERVAL"
@ -929,6 +965,9 @@ serveOptionsParser =
<*> parsePlanCacheSize
<*> parseGraphqlDevMode
<*> parseGraphqlAdminInternalErrors
<*> parseGraphqlEventsHttpPoolSize
<*> parseGraphqlEventsFetchInterval
<*> parseLogHeadersFromEnv

-- | This implements the mapping between application versions
-- and catalog schema versions.

@ -2,6 +2,7 @@
module Hasura.Server.Init.Config where

import qualified Data.Aeson as J
import qualified Data.Aeson.Casing as J
import qualified Data.Aeson.TH as J
import qualified Data.HashSet as Set
import qualified Data.String as DataString
@ -60,6 +61,9 @@ data RawServeOptions impl
, rsoPlanCacheSize :: !(Maybe Cache.CacheSize)
, rsoDevMode :: !Bool
, rsoAdminInternalErrors :: !(Maybe Bool)
, rsoEventsHttpPoolSize :: !(Maybe Int)
, rsoEventsFetchInterval :: !(Maybe Milliseconds)
, rsoLogHeadersFromEnv :: !Bool
}

-- | @'ResponseInternalErrorsConfig' represents the encoding of the internal
@ -99,6 +103,9 @@ data ServeOptions impl
, soLogLevel :: !L.LogLevel
, soPlanCacheOptions :: !E.PlanCacheOptions
, soResponseInternalErrorsConfig :: !ResponseInternalErrorsConfig
, soEventsHttpPoolSize :: !(Maybe Int)
, soEventsFetchInterval :: !(Maybe Milliseconds)
, soLogHeadersFromEnv :: !Bool
}

data DowngradeOptions
@ -135,11 +142,14 @@ data API
| DEVELOPER
| CONFIG
deriving (Show, Eq, Read, Generic)

$(J.deriveJSON (J.defaultOptions { J.constructorTagModifier = map toLower })
''API)

instance Hashable API

$(J.deriveJSON (J.aesonDrop 4 J.camelCase){J.omitNothingFields=True} ''RawConnInfo)

type HGECommand impl = HGECommandG (ServeOptions impl)
type RawHGECommand impl = HGECommandG (RawServeOptions impl)

@ -252,6 +262,9 @@ instance FromEnv LQ.BatchSize where
instance FromEnv LQ.RefetchInterval where
fromEnv = fmap (LQ.RefetchInterval . milliseconds . fromInteger) . readEither

instance FromEnv Milliseconds where
fromEnv = fmap fromInteger . readEither

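Since Milliseconds is built via fromInteger, the new instance accepts plain integral strings such as HASURA_GRAPHQL_EVENTS_FETCH_INTERVAL=2000. Roughly, in GHCi style (output shown informally; the exact Left message comes from readEither):

    -- >>> fromEnv "2000" :: Either String Milliseconds
    -- Right 2000
    -- >>> fromEnv "2s" :: Either String Milliseconds
    -- Left <parse error from readEither>
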
instance FromEnv JWTConfig where
fromEnv = readJson

@ -153,7 +153,6 @@ class (Monad m) => HttpLog m where
-- ^ list of request headers
-> m ()


-- | Log information about the HTTP request
data HttpInfoLog
= HttpInfoLog

@ -28,6 +28,7 @@ import Hasura.Prelude
import qualified Data.Aeson as A
import qualified Data.HashMap.Strict as HM
import qualified Data.Text as T
import qualified Data.Environment as Env
import qualified Data.Text.IO as TIO
import qualified Database.PG.Query as Q
import qualified Database.PG.Query.Connection as Q
@ -92,9 +93,10 @@ migrateCatalog
, HasHttpManager m
, HasSQLGenCtx m
)
=> UTCTime
=> Env.Environment
-> UTCTime
-> m (MigrationResult, RebuildableSchemaCache m)
migrateCatalog migrationTime = do
migrateCatalog env migrationTime = do
doesSchemaExist (SchemaName "hdb_catalog") >>= \case
False -> initialize True
True -> doesTableExist (SchemaName "hdb_catalog") (TableName "hdb_version") >>= \case
@ -144,7 +146,7 @@ migrateCatalog migrationTime = do
migrateFrom :: T.Text -> m (MigrationResult, RebuildableSchemaCache m)
migrateFrom previousVersion
| previousVersion == latestCatalogVersionString = do
schemaCache <- buildRebuildableSchemaCache
schemaCache <- buildRebuildableSchemaCache env
pure (MRNothingToDo, schemaCache)
| [] <- neededMigrations =
throw400 NotSupported $
@ -163,7 +165,7 @@ migrateCatalog migrationTime = do

buildCacheAndRecreateSystemMetadata :: m (RebuildableSchemaCache m)
buildCacheAndRecreateSystemMetadata = do
schemaCache <- buildRebuildableSchemaCache
schemaCache <- buildRebuildableSchemaCache env
view _2 <$> runCacheRWT schemaCache recreateSystemMetadata

doesSchemaExist schemaName =

@ -45,7 +45,7 @@ import qualified Database.PG.Query as Q
import qualified Network.HTTP.Types as HTTP

newtype RoleName
= RoleName {getRoleTxt :: NonEmptyText}
= RoleName { getRoleTxt :: NonEmptyText }
deriving ( Show, Eq, Ord, Hashable, FromJSONKey, ToJSONKey, FromJSON
, ToJSON, Q.FromCol, Q.ToPrepArg, Lift, Generic, Arbitrary, NFData, Cacheable )

@ -14,6 +14,7 @@ import Test.Hspec.Core.Spec
import Test.Hspec.Expectations.Lifted

import qualified Database.PG.Query as Q
import qualified Data.Environment as Env

import Hasura.RQL.DDL.Metadata (ClearMetadata (..), runClearMetadata)
import Hasura.RQL.DDL.Schema
@ -24,11 +25,6 @@ import Hasura.Server.Migrate
import Hasura.Server.Version (HasVersion)

-- -- NOTE: downgrade test disabled for now (see #5273)
-- import Data.List.Split (splitOn)
-- import Data.List (isPrefixOf, stripPrefix)
-- import System.Process (readProcess)
-- import qualified Safe
-- import Hasura.Server.Init (downgradeShortcuts)

newtype CacheRefT m a
= CacheRefT { runCacheRefT :: MVar (RebuildableSchemaCache m) -> m a }
@ -69,33 +65,37 @@ spec
)
=> Q.ConnInfo -> SpecWithCache m
spec pgConnInfo = do
let dropAndInit time = CacheRefT $ flip modifyMVar \_ ->
dropCatalog *> (swap <$> migrateCatalog time)
let dropAndInit env time = CacheRefT $ flip modifyMVar \_ ->
dropCatalog *> (swap <$> migrateCatalog env time)

describe "migrateCatalog" $ do
it "initializes the catalog" $ singleTransaction do
(dropAndInit =<< liftIO getCurrentTime) `shouldReturn` MRInitialized
env <- liftIO Env.getEnvironment
time <- liftIO getCurrentTime
(dropAndInit env time) `shouldReturn` MRInitialized

it "is idempotent" \(NT transact) -> do
let dumpSchema = execPGDump (PGDumpReqBody ["--schema-only"] (Just False)) pgConnInfo
env <- Env.getEnvironment
time <- getCurrentTime
transact (dropAndInit time) `shouldReturn` MRInitialized
transact (dropAndInit env time) `shouldReturn` MRInitialized
firstDump <- transact dumpSchema
transact (dropAndInit time) `shouldReturn` MRInitialized
transact (dropAndInit env time) `shouldReturn` MRInitialized
secondDump <- transact dumpSchema
secondDump `shouldBe` firstDump

it "supports upgrades after downgrade to version 12" \(NT transact) -> do
let downgradeTo v = downgradeCatalog DowngradeOptions{ dgoDryRun = False, dgoTargetVersion = v }
upgradeToLatest time = CacheRefT $ flip modifyMVar \_ ->
swap <$> migrateCatalog time
upgradeToLatest env time = CacheRefT $ flip modifyMVar \_ ->
swap <$> migrateCatalog env time
env <- Env.getEnvironment
time <- getCurrentTime
transact (dropAndInit time) `shouldReturn` MRInitialized
transact (dropAndInit env time) `shouldReturn` MRInitialized
downgradeResult <- (transact . lift) (downgradeTo "12" time)
downgradeResult `shouldSatisfy` \case
MRMigrated{} -> True
_ -> False
transact (upgradeToLatest time) `shouldReturn` MRMigrated "12"
transact (upgradeToLatest env time) `shouldReturn` MRMigrated "12"

-- -- NOTE: this has been problematic in CI and we're not quite sure how to
-- -- make this work reliably given the way we do releases and create
@ -114,14 +114,18 @@ spec pgConnInfo = do
let dumpMetadata = execPGDump (PGDumpReqBody ["--schema=hdb_catalog"] (Just False)) pgConnInfo

it "is idempotent" \(NT transact) -> do
(transact . dropAndInit =<< getCurrentTime) `shouldReturn` MRInitialized
env <- Env.getEnvironment
time <- getCurrentTime
(transact $ dropAndInit env time) `shouldReturn` MRInitialized
firstDump <- transact dumpMetadata
transact recreateSystemMetadata
secondDump <- transact dumpMetadata
secondDump `shouldBe` firstDump

it "does not create any objects affected by ClearMetadata" \(NT transact) -> do
(transact . dropAndInit =<< getCurrentTime) `shouldReturn` MRInitialized
env <- Env.getEnvironment
time <- getCurrentTime
(transact $ dropAndInit env time) `shouldReturn` MRInitialized
firstDump <- transact dumpMetadata
transact (runClearMetadata ClearMetadata) `shouldReturn` successMsg
secondDump <- transact dumpMetadata

@ -12,6 +12,7 @@ import Test.Hspec

import qualified Data.Aeson as A
import qualified Data.ByteString.Lazy.Char8 as BL
import qualified Data.Environment as Env
import qualified Database.PG.Query as Q
import qualified Network.HTTP.Client as HTTP
import qualified Network.HTTP.Client.TLS as HTTP
@ -34,7 +35,6 @@ import qualified Hasura.IncrementalSpec as IncrementalSpec
-- import qualified Hasura.RQL.MetadataSpec as MetadataSpec
import qualified Hasura.Server.MigrateSpec as MigrateSpec
import qualified Hasura.Server.TelemetrySpec as TelemetrySpec
import qualified Hasura.Server.AuthSpec as AuthSpec

data TestSuites
= AllSuites !RawConnInfo
@ -65,7 +65,6 @@ unitSpecs = do
-- describe "Hasura.RQL.Metadata" MetadataSpec.spec -- Commenting until optimizing the test in CI
describe "Data.Time" TimeSpec.spec
describe "Hasura.Server.Telemetry" TelemetrySpec.spec
describe "Hasura.Server.Auth" AuthSpec.spec

buildPostgresSpecs :: (HasVersion) => RawConnInfo -> IO Spec
buildPostgresSpecs pgConnOptions = do
@ -76,10 +75,9 @@ buildPostgresSpecs pgConnOptions = do

let setupCacheRef = do
pgPool <- Q.initPGPool pgConnInfo Q.defaultConnParams { Q.cpConns = 1 } print

let pgContext = mkPGExecCtx Q.Serializable pgPool
httpManager <- HTTP.newManager HTTP.tlsManagerSettings
let runContext = RunCtx adminUserInfo httpManager (SQLGenCtx False)
pgContext = mkPGExecCtx Q.Serializable pgPool

runAsAdmin :: Run a -> IO a
runAsAdmin =
@ -87,7 +85,7 @@ buildPostgresSpecs pgConnOptions = do
>>> runExceptT
>=> flip onLeft printErrJExit

schemaCache <- snd <$> runAsAdmin (migrateCatalog =<< liftIO getCurrentTime)
schemaCache <- snd <$> runAsAdmin (migrateCatalog (Env.mkEnvironment env) =<< liftIO getCurrentTime)
cacheRef <- newMVar schemaCache
pure $ NT (runAsAdmin . flip MigrateSpec.runCacheRefT cacheRef)