Pass environment variables around as a data structure, via @sordina (#5374)

* Pass environment variables around as a data structure, via @sordina

* Resolving build error

* Adding Environment passing note to changelog

* Removing references to ILTPollerLog as this seems to have been reintroduced from a bad merge

* removing commented-out imports

* Language pragmas already set by project

* Linking async thread

* Apply suggestions from code review

Use `runQueryTx` instead of `runLazyTx` for queries.

* remove the non-user facing entry in the changelog

Co-authored-by: Phil Freeman <paf31@cantab.net>
Co-authored-by: Phil Freeman <phil@hasura.io>
Co-authored-by: Vamshi Surabhi <0x777@users.noreply.github.com>
Author: Lyndon Maydwell (2020-07-15 05:00:58 +10:00), committed by Karthikeyan Chinnakonda
parent 078f3955aa
commit 8904e063e9
62 changed files with 1284 additions and 934 deletions

View File

@@ -125,6 +125,8 @@ constraints: any.Cabal ==3.2.0.0,
 entropy -halvm,
 any.errors ==2.3.0,
 any.exceptions ==0.10.4,
+exceptions +transformers-0-4,
+any.fail ==4.9.0.0,
 any.fast-logger ==3.0.1,
 any.file-embed ==0.0.11.2,
 any.filepath ==1.4.2.1,

View File

@@ -63,36 +63,6 @@ Date: Wed Jul 15 03:40:48 2020 -0700
 Co-authored-by: Vamshi Surabhi <0x777@users.noreply.github.com>

-commit 24592a516b2e920d3d41244b0aac4c060dc321ae
-Author: Lyndon Maydwell <lyndon@sordina.net>
-Date: Wed Jul 15 05:00:58 2020 +1000
-
-Pass environment variables around as a data structure, via @sordina (#5374)
-
-* Pass environment variables around as a data structure, via @sordina
-* Resolving build error
-* Adding Environment passing note to changelog
-* Removing references to ILTPollerLog as this seems to have been reintroduced from a bad merge
-* removing commented-out imports
-* Language pragmas already set by project
-* Linking async thread
-* Apply suggestions from code review
-
-Use `runQueryTx` instead of `runLazyTx` for queries.
-
-* remove the non-user facing entry in the changelog
-
-Co-authored-by: Phil Freeman <paf31@cantab.net>
-Co-authored-by: Phil Freeman <phil@hasura.io>
-Co-authored-by: Vamshi Surabhi <0x777@users.noreply.github.com>

 (Done, but we should re-visit this, if we do query plan caching)

 commit 20cbe9cfd3e90b91d3f4faf370b081fc3859cbde
 Author: Auke Booij <auke@hasura.io>

View File

@@ -447,6 +447,7 @@ library
     , Data.HashMap.Strict.InsOrd.Extended
     , Data.List.Extended
     , Data.Tuple.Extended
+    , Data.Environment
     , Hasura.SQL.DML
     , Hasura.SQL.Error
     , Hasura.SQL.GeoJSON

View File

@@ -2,6 +2,7 @@
 module Main where

+import Control.Exception
 import Data.Text.Conversions (convertText)

 import Hasura.App
@@ -14,19 +15,31 @@ import Hasura.Server.Init
 import Hasura.Server.Migrate (downgradeCatalog, dropCatalog)
 import Hasura.Server.Version

+import qualified Data.ByteString.Char8 as BC
 import qualified Data.ByteString.Lazy as BL
 import qualified Data.ByteString.Lazy.Char8 as BLC
+import qualified Data.Environment as Env
 import qualified Database.PG.Query as Q
+import qualified System.Exit as Sys
 import qualified System.Posix.Signals as Signals

 main :: IO ()
-main = parseArgs >>= unAppM . runApp
+main = do
+  tryExit $ do
+    args <- parseArgs
+    env <- Env.getEnvironment
+    unAppM (runApp env args)
+  where
+    tryExit io = try io >>= \case
+      Left (ExitException _code msg) -> BC.putStrLn msg >> Sys.exitFailure
+      Right r -> return r

-runApp :: HGEOptions Hasura -> AppM ()
-runApp (HGEOptionsG rci hgeCmd) =
+runApp :: Env.Environment -> HGEOptions Hasura -> AppM ()
+runApp env (HGEOptionsG rci hgeCmd) =
   withVersion $$(getVersionFromEnvironment) $ case hgeCmd of
     HCServe serveOptions -> do
-      (initCtx, initTime) <- initialiseCtx hgeCmd rci
+      (initCtx, initTime) <- initialiseCtx env hgeCmd rci
+      let shutdownApp = return ()
       -- Catches the SIGTERM signal and initiates a graceful shutdown.
       -- Graceful shutdown for regular HTTP requests is already implemented in
       -- Warp, and is triggered by invoking the 'closeSocket' callback.
@@ -36,35 +49,36 @@ runApp (HGEOptionsG rci hgeCmd) =
           Signals.sigTERM
           (Signals.CatchOnce (shutdownGracefully initCtx))
           Nothing
-      runHGEServer serveOptions initCtx Nothing initTime
+      runHGEServer env serveOptions initCtx Nothing initTime shutdownApp
     HCExport -> do
-      (initCtx, _) <- initialiseCtx hgeCmd rci
+      (initCtx, _) <- initialiseCtx env hgeCmd rci
       res <- runTx' initCtx fetchMetadata Q.ReadCommitted
-      either printErrJExit printJSON res
+      either (printErrJExit MetadataExportError) printJSON res
     HCClean -> do
-      (initCtx, _) <- initialiseCtx hgeCmd rci
+      (initCtx, _) <- initialiseCtx env hgeCmd rci
       res <- runTx' initCtx dropCatalog Q.ReadCommitted
-      either printErrJExit (const cleanSuccess) res
+      either (printErrJExit MetadataCleanError) (const cleanSuccess) res
     HCExecute -> do
-      (InitCtx{..}, _) <- initialiseCtx hgeCmd rci
+      (InitCtx{..}, _) <- initialiseCtx env hgeCmd rci
       queryBs <- liftIO BL.getContents
       let sqlGenCtx = SQLGenCtx False
       res <- runAsAdmin _icPgPool sqlGenCtx _icHttpManager $ do
-        schemaCache <- buildRebuildableSchemaCache
-        execQuery queryBs
+        schemaCache <- buildRebuildableSchemaCache env
+        execQuery env queryBs
           & runHasSystemDefinedT (SystemDefined False)
          & runCacheRWT schemaCache
          & fmap (\(res, _, _) -> res)
-      either printErrJExit (liftIO . BLC.putStrLn) res
+      either (printErrJExit ExecuteProcessError) (liftIO . BLC.putStrLn) res
     HCDowngrade opts -> do
-      (InitCtx{..}, initTime) <- initialiseCtx hgeCmd rci
+      (InitCtx{..}, initTime) <- initialiseCtx env hgeCmd rci
       let sqlGenCtx = SQLGenCtx False
       res <- downgradeCatalog opts initTime
               & runAsAdmin _icPgPool sqlGenCtx _icHttpManager
-      either printErrJExit (liftIO . print) res
+      either (printErrJExit DowngradeProcessError) (liftIO . print) res
     HCVersion -> liftIO $ putStrLn $ "Hasura GraphQL Engine: " ++ convertText currentVersion
   where

View File

@@ -0,0 +1,35 @@
+{-# LANGUAGE DeriveGeneric #-}
+
+module Data.Environment
+  ( Environment()
+  , getEnvironment
+  , mkEnvironment
+  , emptyEnvironment
+  , maybeEnvironment
+  , lookupEnv)
+  where
+
+import Hasura.Prelude
+import Data.Aeson
+
+import qualified System.Environment
+import qualified Data.Map as M
+
+newtype Environment = Environment (M.Map String String) deriving (Eq, Show, Generic)
+
+instance FromJSON Environment
+
+getEnvironment :: IO Environment
+getEnvironment = mkEnvironment <$> System.Environment.getEnvironment
+
+maybeEnvironment :: Maybe Environment -> Environment
+maybeEnvironment = fromMaybe emptyEnvironment
+
+mkEnvironment :: [(String, String)] -> Environment
+mkEnvironment = Environment . M.fromList
+
+emptyEnvironment :: Environment
+emptyEnvironment = Environment M.empty
+
+lookupEnv :: Environment -> String -> Maybe String
+lookupEnv (Environment es) k = M.lookup k es
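This module is the heart of the change: the process environment becomes an ordinary value. A minimal sketch of the intended usage (hypothetical, not part of this diff; inside the engine the value is captured once in Main.main and threaded down):

import qualified Data.Environment as Env

main :: IO ()
main = do
  -- Capture the real process environment once, at startup...
  env <- Env.getEnvironment
  print (Env.lookupEnv env "PATH")
  -- ...or fabricate one for tests, with no global process state involved.
  let testEnv = Env.mkEnvironment [("HASURA_GRAPHQL_ENABLE_TELEMETRY", "false")]
  print (Env.lookupEnv testEnv "HASURA_GRAPHQL_ENABLE_TELEMETRY")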

View File

@@ -13,12 +13,12 @@ where
 import Hasura.Prelude

 import qualified Data.Text as T
+import qualified Data.Environment as Env

 import Data.Attoparsec.Combinator (lookAhead)
 import Data.Attoparsec.Text
 import Instances.TH.Lift ()
 import Language.Haskell.TH.Syntax (Lift)
-import System.Environment (lookupEnv)
 import Test.QuickCheck

 newtype Variable = Variable {unVariable :: Text}
@@ -63,22 +63,22 @@ parseURLTemplate t = parseOnly parseTemplate t
     parseVariable =
       string "{{" *> (Variable . T.pack <$> manyTill anyChar (string "}}"))

-renderURLTemplate :: MonadIO m => URLTemplate -> m (Either String Text)
-renderURLTemplate template = do
-  eitherResults <- mapM renderTemplateItem $ unURLTemplate template
-  let errorVariables = lefts eitherResults
-  pure $ case errorVariables of
+renderURLTemplate :: Env.Environment -> URLTemplate -> Either String Text
+renderURLTemplate env template =
+  case errorVariables of
     [] -> Right $ T.concat $ rights eitherResults
     _  -> Left $ T.unpack $ "Value for environment variables not found: "
           <> T.intercalate ", " errorVariables
   where
+    eitherResults = map renderTemplateItem $ unURLTemplate template
+    errorVariables = lefts eitherResults
     renderTemplateItem = \case
-      TIText t -> pure $ Right t
-      TIVariable (Variable var) -> do
-        maybeEnvValue <- liftIO $ lookupEnv $ T.unpack var
-        pure $ case maybeEnvValue of
+      TIText t -> Right t
+      TIVariable (Variable var) ->
+        let maybeEnvValue = Env.lookupEnv env $ T.unpack var
+        in case maybeEnvValue of
          Nothing -> Left var
          Just value -> Right $ T.pack value

 -- QuickCheck generators
 instance Arbitrary Variable where
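With the MonadIO constraint gone, a webhook URL template is rendered purely against whatever Environment the caller supplies, real or fabricated. A sketch under those assumptions (imports and the surrounding module elided, as in the diff):

{-# LANGUAGE OverloadedStrings #-}
import Data.Text (Text)
import qualified Data.Environment as Env

-- Missing variables now surface as Left, with no IO involved anywhere.
renderedWebhook :: Either String Text
renderedWebhook = do
  template <- parseURLTemplate "https://{{WEBHOOK_HOST}}/hook"
  renderURLTemplate (Env.mkEnvironment [("WEBHOOK_HOST", "example.com")]) template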

View File

@@ -2,10 +2,11 @@
 module Hasura.App where

-import Control.Concurrent.STM.TVar (TVar, readTVarIO)
+import Control.Concurrent.STM.TVar (readTVarIO, TVar)
+import Control.Exception (throwIO)
 import Control.Lens (view, _2)
 import Control.Monad.Base
-import Control.Monad.Catch (MonadCatch, MonadThrow, onException)
+import Control.Monad.Catch (MonadCatch, MonadMask, MonadThrow, onException, Exception)
 import Control.Monad.Stateless
 import Control.Monad.STM (atomically)
 import Control.Monad.Trans.Control (MonadBaseControl (..))
@@ -15,11 +16,9 @@ import Data.Time.Clock (UTCTime)
 import GHC.AssertNF
 import GHC.Stats
 import Options.Applicative
-import System.Environment (getEnvironment, lookupEnv)
-import System.Exit (exitFailure)
+import System.Environment (getEnvironment)
 import System.Mem (performMajorGC)

-import qualified Control.Concurrent.Async as Async
 import qualified Control.Concurrent.Async.Lifted.Safe as LA
 import qualified Control.Concurrent.Extended as C
 import qualified Data.Aeson as A
@@ -27,6 +26,7 @@ import qualified Data.ByteString.Char8 as BC
 import qualified Data.ByteString.Lazy.Char8 as BLC
 import qualified Data.Set as Set
 import qualified Data.Text as T
+import qualified Data.Environment as Env
 import qualified Data.Time.Clock as Clock
 import qualified Data.Yaml as Y
 import qualified Database.PG.Query as Q
@@ -35,6 +35,7 @@ import qualified Network.HTTP.Client.TLS as HTTP
 import qualified Network.Wai.Handler.Warp as Warp
 import qualified System.Log.FastLogger as FL
 import qualified Text.Mustache.Compile as M
+import qualified Control.Immortal as Immortal

 import Hasura.Db
 import Hasura.EncJSON
@@ -71,11 +72,35 @@ import Hasura.Session
 import qualified Hasura.GraphQL.Transport.WebSocket.Server as WS

-printErrExit :: (MonadIO m) => forall a . String -> m a
-printErrExit = liftIO . (>> exitFailure) . putStrLn
+data ExitCode
+  = InvalidEnvironmentVariableOptionsError
+  | InvalidDatabaseConnectionParamsError
+  | MetadataCatalogFetchingError
+  | AuthConfigurationError
+  | EventSubSystemError
+  | EventEnvironmentVariableError
+  | MetadataExportError
+  | MetadataCleanError
+  | DatabaseMigrationError
+  | ExecuteProcessError
+  | DowngradeProcessError
+  | UnexpectedHasuraError
+  | ExitFailureError Int
+  deriving Show

-printErrJExit :: (A.ToJSON a, MonadIO m) => forall b . a -> m b
-printErrJExit = liftIO . (>> exitFailure) . printJSON
+data ExitException
+  = ExitException
+  { eeCode    :: !ExitCode
+  , eeMessage :: !BC.ByteString
+  } deriving (Show)
+
+instance Exception ExitException
+
+printErrExit :: (MonadIO m) => forall a . ExitCode -> String -> m a
+printErrExit reason = liftIO . throwIO . ExitException reason . BC.pack
+
+printErrJExit :: (A.ToJSON a, MonadIO m) => forall b . ExitCode -> a -> m b
+printErrJExit reason = liftIO . throwIO . ExitException reason . BLC.toStrict . A.encode

 parseHGECommand :: EnabledLogTypes impl => Parser (RawHGECommand impl)
 parseHGECommand =
@@ -101,7 +126,7 @@ parseArgs = do
   rawHGEOpts <- execParser opts
   env <- getEnvironment
   let eitherOpts = runWithEnv env $ mkHGEOptions rawHGEOpts
-  either printErrExit return eitherOpts
+  either (printErrExit InvalidEnvironmentVariableOptionsError) return eitherOpts
   where
     opts = info (helper <*> hgeOpts)
            ( fullDesc <>
@@ -143,8 +168,7 @@ data Loggers
   }

 newtype AppM a = AppM { unAppM :: IO a }
-  deriving ( Functor, Applicative, Monad, MonadIO, MonadUnique, MonadBase IO
-           , MonadBaseControl IO, MonadCatch, MonadThrow)
+  deriving (Functor, Applicative, Monad, MonadIO, MonadBase IO, MonadBaseControl IO, MonadCatch, MonadThrow, MonadMask)

 -- | this function initializes the catalog and returns an @InitCtx@, based on the command given
 -- - for serve command it creates a proper PG connection pool
@@ -153,10 +177,11 @@ newtype AppM a = AppM { unAppM :: IO a }
 -- used by other functions as well
 initialiseCtx
   :: (HasVersion, MonadIO m, MonadCatch m)
-  => HGECommand Hasura
+  => Env.Environment
+  -> HGECommand Hasura
   -> RawConnInfo
   -> m (InitCtx, UTCTime)
-initialiseCtx hgeCmd rci = do
+initialiseCtx env hgeCmd rci = do
   initTime <- liftIO Clock.getCurrentTime
   -- global http manager
   httpManager <- liftIO $ HTTP.newManager HTTP.tlsManagerSettings
@@ -181,11 +206,11 @@ initialiseCtx hgeCmd rci = do
       pure (l, pool, SQLGenCtx False)

   res <- flip onException (flushLogger (_lsLoggerCtx loggers)) $
-    migrateCatalogSchema (_lsLogger loggers) pool httpManager sqlGenCtx
+    migrateCatalogSchema env (_lsLogger loggers) pool httpManager sqlGenCtx
   pure (InitCtx httpManager instanceId loggers connInfo pool latch res, initTime)
   where
     procConnInfo =
-      either (printErrExit . ("Fatal Error : " <>)) return $ mkConnInfo rci
+      either (printErrExit InvalidDatabaseConnectionParamsError . ("Fatal Error : " <>)) return $ mkConnInfo rci

     getMinimalPool pgLogger ci = do
       let connParams = Q.defaultConnParams { Q.cpConns = 1 }
@@ -200,14 +225,14 @@ initialiseCtx hgeCmd rci = do
 -- | helper function to initialize or migrate the @hdb_catalog@ schema (used by pro as well)
 migrateCatalogSchema
   :: (HasVersion, MonadIO m)
-  => Logger Hasura -> Q.PGPool -> HTTP.Manager -> SQLGenCtx
+  => Env.Environment -> Logger Hasura -> Q.PGPool -> HTTP.Manager -> SQLGenCtx
   -> m (RebuildableSchemaCache Run, Maybe UTCTime)
-migrateCatalogSchema logger pool httpManager sqlGenCtx = do
+migrateCatalogSchema env logger pool httpManager sqlGenCtx = do
   let pgExecCtx = mkPGExecCtx Q.Serializable pool
       adminRunCtx = RunCtx adminUserInfo httpManager sqlGenCtx
   currentTime <- liftIO Clock.getCurrentTime
   initialiseResult <- runExceptT $ peelRun adminRunCtx pgExecCtx Q.ReadWrite $
-    (,) <$> migrateCatalog currentTime <*> liftTx fetchLastUpdate
+    (,) <$> migrateCatalog env currentTime <*> liftTx fetchLastUpdate

   ((migrationResult, schemaCache), lastUpdateEvent) <-
     initialiseResult `onLeft` \err -> do
@@ -216,7 +241,7 @@ migrateCatalogSchema logger pool httpManager sqlGenCtx = do
         , slKind = "db_migrate"
         , slInfo = A.toJSON err
        }
-      liftIO exitFailure
+      liftIO (printErrExit DatabaseMigrationError (BLC.unpack $ A.encode err))
   unLogger logger migrationResult
   return (schemaCache, view _2 <$> lastUpdateEvent)
@@ -224,7 +249,7 @@ migrateCatalogSchema logger pool httpManager sqlGenCtx = do
 runTxIO :: Q.PGPool -> Q.TxMode -> Q.TxE QErr a -> IO a
 runTxIO pool isoLevel tx = do
   eVal <- liftIO $ runExceptT $ Q.runTx pool isoLevel tx
-  either printErrJExit return eVal
+  either (printErrJExit DatabaseMigrationError) return eVal

 -- | A latch for the graceful shutdown of a server process.
 newtype ShutdownLatch = ShutdownLatch { unShutdownLatch :: C.MVar () }
@@ -239,40 +264,45 @@ waitForShutdown = C.takeMVar . unShutdownLatch
 -- | Initiate a graceful shutdown of the server associated with the provided
 -- latch.
 shutdownGracefully :: InitCtx -> IO ()
-shutdownGracefully = flip C.putMVar () . unShutdownLatch . _icShutdownLatch
+shutdownGracefully = shutdownGracefully' . _icShutdownLatch
+
+shutdownGracefully' :: ShutdownLatch -> IO ()
+shutdownGracefully' = flip C.putMVar () . unShutdownLatch

 -- | If an exception is encountered, flush the log buffer and
 -- rethrow. If we do not flush the log buffer on exception, then log lines
 -- may be missed
 -- See: https://github.com/hasura/graphql-engine/issues/4772
-flushLogger :: (MonadIO m) => LoggerCtx impl -> m ()
-flushLogger loggerCtx = liftIO $ FL.flushLogStr $ _lcLoggerSet loggerCtx
+flushLogger :: MonadIO m => LoggerCtx impl -> m ()
+flushLogger = liftIO . FL.flushLogStr . _lcLoggerSet

 runHGEServer
   :: ( HasVersion
      , MonadIO m
-     , MonadUnique m
-     , MonadCatch m
+     , MonadMask m
      , MonadStateless IO m
      , LA.Forall (LA.Pure m)
      , UserAuthentication m
-     , MetadataApiAuthorization m
      , HttpLog m
+     , MonadQueryLog m
      , ConsoleRenderer m
+     , MetadataApiAuthorization m
      , MonadGQLExecutionCheck m
      , MonadConfigApiHandler m
-     , MonadQueryLog m
      , WS.MonadWSLog m
      )
-  => ServeOptions impl
+  => Env.Environment
+  -> ServeOptions impl
   -> InitCtx
   -> Maybe PGExecCtx
   -- ^ An optional specialized pg exection context for executing queries
   -- and mutations
   -> UTCTime
   -- ^ start time
+  -> IO ()
+  -- ^ shutdown function
   -> m ()
-runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
+runHGEServer env ServeOptions{..} InitCtx{..} pgExecCtx initTime shutdownApp = do
   -- Comment this to enable expensive assertions from "GHC.AssertNF". These
   -- will log lines to STDOUT containing "not in normal form". In the future we
   -- could try to integrate this into our tests. For now this is a development
@@ -287,13 +317,14 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
   authModeRes <- runExceptT $ setupAuthMode soAdminSecret soAuthHook soJwtSecret soUnAuthRole
                               _icHttpManager logger

-  authMode <- either (printErrExit . T.unpack) return authModeRes
+  authMode <- either (printErrExit AuthConfigurationError . T.unpack) return authModeRes

   _idleGCThread <- C.forkImmortal "ourIdleGC" logger $ liftIO $
     ourIdleGC logger (seconds 0.3) (seconds 10) (seconds 60)

-  HasuraApp app cacheRef cacheInitTime shutdownApp <- flip onException (flushLogger loggerCtx) $
-    mkWaiApp soTxIso
+  HasuraApp app cacheRef cacheInitTime stopWsServer <- flip onException (flushLogger loggerCtx) $
+    mkWaiApp env
+             soTxIso
              logger
             sqlGenCtx
             soEnableAllowlist
@@ -318,14 +349,14 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
   liftIO $ logInconsObjs logger inconsObjs

   -- start background threads for schema sync
-  (_schemaSyncListenerThread, _schemaSyncProcessorThread) <-
+  (schemaSyncListenerThread, schemaSyncProcessorThread) <-
     startSchemaSyncThreads sqlGenCtx _icPgPool logger _icHttpManager
       cacheRef _icInstanceId cacheInitTime

-  maxEvThrds <- liftIO $ getFromEnv defaultMaxEventThreads "HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE"
-  fetchI <- fmap milliseconds $ liftIO $
-    getFromEnv (Milliseconds defaultFetchInterval) "HASURA_GRAPHQL_EVENTS_FETCH_INTERVAL"
-  logEnvHeaders <- liftIO $ getFromEnv False "LOG_HEADERS_FROM_ENV"
+  let
+    maxEvThrds = fromMaybe defaultMaxEventThreads soEventsHttpPoolSize
+    fetchI = milliseconds $ fromMaybe (Milliseconds defaultFetchInterval) soEventsFetchInterval
+    logEnvHeaders = soLogHeadersFromEnv

   lockedEventsCtx <- liftIO $ atomically $ initLockedEventsCtx
@@ -333,13 +364,14 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
   prepareEvents _icPgPool logger
   eventEngineCtx <- liftIO $ atomically $ initEventEngineCtx maxEvThrds fetchI
   unLogger logger $ mkGenericStrLog LevelInfo "event_triggers" "starting workers"
-  _eventQueueThread <- C.forkImmortal "processEventQueue" logger $ liftIO $
+
+  _eventQueueThread <- C.forkImmortal "processEventQueue" logger $
     processEventQueue logger logEnvHeaders
     _icHttpManager _icPgPool (getSCFromRef cacheRef) eventEngineCtx lockedEventsCtx

   -- start a backgroud thread to handle async actions
-  _asyncActionsThread <- C.forkImmortal "asyncActionsProcessor" logger $ liftIO $
-    asyncActionsProcessor (_scrCache cacheRef) _icPgPool _icHttpManager
+  asyncActionsThread <- C.forkImmortal "asyncActionsProcessor" logger $
+    asyncActionsProcessor env (_scrCache cacheRef) _icPgPool _icHttpManager

   -- start a background thread to create new cron events
   void $ liftIO $ C.forkImmortal "runCronEventsGenerator" logger $
@@ -349,20 +381,29 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
   prepareScheduledEvents _icPgPool logger

   -- start a background thread to deliver the scheduled events
-  void $ liftIO $ C.forkImmortal "processScheduledTriggers" logger $ processScheduledTriggers logger logEnvHeaders _icHttpManager _icPgPool (getSCFromRef cacheRef) lockedEventsCtx
+  void $ C.forkImmortal "processScheduledTriggers" logger $
+    processScheduledTriggers env logger logEnvHeaders _icHttpManager _icPgPool (getSCFromRef cacheRef) lockedEventsCtx

   -- start a background thread to check for updates
-  _updateThread <- C.forkImmortal "checkForUpdates" logger $ liftIO $
+  updateThread <- C.forkImmortal "checkForUpdates" logger $ liftIO $
     checkForUpdates loggerCtx _icHttpManager

-  -- startTelemetry logger serveOpts cacheRef initCtx
   -- start a background thread for telemetry
   when soEnableTelemetry $ do
     unLogger logger $ mkGenericStrLog LevelInfo "telemetry" telemetryNotice
     (dbId, pgVersion) <- liftIO $ runTxIO _icPgPool (Q.ReadCommitted, Nothing) $
       (,) <$> getDbId <*> getPgVersion
     void $ C.forkImmortal "runTelemetry" logger $ liftIO $
       runTelemetry logger _icHttpManager (getSCFromRef cacheRef) dbId _icInstanceId pgVersion

+  -- events has its own shutdown mechanism, used in 'shutdownHandler'
+  let immortalThreads = [schemaSyncListenerThread, schemaSyncProcessorThread, updateThread, asyncActionsThread]
+
   finishTime <- liftIO Clock.getCurrentTime
   let apiInitTime = realToFrac $ Clock.diffUTCTime finishTime initTime
   unLogger logger $
@@ -370,7 +411,7 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
   let warpSettings = Warp.setPort soPort
                      . Warp.setHost soHost
                      . Warp.setGracefulShutdownTimeout (Just 30) -- 30s graceful shutdown
-                     . Warp.setInstallShutdownHandler (shutdownHandler _icLoggers shutdownApp lockedEventsCtx _icPgPool)
+                     . Warp.setInstallShutdownHandler (shutdownHandler _icLoggers immortalThreads stopWsServer lockedEventsCtx _icPgPool)
                      $ Warp.defaultSettings
   liftIO $ Warp.runSettings warpSettings app
@@ -389,13 +430,13 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
   prepareEvents pool (Logger logger) = do
     liftIO $ logger $ mkGenericStrLog LevelInfo "event_triggers" "preparing data"
     res <- liftIO $ runTx pool (Q.ReadCommitted, Nothing) unlockAllEvents
-    either printErrJExit return res
+    either (printErrJExit EventSubSystemError) return res

   -- | prepareScheduledEvents is like prepareEvents, but for scheduled triggers
   prepareScheduledEvents pool (Logger logger) = do
     liftIO $ logger $ mkGenericStrLog LevelInfo "scheduled_triggers" "preparing data"
     res <- liftIO $ runTx pool (Q.ReadCommitted, Nothing) unlockAllLockedScheduledEvents
-    either printErrJExit return res
+    either (printErrJExit EventSubSystemError) return res

   -- | shutdownEvents will be triggered when a graceful shutdown has been inititiated, it will
   -- get the locked events from the event engine context and the scheduled event engine context
@@ -433,15 +474,6 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
       Right count -> logger $ mkGenericStrLog LevelInfo triggerType $
        show count ++ " " ++ T.unpack eventType ++ " events successfully unlocked"

-  getFromEnv :: (Read a) => a -> String -> IO a
-  getFromEnv defaults env = do
-    mEnv <- lookupEnv env
-    let mRes = case mEnv of
-          Nothing  -> Just defaults
-          Just val -> readMaybe val
-        eRes = maybe (Left $ "Wrong expected type for environment variable: " <> env) Right mRes
-    either printErrExit return eRes
-
   runTx :: Q.PGPool -> Q.TxMode -> Q.TxE QErr a -> IO (Either QErr a)
   runTx pool txLevel tx =
     liftIO $ runExceptT $ Q.runTx pool txLevel tx
@@ -452,17 +484,26 @@ runHGEServer ServeOptions{..} InitCtx{..} pgExecCtx initTime = do
   -- we want to control shutdown.
   shutdownHandler
     :: Loggers
+    -> [Immortal.Thread]
     -> IO ()
+    -- ^ the stop websocket server function
     -> LockedEventsCtx
     -> Q.PGPool
     -> IO ()
+    -- ^ the closeSocket callback
     -> IO ()
-  shutdownHandler (Loggers loggerCtx (Logger logger) _) shutdownApp leCtx pool closeSocket =
-    void . Async.async $ do
+  shutdownHandler (Loggers loggerCtx (Logger logger) _) immortalThreads stopWsServer leCtx pool closeSocket =
+    LA.link =<< LA.async do
      waitForShutdown _icShutdownLatch
      logger $ mkGenericStrLog LevelInfo "server" "gracefully shutting down server"
      shutdownEvents pool (Logger logger) leCtx
      closeSocket
+     stopWsServer
+     -- kill all the background immortal threads
+     logger $ mkGenericStrLog LevelInfo "server" "killing all background immortal threads"
+     forM_ immortalThreads $ \thread -> do
+       logger $ mkGenericStrLog LevelInfo "server" $ "killing thread: " <> show (Immortal.threadId thread)
+       Immortal.stop thread
      shutdownApp
      cleanLoggerCtx loggerCtx
@@ -543,15 +584,15 @@ execQuery
      , UserInfoM m
      , HasSystemDefined m
      )
-  => BLC.ByteString
+  => Env.Environment
+  -> BLC.ByteString
   -> m BLC.ByteString
-execQuery queryBs = do
+execQuery env queryBs = do
   query <- case A.decode queryBs of
     Just jVal -> decodeValue jVal
     Nothing -> throw400 InvalidJSON "invalid json"
   buildSchemaCacheStrict
-  encJToLBS <$> runQueryM query
+  encJToLBS <$> runQueryM env query

 instance HttpLog AppM where
   logHttpError logger userInfoM reqId httpReq req qErr headers =
@@ -614,7 +655,6 @@ mkConsoleHTML path authMode enableTelemetry consoleAssetsDir =
     consoleTmplt = $(M.embedSingleTemplate "src-rsr/console.html")
-
 telemetryNotice :: String
 telemetryNotice =
   "Help us improve Hasura! The graphql-engine server collects anonymized "

View File

@@ -18,6 +18,7 @@ module Hasura.Db
   , LazyRespTx
   , defaultTxErrorHandler
   , mkTxErrorHandler
+  , lazyTxToQTx
   ) where

 import Control.Lens
@@ -134,7 +135,7 @@ type RespTx = Q.TxE QErr EncJSON
 type LazyRespTx = LazyTx QErr EncJSON

 setHeadersTx :: SessionVariables -> Q.TxE QErr ()
-setHeadersTx session =
+setHeadersTx session = do
   Q.unitQE defaultTxErrorHandler setSess () False
   where
     setSess = Q.fromText $
@@ -182,7 +183,9 @@ withUserInfo :: UserInfo -> LazyTx QErr a -> LazyTx QErr a
 withUserInfo uInfo = \case
   LTErr e -> LTErr e
   LTNoTx a -> LTNoTx a
-  LTTx tx -> LTTx $ setHeadersTx (_uiSession uInfo) >> tx
+  LTTx tx ->
+    let vars = _uiSession uInfo
+    in LTTx $ setHeadersTx vars >> tx

 instance Functor (LazyTx e) where
   fmap f = \case

View File

@@ -1,18 +1,18 @@
 module Hasura.Eventing.Common where

+import Hasura.Prelude
+
 import Control.Concurrent.STM.TVar
 import Control.Monad.STM
-import Hasura.Prelude
 import Hasura.RQL.Types.EventTrigger (EventId)
-import Hasura.RQL.Types.ScheduledTrigger (CronEventId,StandAloneScheduledEventId)
+import Hasura.RQL.Types.ScheduledTrigger (CronEventId, StandAloneScheduledEventId)

 import qualified Data.Set as Set

 data LockedEventsCtx
   = LockedEventsCtx
   { leCronEvents :: TVar (Set.Set CronEventId)
   , leStandAloneEvents :: TVar (Set.Set StandAloneScheduledEventId)
   , leEvents :: TVar (Set.Set EventId)
   }

 initLockedEventsCtx :: STM LockedEventsCtx
@@ -25,16 +25,16 @@ initLockedEventsCtx = do
 -- | After the events are fetched from the DB, we store the locked events
 -- in a hash set(order doesn't matter and look ups are faster) in the
 -- event engine context
-saveLockedEvents :: [Text] -> TVar (Set.Set Text) -> IO ()
+saveLockedEvents :: (MonadIO m) => [Text] -> TVar (Set.Set Text) -> m ()
 saveLockedEvents eventIds lockedEvents =
-  atomically $ do
+  liftIO $ atomically $ do
     lockedEventsVals <- readTVar lockedEvents
     writeTVar lockedEvents $!
       Set.union lockedEventsVals $ Set.fromList eventIds

 -- | Remove an event from the 'LockedEventsCtx' after it has been processed
-removeEventFromLockedEvents :: Text -> TVar (Set.Set Text) -> IO ()
+removeEventFromLockedEvents :: MonadIO m => Text -> TVar (Set.Set Text) -> m ()
 removeEventFromLockedEvents eventId lockedEvents =
-  atomically $ do
+  liftIO $ atomically $ do
     lockedEventsVals <- readTVar lockedEvents
     writeTVar lockedEvents $! Set.delete eventId lockedEventsVals
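The generalisation here is mechanical: the STM transaction stays exactly as it was, liftIO wraps it, and the helper becomes callable from any MonadIO stack instead of bare IO (which is what the lifted event-queue worker needs). The same shape in a self-contained sketch:

import Control.Concurrent.STM (atomically)
import Control.Concurrent.STM.TVar (TVar, newTVarIO, readTVar, readTVarIO, writeTVar)
import Control.Monad.IO.Class (MonadIO, liftIO)
import qualified Data.Set as Set

-- Same shape as saveLockedEvents: usable from IO, ReaderT r IO, etc.
trackIds :: MonadIO m => [Int] -> TVar (Set.Set Int) -> m ()
trackIds ids tv = liftIO $ atomically $ do
  current <- readTVar tv
  writeTVar tv $! Set.union current (Set.fromList ids)

main :: IO ()
main = do
  tv <- newTVarIO Set.empty
  trackIds [1, 2, 3] tv
  print =<< readTVarIO tv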

View File

@@ -40,39 +40,38 @@ module Hasura.Eventing.EventTrigger
   , EventEngineCtx(..)
   ) where

-import Control.Concurrent.Async (async, link, wait, withAsync)
 import Control.Concurrent.Extended (sleep)
 import Control.Concurrent.STM.TVar
 import Control.Monad.Catch (MonadMask, bracket_)
 import Control.Monad.STM
+import Control.Monad.Trans.Control (MonadBaseControl)
 import Data.Aeson
 import Data.Aeson.Casing
 import Data.Aeson.TH
 import Data.Has
 import Data.Int (Int64)
 import Data.String
 import Data.Time.Clock
 import Data.Word
-import Hasura.Eventing.HTTP
 import Hasura.Eventing.Common
+import Hasura.Eventing.HTTP
 import Hasura.HTTP
 import Hasura.Prelude
 import Hasura.RQL.DDL.Headers
 import Hasura.RQL.Types
 import Hasura.Server.Version (HasVersion)
 import Hasura.SQL.Types

-import qualified Data.HashMap.Strict as M
-import qualified Data.TByteString as TBS
-import qualified Data.Text as T
-import qualified Data.Time.Clock as Time
-import qualified Database.PG.Query as Q
-import qualified Hasura.Logging as L
-import qualified Network.HTTP.Client as HTTP
-import qualified Database.PG.Query.PTI as PTI
-import qualified PostgreSQL.Binary.Encoding as PE
+import qualified Control.Concurrent.Async.Lifted.Safe as LA
+import qualified Data.HashMap.Strict as M
+import qualified Data.TByteString as TBS
+import qualified Data.Text as T
+import qualified Data.Time.Clock as Time
+import qualified Database.PG.Query as Q
+import qualified Database.PG.Query.PTI as PTI
+import qualified Hasura.Logging as L
+import qualified Network.HTTP.Client as HTTP
+import qualified PostgreSQL.Binary.Encoding as PE

 data TriggerMetadata
   = TriggerMetadata { tmName :: TriggerName }
@@ -159,19 +158,31 @@ initEventEngineCtx maxT _eeCtxFetchInterval = do
 --   - try not to cause webhook workers to stall waiting on DB fetch
 --   - limit webhook HTTP concurrency per HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE
 processEventQueue
-  :: (HasVersion) => L.Logger L.Hasura -> LogEnvHeaders -> HTTP.Manager-> Q.PGPool
-  -> IO SchemaCache -> EventEngineCtx -> LockedEventsCtx
-  -> IO void
-processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx{..} LockedEventsCtx {leEvents}= do
+  :: forall m void
+   . ( HasVersion
+     , MonadIO m
+     , MonadBaseControl IO m
+     , LA.Forall (LA.Pure m)
+     , MonadMask m
+     )
+  => L.Logger L.Hasura
+  -> LogEnvHeaders
+  -> HTTP.Manager
+  -> Q.PGPool
+  -> IO SchemaCache
+  -> EventEngineCtx
+  -> LockedEventsCtx
+  -> m void
+processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx{..} LockedEventsCtx{leEvents} = do
   events0 <- popEventsBatch
   go events0 0 False
   where
     fetchBatchSize = 100
     popEventsBatch = do
-      let run = runExceptT . Q.runTx pool (Q.RepeatableRead, Just Q.ReadWrite)
+      let run = liftIO . runExceptT . Q.runTx pool (Q.RepeatableRead, Just Q.ReadWrite)
       run (fetchEvents fetchBatchSize) >>= \case
        Left err -> do
-          L.unLogger logger $ EventInternalErr err
+          liftIO $ L.unLogger logger $ EventInternalErr err
          return []
        Right events -> do
          saveLockedEvents (map eId events) leEvents
@@ -179,25 +190,26 @@ processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx
     -- work on this batch of events while prefetching the next. Recurse after we've forked workers
     -- for each in the batch, minding the requested pool size.
-    go :: [Event] -> Int -> Bool -> IO void
+    go :: [Event] -> Int -> Bool -> m void
     go events !fullFetchCount !alreadyWarned = do
       -- process events ASAP until we've caught up; only then can we sleep
-      when (null events) $ sleep _eeCtxFetchInterval
+      when (null events) . liftIO $ sleep _eeCtxFetchInterval

       -- Prefetch next events payload while concurrently working through our current batch.
       -- NOTE: we probably don't need to prefetch so early, but probably not
       -- worth the effort for something more fine-tuned
-      eventsNext <- withAsync popEventsBatch $ \eventsNextA -> do
+      eventsNext <- LA.withAsync popEventsBatch $ \eventsNextA -> do
         -- process approximately in order, minding HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE:
         forM_ events $ \event -> do
-          t <- async $ runReaderT (withEventEngineCtx eeCtx $ (processEvent event)) (logger, httpMgr)
-          -- removing an event from the _eeCtxLockedEvents after the event has
-          -- been processed
-          removeEventFromLockedEvents (eId event) leEvents
-          link t
+          t <- processEvent event
+            & withEventEngineCtx eeCtx
+            & flip runReaderT (logger, httpMgr)
+            & LA.async
+          -- removing an event from the _eeCtxLockedEvents after the event has
+          -- been processed
+          removeEventFromLockedEvents (eId event) leEvents
+          LA.link t

         -- return when next batch ready; some 'processEvent' threads may be running.
-        wait eventsNextA
+        LA.wait eventsNextA

       let lenEvents = length events
       if | lenEvents == fetchBatchSize -> do
@@ -220,13 +232,14 @@ processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx
           go eventsNext 0 False

     processEvent
-      :: ( HasVersion
-         , MonadReader r m
+      :: forall io r
+       . ( HasVersion
+         , MonadIO io
+         , MonadReader r io
          , Has HTTP.Manager r
          , Has (L.Logger L.Hasura) r
-         , MonadIO m
         )
-      => Event -> m ()
+      => Event -> io ()
     processEvent e = do
       cache <- liftIO getSchemaCache
       let meti = getEventTriggerInfoFromEvent cache e
View File

@@ -73,40 +73,40 @@ module Hasura.Eventing.ScheduledTrigger
   , unlockAllLockedScheduledEvents
   ) where

 import Control.Arrow.Extended (dup)
 import Control.Concurrent.Extended (sleep)
 import Control.Concurrent.STM.TVar
 import Data.Has
 import Data.Int (Int64)
 import Data.List (unfoldr)
 import Data.Time.Clock
+import Hasura.Eventing.Common
 import Hasura.Eventing.HTTP
 import Hasura.HTTP
 import Hasura.Prelude
+import Hasura.RQL.DDL.EventTrigger (getHeaderInfosFromConf)
 import Hasura.RQL.DDL.Headers
 import Hasura.RQL.Types
 import Hasura.Server.Version (HasVersion)
-import Hasura.RQL.DDL.EventTrigger (getHeaderInfosFromConf)
 import Hasura.SQL.DML
 import Hasura.SQL.Types
-import Hasura.Eventing.Common
 import System.Cron

-import qualified Data.Aeson as J
-import qualified Data.Aeson.Casing as J
-import qualified Data.Aeson.TH as J
-import qualified Data.HashMap.Strict as Map
-import qualified Data.TByteString as TBS
-import qualified Data.Text as T
-import qualified Database.PG.Query as Q
-import qualified Hasura.Logging as L
-import qualified Network.HTTP.Client as HTTP
-import qualified Text.Builder as TB (run)
-import qualified PostgreSQL.Binary.Decoding as PD
-import qualified Data.Set as Set
-import qualified Database.PG.Query.PTI as PTI
-import qualified PostgreSQL.Binary.Encoding as PE
+import qualified Data.Aeson as J
+import qualified Data.Aeson.Casing as J
+import qualified Data.Aeson.TH as J
+import qualified Data.Environment as Env
+import qualified Data.HashMap.Strict as Map
+import qualified Data.Set as Set
+import qualified Data.TByteString as TBS
+import qualified Data.Text as T
+import qualified Database.PG.Query as Q
+import qualified Database.PG.Query.PTI as PTI
+import qualified Hasura.Logging as L
+import qualified Network.HTTP.Client as HTTP
+import qualified PostgreSQL.Binary.Decoding as PD
+import qualified PostgreSQL.Binary.Encoding as PE
+import qualified Text.Builder as TB (run)

 newtype ScheduledTriggerInternalErr
@@ -133,10 +133,10 @@ data ScheduledEventStatus
 scheduledEventStatusToText :: ScheduledEventStatus -> Text
 scheduledEventStatusToText SESScheduled = "scheduled"
 scheduledEventStatusToText SESLocked = "locked"
 scheduledEventStatusToText SESDelivered = "delivered"
 scheduledEventStatusToText SESError = "error"
 scheduledEventStatusToText SESDead = "dead"

 instance Q.ToPrepArg ScheduledEventStatus where
   toPrepVal = Q.toPrepVal . scheduledEventStatusToText
@@ -338,19 +338,19 @@ generateScheduleTimes from n cron = take n $ go from
     go = unfoldr (fmap dup . nextMatch cron)

 processCronEvents
-  :: HasVersion
+  :: (HasVersion, MonadIO m)
   => L.Logger L.Hasura
   -> LogEnvHeaders
   -> HTTP.Manager
   -> Q.PGPool
   -> IO SchemaCache
   -> TVar (Set.Set CronEventId)
-  -> IO ()
+  -> m ()
 processCronEvents logger logEnv httpMgr pgpool getSC lockedCronEvents = do
-  cronTriggersInfo <- scCronTriggers <$> getSC
+  cronTriggersInfo <- scCronTriggers <$> liftIO getSC
   cronScheduledEvents <-
-    runExceptT $
+    liftIO . runExceptT $
       Q.runTx pgpool (Q.ReadCommitted, Just Q.ReadWrite) getPartialCronEvents
   case cronScheduledEvents of
     Right partialEvents -> do
       -- save the locked standalone events that have been fetched from the
@@ -380,19 +380,20 @@ processCronEvents logger logEnv httpMgr pgpool getSC lockedCronEvents = do
       either logInternalError pure finally
     Left err -> logInternalError err
   where
-    logInternalError err = L.unLogger logger $ ScheduledTriggerInternalErr err
+    logInternalError err = liftIO . L.unLogger logger $ ScheduledTriggerInternalErr err

 processStandAloneEvents
-  :: HasVersion
-  => L.Logger L.Hasura
+  :: (HasVersion, MonadIO m)
+  => Env.Environment
+  -> L.Logger L.Hasura
   -> LogEnvHeaders
   -> HTTP.Manager
   -> Q.PGPool
   -> TVar (Set.Set StandAloneScheduledEventId)
-  -> IO ()
-processStandAloneEvents logger logEnv httpMgr pgpool lockedStandAloneEvents = do
+  -> m ()
+processStandAloneEvents env logger logEnv httpMgr pgpool lockedStandAloneEvents = do
   standAloneScheduledEvents <-
-    runExceptT $
+    liftIO . runExceptT $
       Q.runTx pgpool (Q.ReadCommitted, Just Q.ReadWrite) getOneOffScheduledEvents
   case standAloneScheduledEvents of
     Right standAloneScheduledEvents' -> do
@@ -410,8 +411,8 @@ processStandAloneEvents logger logEnv httpMgr pgpool lockedStandAloneEvents = do
               headerConf
               comment )
               -> do
-                webhookInfo <- runExceptT $ resolveWebhook webhookConf
-                headerInfo <- runExceptT $ getHeaderInfosFromConf headerConf
+                webhookInfo <- liftIO . runExceptT $ resolveWebhook env webhookConf
+                headerInfo <- liftIO . runExceptT $ getHeaderInfosFromConf env headerConf

                 case webhookInfo of
                   Right webhookInfo' -> do
@@ -440,22 +441,23 @@ processStandAloneEvents logger logEnv httpMgr pgpool lockedStandAloneEvents = do
     Left standAloneScheduledEventsErr -> logInternalError standAloneScheduledEventsErr
   where
-    logInternalError err = L.unLogger logger $ ScheduledTriggerInternalErr err
+    logInternalError err = liftIO . L.unLogger logger $ ScheduledTriggerInternalErr err

 processScheduledTriggers
-  :: HasVersion
-  => L.Logger L.Hasura
+  :: (HasVersion, MonadIO m)
+  => Env.Environment
+  -> L.Logger L.Hasura
   -> LogEnvHeaders
   -> HTTP.Manager
   -> Q.PGPool
   -> IO SchemaCache
   -> LockedEventsCtx
-  -> IO void
-processScheduledTriggers logger logEnv httpMgr pgpool getSC LockedEventsCtx {..} =
+  -> m void
+processScheduledTriggers env logger logEnv httpMgr pgpool getSC LockedEventsCtx {..} =
   forever $ do
     processCronEvents logger logEnv httpMgr pgpool getSC leCronEvents
-    processStandAloneEvents logger logEnv httpMgr pgpool leStandAloneEvents
-    sleep (minutes 1)
+    processStandAloneEvents env logger logEnv httpMgr pgpool leStandAloneEvents
+    liftIO $ sleep (minutes 1)

 processScheduledEvent ::
   ( MonadReader r m
View File

@@ -14,7 +14,7 @@ module Hasura.GraphQL.Execute
   , EP.initPlanCache
   , EP.clearPlanCache
   , EP.dumpPlanCache
+  , EQ.PreparedSql(..)

   , ExecutionCtx(..)
   , MonadGQLExecutionCheck(..)
@@ -27,6 +27,7 @@ import Data.Text.Conversions

 import qualified Data.Aeson as J
 import qualified Data.CaseInsensitive as CI
+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as Map
 import qualified Data.Text as T
@@ -120,13 +121,6 @@ getExecPlanPartial userInfo sc queryType req = do
   where
     roleName = _uiRole userInfo

-    -- checkQueryInAllowlist =
-    --   -- only for non-admin roles
-    --   when (roleName /= adminRoleName) $ do
-    --     let notInAllowlist =
-    --           not $ _isQueryInAllowlist (_grQuery req) (scAllowlist sc)
-    --     when notInAllowlist $ modifyQErr modErr $ throw400 ValidationFailed "query is not allowed"
-
     contextMap =
       case queryType of
         ET.QueryHasura -> scGQLContext sc
@@ -172,10 +166,10 @@ getExecPlanPartial userInfo sc queryType req = do
         "in the document when operationName is not specified"

 -- The graphql query is resolved into a sequence of execution operations
-data ResolvedExecutionPlan
-  = QueryExecutionPlan (EPr.ExecutionPlan (LazyRespTx, EQ.GeneratedSqlMap) EPr.RemoteCall (G.Name, J.Value))
+data ResolvedExecutionPlan m
+  = QueryExecutionPlan (EPr.ExecutionPlan (m EncJSON, EQ.GeneratedSqlMap) EPr.RemoteCall (G.Name, J.Value))
   -- ^ query execution; remote schemas and introspection possible
-  | MutationExecutionPlan (EPr.ExecutionPlan (LazyRespTx, HTTP.ResponseHeaders) EPr.RemoteCall (G.Name, J.Value))
+  | MutationExecutionPlan (EPr.ExecutionPlan (m EncJSON, HTTP.ResponseHeaders) EPr.RemoteCall (G.Name, J.Value))
   -- ^ mutation execution; only __typename introspection supported
   | SubscriptionExecutionPlan (EPr.ExecutionPlan EL.LiveQueryPlan Void Void)
   -- ^ live query execution; remote schemas and introspection not supported
@@ -212,8 +206,15 @@ checkQueryInAllowlist enableAL userInfo req sc =
     unGQLExecDoc q

 getResolvedExecPlan
-  :: forall m . (HasVersion, MonadError QErr m, MonadIO m)
-  => PGExecCtx
+  :: forall m tx
+   . ( HasVersion
+     , MonadError QErr m
+     , MonadIO m
+     , MonadIO tx
+     , MonadTx tx
+     )
+  => Env.Environment
+  -> PGExecCtx
   -> EP.PlanCache
   -> UserInfo
   -> SQLGenCtx
@@ -223,8 +224,8 @@ getResolvedExecPlan
   -> HTTP.Manager
   -> [HTTP.Header]
   -> (GQLReqUnparsed, GQLReqParsed)
-  -> m (Telem.CacheHit, ResolvedExecutionPlan)
-getResolvedExecPlan pgExecCtx planCache userInfo sqlGenCtx
+  -> m (Telem.CacheHit, ResolvedExecutionPlan tx)
+getResolvedExecPlan env pgExecCtx planCache userInfo sqlGenCtx
   sc scVer queryType httpManager reqHeaders (reqUnparsed, reqParsed) = do

   planM <- liftIO $ EP.getPlan scVer (_uiRole userInfo) opNameM queryStr
@@ -234,7 +235,7 @@ getResolvedExecPlan pgExecCtx planCache userInfo sqlGenCtx
     -- plans are only for queries and subscriptions
     Just plan -> (Telem.Hit,) <$> case plan of
       EP.RPQuery queryPlan -> do
-        (tx, genSql) <- EQ.queryOpFromPlan httpManager reqHeaders userInfo queryVars queryPlan
+        -- (tx, genSql) <- EQ.queryOpFromPlan env httpManager reqHeaders userInfo queryVars queryPlan
        return $ QueryExecutionPlan _ -- tx (Just genSql)
      EP.RPSubs subsPlan ->
        return $ SubscriptionExecutionPlan _ -- <$> EL.reuseLiveQueryPlan pgExecCtx usrVars queryVars subsPlan
@@ -244,7 +245,7 @@ getResolvedExecPlan pgExecCtx planCache userInfo sqlGenCtx
     -- addPlanToCache plan =
     --   liftIO $ EP.addPlan scVer (userRole userInfo)
     --   opNameM queryStr plan planCache
-    noExistingPlan :: m ResolvedExecutionPlan
+    noExistingPlan :: m (ResolvedExecutionPlan tx)
     noExistingPlan = do
       -- GraphQL requests may incorporate fragments which insert a pre-defined
       -- part of a GraphQL query. Here we make sure to remember those
@@ -260,13 +261,13 @@ getResolvedExecPlan pgExecCtx planCache userInfo sqlGenCtx
          -- (Here the above fragment inlining is actually executed.)
          inlinedSelSet <- EI.inlineSelectionSet fragments selSet
          (execPlan, plan, _unprepared) <-
-            EQ.convertQuerySelSet gCtx userInfo httpManager reqHeaders inlinedSelSet varDefs (_grVariables reqUnparsed)
+            EQ.convertQuerySelSet env gCtx userInfo httpManager reqHeaders inlinedSelSet varDefs (_grVariables reqUnparsed)
          -- traverse_ (addPlanToCache . EP.RPQuery) plan
          return $ QueryExecutionPlan $ execPlan
        G.TypedOperationDefinition G.OperationTypeMutation _ varDefs _ selSet -> do
          -- (Here the above fragment inlining is actually executed.)
          inlinedSelSet <- EI.inlineSelectionSet fragments selSet
-          queryTx <- EM.convertMutationSelectionSet gCtx sqlGenCtx userInfo httpManager reqHeaders
+          queryTx <- EM.convertMutationSelectionSet env gCtx sqlGenCtx userInfo httpManager reqHeaders
            inlinedSelSet varDefs (_grVariables reqUnparsed)
          -- traverse_ (addPlanToCache . EP.RPQuery) plan
          return $ MutationExecutionPlan $ queryTx
@@ -315,6 +316,10 @@ getResolvedExecPlan pgExecCtx planCache userInfo sqlGenCtx
          --     ]
          --   }
          -- }
+          -- Parse as query to check correctness
+          (_execPlan :: EPr.ExecutionPlan (tx EncJSON, EQ.GeneratedSqlMap) EPr.RemoteCall (G.Name, J.Value)
+            , _plan, unpreparedAST) <-
+            EQ.convertQuerySelSet env gCtx userInfo httpManager reqHeaders inlinedSelSet varDefs (_grVariables reqUnparsed)
          case NE.nonEmpty inlinedSelSet of
            Nothing -> throw500 "empty selset for subscription"
            Just (_ :| rst) ->
@@ -322,9 +327,6 @@ getResolvedExecPlan pgExecCtx planCache userInfo sqlGenCtx
              in
              unless (multipleAllowed || null rst) $
                throw400 ValidationFailed $ "subscriptions must select one top level field"
-          -- Parse as query to check correctness
-          (_execPlan, _plan, unpreparedAST) <-
-            EQ.convertQuerySelSet gCtx userInfo httpManager reqHeaders inlinedSelSet varDefs (_grVariables reqUnparsed)
validSubscriptionAST <- for unpreparedAST validateSubscriptionRootField validSubscriptionAST <- for unpreparedAST validateSubscriptionRootField
(lqOp, plan) <- EL.buildLiveQueryPlan pgExecCtx userInfo validSubscriptionAST (lqOp, plan) <- EL.buildLiveQueryPlan pgExecCtx userInfo validSubscriptionAST
-- getSubsOpM pgExecCtx userInfo inlinedSelSet -- getSubsOpM pgExecCtx userInfo inlinedSelSet
@ -351,7 +353,8 @@ execRemoteGQ
, MonadReader ExecutionCtx m , MonadReader ExecutionCtx m
, MonadQueryLog m , MonadQueryLog m
) )
=> RequestId => Env.Environment
-> RequestId
-> UserInfo -> UserInfo
-> [HTTP.Header] -> [HTTP.Header]
-> GQLReqUnparsed -> GQLReqUnparsed
@ -359,12 +362,12 @@ execRemoteGQ
-> G.TypedOperationDefinition G.NoFragments G.Name -> G.TypedOperationDefinition G.NoFragments G.Name
-> m (DiffTime, HttpResponse EncJSON) -> m (DiffTime, HttpResponse EncJSON)
-- ^ Also returns time spent in http request, for telemetry. -- ^ Also returns time spent in http request, for telemetry.
execRemoteGQ reqId userInfo reqHdrs q rsi opDef = do execRemoteGQ env reqId userInfo reqHdrs q rsi opDef = do
execCtx <- ask execCtx <- ask
let logger = _ecxLogger execCtx let logger = _ecxLogger execCtx
manager = _ecxHttpManager execCtx manager = _ecxHttpManager execCtx
opType = G._todType opDef opType = G._todType opDef
logQueryLog logger q Nothing reqId logQueryLog logger q Nothing reqId
(time, respHdrs, resp) <- execRemoteGQ' manager userInfo reqHdrs q rsi opType (time, respHdrs, resp) <- execRemoteGQ' env manager userInfo reqHdrs q rsi opType
let !httpResp = HttpResponse (encJFromLBS resp) respHdrs let !httpResp = HttpResponse (encJFromLBS resp) respHdrs
return (time, httpResp) return (time, httpResp)
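The hunks above thread a new Env.Environment value through execution-plan resolution instead of reading the process environment in IO at each use site. The Data.Environment module itself does not appear in this part of the diff, so the following is only a minimal sketch of the shape such a module could take; every name in it (Environment, getEnvironment, mkEnvironment, lookupEnv) is an illustrative assumption, not the actual definition:

    module Data.Environment
      ( Environment
      , getEnvironment
      , mkEnvironment
      , lookupEnv
      ) where

    import qualified Data.HashMap.Strict as M
    import qualified Data.Text           as T
    import qualified System.Environment

    -- A snapshot of process environment variables, passed around as a
    -- value instead of read ad hoc via IO.
    newtype Environment = Environment (M.HashMap T.Text T.Text)

    -- Capture the process environment once, at startup.
    getEnvironment :: IO Environment
    getEnvironment = mkEnvironment <$> System.Environment.getEnvironment

    mkEnvironment :: [(String, String)] -> Environment
    mkEnvironment = Environment . M.fromList . map (\(k, v) -> (T.pack k, T.pack v))

    -- Pure lookup: no IO needed at use sites.
    lookupEnv :: Environment -> T.Text -> Maybe T.Text
    lookupEnv (Environment vars) k = M.lookup k vars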
@@ -30,6 +30,7 @@ import qualified Data.Aeson.Casing as J
 import qualified Data.Aeson.Extended as J
 import qualified Data.Aeson.TH as J
 import qualified Data.ByteString as B
+import qualified Data.Environment as E
 import qualified Data.HashMap.Strict as Map
 import qualified Data.HashMap.Strict.InsOrd as OMap
 import qualified Data.Sequence as Seq
@@ -35,6 +35,8 @@ module Hasura.GraphQL.Execute.LiveQuery.Poll (
 , LiveQueryMetadata(..)
 ) where
+import Data.List.Split (chunksOf)
+import GHC.AssertNF
 import Hasura.Prelude
 import qualified Control.Concurrent.Async as A
@@ -54,8 +56,6 @@ import qualified Database.PG.Query as Q
 import qualified ListT
 import qualified StmContainers.Map as STMMap
-import Data.List.Split (chunksOf)
-import GHC.AssertNF
 import Control.Lens
 import qualified Hasura.GraphQL.Execute.LiveQuery.TMap as TMap
@@ -101,7 +101,6 @@ data Subscriber
 data LiveQueryMetadata
 = LiveQueryMetadata
 { _lqmExecutionTime :: !Clock.DiffTime
--- ^ Time spent waiting on the generated query to execute on postgres or the remote.
 }
 data LiveQueryResponse
@@ -257,10 +256,10 @@ data Poller
 data PollerIOState
 = PollerIOState
 { _pThread :: !Immortal.Thread
 -- ^ a handle on the pollers worker thread that can be used to
 -- 'Immortal.stop' it if all its cohorts stop listening
 , _pId :: !PollerId
 }
 data PollerKey
@@ -440,7 +439,6 @@ pollQuery logger pollerId lqOpts pgExecCtx pgQuery cohortMap = do
 , _pdTotalTime = totalTime
 }
 where
 LiveQueriesOptions batchSize _ = lqOpts
 getCohortSnapshot (cohortVars, handlerC) = do
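In pollQuery above, batchSize (from LiveQueriesOptions) is combined with the relocated chunksOf import to split a poller's cohorts into fixed-size batches, one database round trip per batch. A toy sketch of that batching pattern under those assumptions (the names here are placeholders, not the engine's definitions):

    import Data.List.Split (chunksOf)

    -- Split a poller's cohorts into fixed-size batches; each batch is then
    -- queried together rather than one cohort at a time.
    batchCohorts :: Int -> [cohort] -> [[cohort]]
    batchCohorts batchSize = chunksOf batchSize

    main :: IO ()
    main = print (batchCohorts 3 ([1 .. 10] :: [Int]))
    -- prints [[1,2,3],[4,5,6],[7,8,9],[10]]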
@@ -61,6 +61,7 @@ data LiveQueryId
 , _lqiSubscriber :: !SubscriberId
 } deriving Show
 addLiveQuery
 :: L.Logger L.Hasura
 -> SubscriberMetadata
@@ -123,6 +124,7 @@ addLiveQuery logger subscriberMetadata lqState plan onResultAction = do
 newPoller = Poller <$> TMap.new <*> STM.newEmptyTMVar
 removeLiveQuery
 :: L.Logger L.Hasura
 -> LiveQueriesState
@@ -3,6 +3,7 @@ module Hasura.GraphQL.Execute.Mutation where
 import Hasura.Prelude
 import qualified Data.Aeson as J
+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as Map
 import qualified Data.HashMap.Strict.InsOrd as OMap
 import qualified Data.IntMap as IntMap
@@ -34,37 +35,40 @@ import qualified Language.GraphQL.Draft.Syntax as G
 convertDelete
 :: (HasVersion, MonadIO m)
-=> SessionVariables
+=> Env.Environment
+-> SessionVariables
 -> RQL.MutationRemoteJoinCtx
 -> RQL.AnnDelG UnpreparedValue
 -> Bool
 -> m RespTx
-convertDelete usrVars rjCtx deleteOperation stringifyNum = do
+convertDelete env usrVars rjCtx deleteOperation stringifyNum = do
-pure $ RQL.execDeleteQuery stringifyNum (Just rjCtx) (preparedDelete, planVariablesSequence usrVars planningState)
+pure $ RQL.execDeleteQuery env stringifyNum (Just rjCtx) (preparedDelete, planVariablesSequence usrVars planningState)
 where (preparedDelete, planningState) = runIdentity $ runPlan $ RQL.traverseAnnDel prepareWithPlan deleteOperation
 convertUpdate
 :: (HasVersion, MonadIO m)
-=> SessionVariables
+=> Env.Environment
+-> SessionVariables
 -> RQL.MutationRemoteJoinCtx
 -> RQL.AnnUpdG UnpreparedValue
 -> Bool
 -> m RespTx
-convertUpdate usrVars rjCtx updateOperation stringifyNum = do
+convertUpdate env usrVars rjCtx updateOperation stringifyNum = do
 pure $ if null $ RQL.uqp1OpExps updateOperation
 then pure $ buildEmptyMutResp $ RQL.uqp1Output preparedUpdate
-else RQL.execUpdateQuery stringifyNum (Just rjCtx) (preparedUpdate, Seq.empty)
+else RQL.execUpdateQuery env stringifyNum (Just rjCtx) (preparedUpdate, Seq.empty)
 where preparedUpdate = runIdentity $ RQL.traverseAnnUpd (pure . unpreparedToTextSQL) updateOperation
 convertInsert
 :: (HasVersion, MonadIO m)
-=> SessionVariables
+=> Env.Environment
+-> SessionVariables
 -> RQL.MutationRemoteJoinCtx
 -> AnnMultiInsert UnpreparedValue
 -> Bool
 -> m RespTx
-convertInsert usrVars rjCtx insertOperation stringifyNum = do
+convertInsert env usrVars rjCtx insertOperation stringifyNum = do
-pure $ convertToSQLTransaction preparedInsert rjCtx Seq.empty stringifyNum
+pure $ convertToSQLTransaction env preparedInsert rjCtx Seq.empty stringifyNum
 where preparedInsert = fmapAnnInsert unpreparedToTextSQL insertOperation
 planVariablesSequence :: SessionVariables -> PlanningSt -> Seq.Seq Q.PrepArg
@@ -75,18 +79,19 @@ convertMutationRootField
 , MonadIO m
 , MonadError QErr m
 )
-=> UserInfo
+=> Env.Environment
+-> UserInfo
 -> HTTP.Manager
 -> HTTP.RequestHeaders
 -> Bool
 -> MutationRootField UnpreparedValue
 -> m (Either (LazyRespTx, HTTP.ResponseHeaders) RemoteField)
-convertMutationRootField userInfo manager reqHeaders stringifyNum = \case
+convertMutationRootField env userInfo manager reqHeaders stringifyNum = \case
-RFDB (MDBInsert s) -> noResponseHeaders =<< convertInsert userSession rjCtx s stringifyNum
+RFDB (MDBInsert s) -> noResponseHeaders =<< convertInsert env userSession rjCtx s stringifyNum
-RFDB (MDBUpdate s) -> noResponseHeaders =<< convertUpdate userSession rjCtx s stringifyNum
+RFDB (MDBUpdate s) -> noResponseHeaders =<< convertUpdate env userSession rjCtx s stringifyNum
-RFDB (MDBDelete s) -> noResponseHeaders =<< convertDelete userSession rjCtx s stringifyNum
+RFDB (MDBDelete s) -> noResponseHeaders =<< convertDelete env userSession rjCtx s stringifyNum
 RFRemote remote -> pure $ Right remote
-RFAction (AMSync s) -> Left <$> first liftTx <$> resolveActionExecution userInfo s actionExecContext
+RFAction (AMSync s) -> Left <$> first liftTx <$> resolveActionExecution env userInfo s actionExecContext
 RFAction (AMAsync s) -> noResponseHeaders =<< resolveActionMutationAsync s reqHeaders userSession
 RFRaw s -> noResponseHeaders $ pure $ encJFromJValue s
 where
@@ -99,8 +104,13 @@ convertMutationRootField userInfo manager reqHeaders stringifyNum = \case
 rjCtx = (manager, reqHeaders, userInfo)
 convertMutationSelectionSet
-:: (HasVersion, MonadIO m, MonadError QErr m)
-=> GQLContext
+:: ( HasVersion
+, MonadIO m
+, MonadError QErr m
+, MonadTx tx
+)
+=> Env.Environment
+-> GQLContext
 -> SQLGenCtx
 -> UserInfo
 -> HTTP.Manager
@@ -108,8 +118,8 @@ convertMutationSelectionSet
 -> G.SelectionSet G.NoFragments G.Name
 -> [G.VariableDefinition]
 -> Maybe GH.VariableValues
--> m (ExecutionPlan (LazyRespTx, HTTP.ResponseHeaders) RemoteCall (G.Name, J.Value))
+-> m (ExecutionPlan (tx EncJSON, HTTP.ResponseHeaders) RemoteCall (G.Name, J.Value))
-convertMutationSelectionSet gqlContext sqlGenCtx userInfo manager reqHeaders fields varDefs varValsM = do
+convertMutationSelectionSet env gqlContext sqlGenCtx userInfo manager reqHeaders fields varDefs varValsM = do
 mutationParser <- onNothing (gqlMutationParser gqlContext) $
 throw400 ValidationFailed "no mutations exist"
 -- Parse the GraphQL query into the RQL AST
@@ -119,13 +129,13 @@ convertMutationSelectionSet gqlContext sqlGenCtx userInfo manager reqHeaders fie
 >>= (mutationParser >>> (`onLeft` reportParseErrors))
 -- Transform the RQL AST into a prepared SQL query
-txs <- for unpreparedQueries $ convertMutationRootField userInfo manager reqHeaders (stringifyNum sqlGenCtx)
+txs <- for unpreparedQueries $ convertMutationRootField env userInfo manager reqHeaders (stringifyNum sqlGenCtx)
 let txList = OMap.toList txs
 case (mapMaybe takeTx txList, mapMaybe takeRemote txList) of
 (dbPlans, []) -> do
 let allHeaders = concatMap (snd . snd) dbPlans
 combinedTx = toSingleTx $ map (G.unName *** fst) dbPlans
-pure $ ExecStepDB (combinedTx, allHeaders)
+pure $ ExecStepDB (liftTx $ lazyTxToQTx combinedTx, allHeaders)
 ([], remotes@(firstRemote:_)) -> do
 let (remoteOperation, varValsM') =
 buildTypedOperation
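The mutation conversion above now produces a transaction in any MonadTx tx and folds the per-field transactions into one with toSingleTx. A simplified sketch of that fold, using plain aeson values in place of EncJSON (the names and types here are stand-ins, not the engine's definitions):

    {-# LANGUAGE OverloadedStrings #-}
    import qualified Data.Aeson          as J
    import qualified Data.HashMap.Strict as M
    import           Data.Text           (Text)

    -- Run each field's transaction in order and collect the results into a
    -- single JSON object keyed by field alias.
    toSingleTx :: Monad m => [(Text, m J.Value)] -> m J.Value
    toSingleTx fields = do
      results <- mapM (\(alias, action) -> (,) alias <$> action) fields
      pure $ J.toJSON (M.fromList results)

    main :: IO ()
    main = do
      combined <- toSingleTx
        [ ("insert_user",    pure (J.toJSON (1 :: Int)))
        , ("insert_address", pure (J.toJSON (2 :: Int)))
        ]
      print combined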
@@ -16,13 +16,15 @@ import qualified Data.Aeson as J
 import qualified Data.Aeson.Casing as J
 import qualified Data.Aeson.TH as J
-import Hasura.RQL.Types
-import Hasura.Session
 import qualified Hasura.Cache as Cache
 import qualified Hasura.GraphQL.Execute.LiveQuery as LQ
 import qualified Hasura.GraphQL.Execute.Query as EQ
 import qualified Hasura.GraphQL.Execute.Types as ET
 import qualified Hasura.GraphQL.Transport.HTTP.Protocol as GH
+import Hasura.RQL.Types
+import Hasura.Session
 data PlanId
 = PlanId
@@ -49,13 +51,13 @@ newtype PlanCache
 = PlanCache {_unPlanCache :: Cache.Cache PlanId ReusablePlan}
 data ReusablePlan
-= RPQuery !EQ.ReusableQueryPlan
+= RPQuery !EQ.ReusableQueryPlan -- TODO (if we do query plan caching) [QueryRootFldUnresolved]
 | RPSubs !LQ.ReusableLiveQueryPlan
 instance J.ToJSON ReusablePlan where
 toJSON = \case
 RPQuery queryPlan -> J.toJSON queryPlan
 RPSubs subsPlan -> J.toJSON subsPlan
 newtype PlanCacheOptions
 = PlanCacheOptions { unPlanCacheSize :: Maybe Cache.CacheSize }
@@ -12,6 +12,7 @@ module Hasura.GraphQL.Execute.Query
 import qualified Data.Aeson as J
 import qualified Data.ByteString as B
 import qualified Data.ByteString.Lazy as LBS
+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as Map
 import qualified Data.HashMap.Strict.InsOrd as OMap
 import qualified Data.IntMap as IntMap
@@ -28,6 +29,7 @@ import Hasura.Server.Version (HasVersion)
 import qualified Hasura.SQL.DML as S
 import Hasura.EncJSON
+import Hasura.Db
 import Hasura.GraphQL.Context
 import Hasura.GraphQL.Execute.Prepare
 import Hasura.GraphQL.Execute.Resolve
@@ -114,13 +116,18 @@ withPlan usrVars (PGPlan q reqVars prepMap remoteJoins) annVars = do
 -- turn the current plan into a transaction
 mkCurPlanTx
-:: (HasVersion, MonadError QErr m)
-=> HTTP.Manager
+:: ( HasVersion
+, MonadError QErr m
+, MonadIO tx
+, MonadTx tx
+)
+=> Env.Environment
+-> HTTP.Manager
 -> [HTTP.Header]
 -> UserInfo
 -> FieldPlans
--> m (LazyRespTx, GeneratedSqlMap)
+-> m (tx EncJSON, GeneratedSqlMap)
-mkCurPlanTx manager reqHdrs userInfo fldPlans = do
+mkCurPlanTx env manager reqHdrs userInfo fldPlans = do
 -- generate the SQL and prepared vars or the bytestring
 resolved <- forM fldPlans $ \(alias, fldPlan) -> do
 fldResp <- case fldPlan of
@@ -131,7 +138,7 @@ mkCurPlanTx manager reqHdrs userInfo fldPlans = do
 RFPActionQuery tx -> pure $ RRActionQuery tx
 return (alias, fldResp)
-return (mkLazyRespTx manager reqHdrs userInfo resolved, mkGeneratedSqlMap resolved)
+pure (mkLazyRespTx env manager reqHdrs userInfo resolved, mkGeneratedSqlMap resolved)
 getVarArgNum :: (MonadState PlanningSt m) => G.Name -> m Int
 getVarArgNum var = do
@@ -211,19 +218,20 @@ parseGraphQLQuery gqlContext varDefs varValsM fields =
 throwError (err400 ValidationFailed peMessage){ qePath = pePath }
 convertQuerySelSet
-:: forall m. (HasVersion, MonadError QErr m, MonadIO m)
+:: forall m tx . (HasVersion, MonadError QErr m, MonadIO m, MonadIO tx, MonadTx tx)
-=> GQLContext
+=> Env.Environment
+-> GQLContext
 -> UserInfo
 -> HTTP.Manager
 -> HTTP.RequestHeaders
 -> G.SelectionSet G.NoFragments G.Name
 -> [G.VariableDefinition]
 -> Maybe GH.VariableValues
--> m ( ExecutionPlan (LazyRespTx, GeneratedSqlMap) RemoteCall (G.Name, J.Value)
+-> m ( ExecutionPlan (tx EncJSON, GeneratedSqlMap) RemoteCall (G.Name, J.Value)
 , Maybe ReusableQueryPlan
 , InsOrdHashMap G.Name (QueryRootField UnpreparedValue)
 )
-convertQuerySelSet gqlContext userInfo manager reqHeaders fields varDefs varValsM = do
+convertQuerySelSet env gqlContext userInfo manager reqHeaders fields varDefs varValsM = do
 -- Parse the GraphQL query into the RQL AST
 (unpreparedQueries, _reusability) <- parseGraphQLQuery gqlContext varDefs varValsM fields
@@ -261,7 +269,7 @@ convertQuerySelSet gqlContext userInfo manager reqHeaders fields varDefs varVals
 executionPlan <- case (dbPlans, remoteFields) of
-(dbs, Seq.Empty) -> ExecStepDB <$> mkCurPlanTx manager reqHeaders userInfo (toList dbs)
+(dbs, Seq.Empty) -> ExecStepDB <$> mkCurPlanTx env manager reqHeaders userInfo (toList dbs)
 (Seq.Empty, remotes@(firstRemote Seq.:<| _)) -> do
 let (remoteOperation, varValsM) =
 buildTypedOperation
@@ -281,20 +289,25 @@ convertQuerySelSet gqlContext userInfo manager reqHeaders fields varDefs varVals
 :: ActionQuery UnpreparedValue -> StateT PlanningSt m ActionQueryPlan
 convertActionQuery = \case
 AQQuery s -> (AQPQuery . fst) <$>
-lift (resolveActionExecution userInfo s $ ActionExecContext manager reqHeaders usrVars)
+lift (resolveActionExecution env userInfo s $ ActionExecContext manager reqHeaders usrVars)
 AQAsync s -> AQPAsyncQuery <$>
 DS.traverseAnnSimpleSelect prepareWithPlan (resolveAsyncActionQuery userInfo s)
 -- use the existing plan and new variables to create a pg query
 queryOpFromPlan
-:: (HasVersion, MonadError QErr m)
-=> HTTP.Manager
+:: ( HasVersion
+, MonadError QErr m
+, MonadIO tx
+, MonadTx tx
+)
+=> Env.Environment
+-> HTTP.Manager
 -> [HTTP.Header]
 -> UserInfo
 -> Maybe GH.VariableValues
 -> ReusableQueryPlan
--> m (LazyRespTx, GeneratedSqlMap)
+-> m (tx EncJSON, GeneratedSqlMap)
-queryOpFromPlan manager reqHdrs userInfo varValsM (ReusableQueryPlan varTypes fldPlans) = do
+queryOpFromPlan env manager reqHdrs userInfo varValsM (ReusableQueryPlan varTypes fldPlans) = do
 validatedVars <- _validateVariablesForReuse varTypes varValsM
 -- generate the SQL and prepared vars or the bytestring
 resolved <- forM fldPlans $ \(alias, fldPlan) ->
@@ -302,7 +315,7 @@ queryOpFromPlan manager reqHdrs userInfo varValsM (ReusableQueryPlan varTypes fl
 RFPRaw resp -> return $ RRRaw resp
 RFPPostgres pgPlan -> RRSql <$> withPlan (_uiSession userInfo) pgPlan validatedVars
-return (mkLazyRespTx manager reqHdrs userInfo resolved, mkGeneratedSqlMap resolved)
+pure (mkLazyRespTx env manager reqHdrs userInfo resolved, mkGeneratedSqlMap resolved)
 data PreparedSql
 = PreparedSql
@@ -334,19 +347,28 @@ data ResolvedQuery
 -- prepared statement
 type GeneratedSqlMap = [(G.Name, Maybe PreparedSql)]
-mkLazyRespTx :: HasVersion
-=> HTTP.Manager -> [HTTP.Header] -> UserInfo -> [(G.Name, ResolvedQuery)] -> LazyRespTx
-mkLazyRespTx manager reqHdrs userInfo resolved =
-fmap encJFromAssocList $ forM resolved $ \(alias, node) -> do
+mkLazyRespTx
+:: ( HasVersion
+, MonadIO tx
+, MonadTx tx
+)
+=> Env.Environment
+-> HTTP.Manager
+-> [HTTP.Header]
+-> UserInfo
+-> [(G.Name, ResolvedQuery)]
+-> tx EncJSON
+mkLazyRespTx env manager reqHdrs userInfo resolved =
+encJFromAssocList <$> forM resolved \(alias, node) -> do
 resp <- case node of
 RRRaw bs -> return $ encJFromBS bs
 RRSql (PreparedSql q args maybeRemoteJoins) -> do
 let prepArgs = map fst args
 case maybeRemoteJoins of
 Nothing -> liftTx $ asSingleRowJsonResp q (map fst args)
 Just remoteJoins ->
-executeQueryWithRemoteJoins manager reqHdrs userInfo q prepArgs remoteJoins
+executeQueryWithRemoteJoins env manager reqHdrs userInfo q prepArgs remoteJoins
-RRActionQuery tx -> tx
+RRActionQuery tx' -> liftTx $ lazyTxToQTx tx'
 return (G.unName alias, resp)
 mkGeneratedSqlMap :: [(G.Name, ResolvedQuery)] -> GeneratedSqlMap
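The recurring change in this file is the return type: the concrete LazyRespTx becomes tx EncJSON for any tx satisfying MonadIO tx and MonadTx tx, so the same plan can be run in different transaction monads. A stripped-down sketch of why coding against such a class generalizes the concrete type; this MonadTx is a stand-in for illustration only (the real class in Hasura.Db lifts Q.TxE QErr, not raw IO):

    {-# LANGUAGE GeneralizedNewtypeDeriving #-}

    -- Simplified stand-in for the real MonadTx class.
    class Monad tx => MonadTx tx where
      liftTx :: IO a -> tx a  -- the real class lifts Q.TxE QErr, not raw IO

    newtype DemoTx a = DemoTx { runDemoTx :: IO a }
      deriving (Functor, Applicative, Monad)

    instance MonadTx DemoTx where
      liftTx = DemoTx

    -- A response producer written against the class runs in any such monad:
    respTx :: MonadTx tx => tx String
    respTx = liftTx (pure "{\"data\":{}}")

    main :: IO ()
    main = runDemoTx respTx >>= putStrLn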
@@ -6,6 +6,7 @@ module Hasura.GraphQL.Explain
 import qualified Data.Aeson as J
 import qualified Data.Aeson.Casing as J
 import qualified Data.Aeson.TH as J
+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as Map
 import qualified Data.HashMap.Strict.InsOrd as OMap
 import qualified Database.PG.Query as Q
@@ -14,10 +14,11 @@ import Hasura.Prelude
 import qualified Data.Aeson as J
 import qualified Data.ByteString.Lazy as BL
+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as Map
 import qualified Data.Text as T
-import qualified Language.GraphQL.Draft.Parser as G
 import qualified Language.GraphQL.Draft.Syntax as G
+import qualified Language.GraphQL.Draft.Parser as G
 import qualified Network.HTTP.Client as HTTP
 import qualified Network.HTTP.Types as N
 import qualified Network.Wreq as Wreq
@@ -37,12 +38,13 @@ introspectionQuery = $(embedStringFile "src-rsr/introspection.json")
 fetchRemoteSchema
 :: forall m
 . (HasVersion, MonadIO m, MonadUnique m, MonadError QErr m)
-=> HTTP.Manager
+=> Env.Environment
+-> HTTP.Manager
 -> RemoteSchemaName
 -> RemoteSchemaInfo
 -> m RemoteSchemaCtx
-fetchRemoteSchema manager schemaName schemaInfo@(RemoteSchemaInfo url headerConf _ timeout) = do
+fetchRemoteSchema env manager schemaName schemaInfo@(RemoteSchemaInfo url headerConf _ timeout) = do
-headers <- makeHeadersFromConf headerConf
+headers <- makeHeadersFromConf env headerConf
 let hdrsWithDefaults = addDefaultHeaders headers
 initReqE <- liftIO $ try $ HTTP.parseRequest (show url)
@@ -379,17 +381,18 @@ execRemoteGQ'
 , MonadIO m
 , MonadError QErr m
 )
-=> HTTP.Manager
+=> Env.Environment
+-> HTTP.Manager
 -> UserInfo
 -> [N.Header]
 -> GQLReqUnparsed
 -> RemoteSchemaInfo
 -> G.OperationType
 -> m (DiffTime, [N.Header], BL.ByteString)
-execRemoteGQ' manager userInfo reqHdrs q rsi opType = do
+execRemoteGQ' env manager userInfo reqHdrs q rsi opType = do
 when (opType == G.OperationTypeSubscription) $
 throw400 NotSupported "subscription to remote server is not supported"
-confHdrs <- makeHeadersFromConf hdrConf
+confHdrs <- makeHeadersFromConf env hdrConf
 let clientHdrs = bool [] (mkClientHeadersForward reqHdrs) fwdClientHdrs
 -- filter out duplicate headers
 -- priority: conf headers > resolved userinfo vars > client headers
@@ -407,7 +410,6 @@ execRemoteGQ' manager userInfo reqHdrs q rsi opType = do
 , HTTP.requestBody = HTTP.RequestBodyLBS (J.encode q)
 , HTTP.responseTimeout = HTTP.responseTimeoutMicro (timeout * 1000000)
 }
 (time, res) <- withElapsedTime $ liftIO $ try $ HTTP.httpLbs req manager
 resp <- either httpThrow return res
 pure (time, mkSetCookieHeaders resp, resp ^. Wreq.responseBody)
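makeHeadersFromConf now takes the Environment, so header values configured as "from env" resolve against the captured snapshot purely rather than via IO at request time. A hedged sketch of that resolution step; HeaderConf and the error handling here are simplified stand-ins for the real definitions:

    {-# LANGUAGE OverloadedStrings #-}
    import qualified Data.HashMap.Strict as M
    import           Data.Text           (Text)

    newtype Environment = Environment (M.HashMap Text Text)

    data HeaderConf
      = HeaderValue Text Text    -- header name, literal value
      | HeaderFromEnv Text Text  -- header name, environment variable to resolve

    -- Resolve configured headers against the environment snapshot; a missing
    -- variable becomes an error value instead of an IO failure.
    makeHeadersFromConf :: Environment -> [HeaderConf] -> Either Text [(Text, Text)]
    makeHeadersFromConf (Environment vars) = traverse resolve
      where
        resolve (HeaderValue name v)     = Right (name, v)
        resolve (HeaderFromEnv name var) =
          case M.lookup var vars of
            Just v  -> Right (name, v)
            Nothing -> Left ("environment variable not set: " <> var)

    main :: IO ()
    main = do
      let env = Environment (M.fromList [("ADMIN_TOKEN", "s3cr3t")])
      print $ makeHeadersFromConf env
        [ HeaderValue "Content-Type" "application/json"
        , HeaderFromEnv "Authorization" "ADMIN_TOKEN"
        ]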
@@ -20,12 +20,14 @@ module Hasura.GraphQL.Resolve
 import Data.Has
+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as Map
 import qualified Database.PG.Query as Q
 import qualified Language.GraphQL.Draft.Syntax as G
 import qualified Network.HTTP.Client as HTTP
 import qualified Network.HTTP.Types as HTTP
+import Hasura.EncJSON
 import Hasura.GraphQL.Resolve.Context
 import Hasura.Prelude
 import Hasura.RQL.Types
@@ -105,10 +107,11 @@ queryFldToPGAST
 , HasVersion
 , MonadIO m
 )
-=> V.Field
+=> Env.Environment
+-> V.Field
 -> RA.QueryActionExecuter
 -> m QueryRootFldUnresolved
-queryFldToPGAST fld actionExecuter = do
+queryFldToPGAST env fld actionExecuter = do
 opCtx <- getOpCtx $ V._fName fld
 userInfo <- asks getter
 case opCtx of
@@ -147,8 +150,9 @@ queryFldToPGAST fld actionExecuter = do
 f = case jsonAggType of
 DS.JASMultipleRows -> QRFActionExecuteList
 DS.JASSingleObject -> QRFActionExecuteObject
-f <$> actionExecuter (RA.resolveActionQuery fld ctx (_uiSession userInfo))
+f <$> actionExecuter (RA.resolveActionQuery env fld ctx (_uiSession userInfo))
-QCSelectConnection pk ctx ->
+QCSelectConnection pk ctx -> do
+validateHdrs userInfo (_socHeaders ctx)
 QRFConnection <$> RS.convertConnectionSelect pk ctx fld
 QCFuncConnection pk ctx ->
 QRFConnection <$> RS.convertConnectionFuncQuery pk ctx fld
@@ -167,34 +171,37 @@ mutFldToTx
 , Has HTTP.Manager r
 , Has [HTTP.Header] r
 , MonadIO m
+, MonadIO tx
+, MonadTx tx
 )
-=> V.Field
--> m (RespTx, HTTP.ResponseHeaders)
-mutFldToTx fld = do
+=> Env.Environment
+-> V.Field
+-> m (tx EncJSON, HTTP.ResponseHeaders)
+mutFldToTx env fld = do
 userInfo <- asks getter
 opCtx <- getOpCtx $ V._fName fld
 let noRespHeaders = fmap (,[])
 case opCtx of
 MCInsert ctx -> do
 validateHdrs userInfo (_iocHeaders ctx)
-noRespHeaders $ RI.convertInsert (userRole userInfo) (_iocTable ctx) fld
+noRespHeaders $ RI.convertInsert env rjCtx roleName (_iocTable ctx) fld
 MCInsertOne ctx -> do
 validateHdrs userInfo (_iocHeaders ctx)
-noRespHeaders $ RI.convertInsertOne (userRole userInfo) (_iocTable ctx) fld
+noRespHeaders $ RI.convertInsertOne env rjCtx roleName (_iocTable ctx) fld
 MCUpdate ctx -> do
 validateHdrs userInfo (_uocHeaders ctx)
-noRespHeaders $ RM.convertUpdate ctx fld
+noRespHeaders $ RM.convertUpdate env ctx rjCtx fld
 MCUpdateByPk ctx -> do
 validateHdrs userInfo (_uocHeaders ctx)
-noRespHeaders $ RM.convertUpdateByPk ctx fld
+noRespHeaders $ RM.convertUpdateByPk env ctx rjCtx fld
 MCDelete ctx -> do
 validateHdrs userInfo (_docHeaders ctx)
-noRespHeaders $ RM.convertDelete ctx fld
+noRespHeaders $ RM.convertDelete env ctx rjCtx fld
 MCDeleteByPk ctx -> do
 validateHdrs userInfo (_docHeaders ctx)
-noRespHeaders $ RM.convertDeleteByPk ctx fld
+noRespHeaders $ RM.convertDeleteByPk env ctx rjCtx fld
 MCAction ctx ->
-RA.resolveActionMutation fld ctx (userVars userInfo)
+RA.resolveActionMutation env fld ctx userInfo
 getOpCtx
 :: ( MonadReusability m
@@ -14,29 +14,33 @@ module Hasura.GraphQL.Resolve.Action
 import Hasura.Prelude
-import qualified Control.Concurrent.Async as A
 import qualified Data.Aeson as J
 import qualified Data.Aeson.Casing as J
 import qualified Data.Aeson.TH as J
 import qualified Data.ByteString.Lazy as BL
 import qualified Data.CaseInsensitive as CI
 import qualified Data.HashMap.Strict as Map
 import qualified Data.Text as T
 import qualified Data.UUID as UUID
 import qualified Database.PG.Query as Q
 import qualified Language.GraphQL.Draft.Syntax as G
 import qualified Network.HTTP.Client as HTTP
 import qualified Network.HTTP.Types as HTTP
 import qualified Network.Wreq as Wreq
 import Control.Concurrent (threadDelay)
 import Control.Exception (try)
 import Control.Lens
 import Data.IORef
 import qualified Hasura.RQL.DML.RemoteJoin as RJ
 import qualified Hasura.RQL.DML.Select as RS
 -- import qualified Hasura.GraphQL.Resolve.Select as GRS
+import Control.Monad.Trans.Control (MonadBaseControl)
+import qualified Control.Concurrent.Async.Lifted.Safe as LA
+import qualified Data.Environment as Env
 import Hasura.EncJSON
 import Hasura.GraphQL.Execute.Prepare
@@ -125,14 +129,15 @@ $(J.deriveToJSON (J.aesonDrop 4 J.snakeCase) ''ActionInternalError)
 -- , Has HTTP.Manager r
 -- , Has [HTTP.Header] r
 -- )
+-- => Env.Environment
--- => Field
+-- -> Field
 -- -> ActionMutationExecutionContext
 -- -> UserVars
 -- -> m (RespTx, HTTP.ResponseHeaders)
--- resolveActionMutation field executionContext sessionVariables =
+-- resolveActionMutation env field executionContext sessionVariables =
 -- case executionContext of
 -- ActionMutationSyncWebhook executionContextSync ->
--- resolveActionMutationSync field executionContextSync sessionVariables
+-- resolveActionMutationSync env field executionContextSync sessionVariables
 -- ActionMutationAsync ->
 -- (,[]) <$> resolveActionMutationAsync field sessionVariables
@@ -142,14 +147,15 @@ resolveActionExecution
 , MonadError QErr m
 , MonadIO m
 )
-=> UserInfo
+=> Env.Environment
+-> UserInfo
 -> AnnActionExecution UnpreparedValue
 -> ActionExecContext
 -> m (RespTx, HTTP.ResponseHeaders)
-resolveActionExecution userInfo annAction execContext = do
+resolveActionExecution env userInfo annAction execContext = do
 let actionContext = ActionContext actionName
 handlerPayload = ActionWebhookPayload actionContext sessionVariables inputPayload
-(webhookRes, respHeaders) <- callWebhook manager outputType outputFields reqHeaders confHeaders
+(webhookRes, respHeaders) <- callWebhook env manager outputType outputFields reqHeaders confHeaders
 forwardClientHeaders resolvedWebhook handlerPayload
 let webhookResponseExpression = RS.AEInput $ UVLiteral $
 toTxtValue $ WithScalarType PGJSONB $ PGValJSONB $ Q.JSONB $ J.toJSON webhookRes
@@ -163,7 +169,7 @@ resolveActionExecution userInfo annAction execContext = do
 Just remoteJoins ->
 let query = Q.fromBuilder $ toSQL $
 RS.mkSQLSelect jsonAggType astResolvedWithoutRemoteJoins
-in RJ.executeQueryWithRemoteJoins manager reqHeaders userInfo query [] remoteJoins
+in RJ.executeQueryWithRemoteJoins env manager reqHeaders userInfo query [] remoteJoins
 Nothing ->
 asSingleRowJsonResp (Q.fromBuilder $ toSQL $ RS.mkSQLSelect jsonAggType astResolved) []
 where
@@ -200,17 +206,18 @@ restrictActionExecuter errMsg _ =
 -- , Has OrdByCtx r
 -- , Has SQLGenCtx r
 -- )
--- => Field
+-- => Env.Environment
+-- -> Field
 -- -> ActionExecutionContext
 -- -> SessionVariables
 -- -> HTTP.Manager
 -- -> [HTTP.Header]
 -- -> m (RS.AnnSimpleSelG UnresolvedVal)
--- resolveActionQuery field executionContext sessionVariables httpManager reqHeaders = do
+-- resolveActionQuery env field executionContext sessionVariables httpManager reqHeaders = do
 -- let inputArgs = J.toJSON $ fmap annInpValueToJson $ _fArguments field
 -- actionContext = ActionContext actionName
 -- handlerPayload = ActionWebhookPayload actionContext sessionVariables inputArgs
--- (webhookRes, _) <- callWebhook httpManager outputType outputFields reqHeaders confHeaders
+-- (webhookRes, _) <- callWebhook env httpManager outputType outputFields reqHeaders confHeaders
 -- forwardClientHeaders resolvedWebhook handlerPayload
 -- let webhookResponseExpression = RS.AEInput $ UVSQL $
 -- toTxtValue $ WithScalarType PGJSONB $ PGValJSONB $ Q.JSONB $ J.toJSON webhookRes
@@ -238,13 +245,14 @@ table provides the action response. See Note [Resolving async action query/subsc
 -- | Resolve asynchronous action mutation which returns only the action uuid
 resolveActionMutationAsync
-:: (MonadError QErr m)
+:: ( MonadError QErr m
+, MonadTx tx)
 => AnnActionMutationAsync
 -> [HTTP.Header]
 -> SessionVariables
--> m RespTx
+-> m (tx EncJSON)
 resolveActionMutationAsync annAction reqHeaders sessionVariables = do
-pure $ do
+pure $ liftTx do
 actionId <- runIdentity . Q.getRow <$> Q.withQE defaultTxErrorHandler [Q.sql|
 INSERT INTO
 "hdb_catalog"."hdb_action_log"
@@ -302,9 +310,9 @@ resolveAsyncActionQuery userInfo annAction =
 actionLogTable = QualifiedObject (SchemaName "hdb_catalog") (TableName "hdb_action_log")
 -- TODO (from master):- Avoid using PGColumnInfo
-mkAnnFldFromPGCol column columnType =
+mkAnnFldFromPGCol column' columnType =
 flip RS.mkAnnColumnField Nothing $
-PGColumnInfo (unsafePGCol column) (G.unsafeMkName column) 0 (PGColumnScalar columnType) True Nothing
+PGColumnInfo (unsafePGCol column') (G.unsafeMkName column') 0 (PGColumnScalar columnType) True Nothing
 tableBoolExpression =
 let actionIdColumnInfo = PGColumnInfo (unsafePGCol "id") $$(G.litName "id")
@@ -334,23 +342,29 @@ data ActionLogItem
 -- | Process async actions from hdb_catalog.hdb_action_log table. This functions is executed in a background thread.
 -- See Note [Async action architecture] above
 asyncActionsProcessor
-:: HasVersion
-=> IORef (RebuildableSchemaCache Run, SchemaCacheVer)
+:: forall m void
+. ( HasVersion
+, MonadIO m
+, MonadBaseControl IO m
+, LA.Forall (LA.Pure m)
+)
+=> Env.Environment
+-> IORef (RebuildableSchemaCache Run, SchemaCacheVer)
 -> Q.PGPool
 -> HTTP.Manager
--> IO void
+-> m void
-asyncActionsProcessor cacheRef pgPool httpManager = forever $ do
+asyncActionsProcessor env cacheRef pgPool httpManager = forever $ do
-asyncInvocations <- getUndeliveredEvents
+asyncInvocations <- liftIO getUndeliveredEvents
-actionCache <- scActions . lastBuiltSchemaCache . fst <$> readIORef cacheRef
+actionCache <- scActions . lastBuiltSchemaCache . fst <$> liftIO (readIORef cacheRef)
-A.mapConcurrently_ (callHandler actionCache) asyncInvocations
+LA.mapConcurrently_ (callHandler actionCache) asyncInvocations
-threadDelay (1 * 1000 * 1000)
+liftIO $ threadDelay (1 * 1000 * 1000)
 where
 runTx :: (Monoid a) => Q.TxE QErr a -> IO a
 runTx q = do
 res <- runExceptT $ Q.runTx' pgPool q
 either mempty return res
-callHandler :: ActionCache -> ActionLogItem -> IO ()
+callHandler :: ActionCache -> ActionLogItem -> m ()
 callHandler actionCache actionLogItem = do
 let ActionLogItem actionId actionName reqHeaders
 sessionVariables inputPayload = actionLogItem
@@ -365,10 +379,10 @@ asyncActionsProcessor cacheRef pgPool httpManager = forever $ do
 outputType = _adOutputType definition
 actionContext = ActionContext actionName
 eitherRes <- runExceptT $
-callWebhook httpManager outputType outputFields reqHeaders confHeaders
+callWebhook env httpManager outputType outputFields reqHeaders confHeaders
 forwardClientHeaders webhookUrl $
 ActionWebhookPayload actionContext sessionVariables inputPayload
-case eitherRes of
+liftIO $ case eitherRes of
 Left e -> setError actionId e
 Right (responsePayload, _) -> setCompleted actionId $ J.toJSON responsePayload
@@ -423,7 +437,8 @@ asyncActionsProcessor cacheRef pgPool httpManager = forever $ do
 callWebhook
 :: forall m. (HasVersion, MonadIO m, MonadError QErr m)
-=> HTTP.Manager
+=> Env.Environment
+-> HTTP.Manager
 -> GraphQLType
 -> ActionOutputFields
 -> [HTTP.Header]
@@ -432,18 +447,23 @@ callWebhook
 -> ResolvedWebhook
 -> ActionWebhookPayload
 -> m (ActionWebhookResponse, HTTP.ResponseHeaders)
-callWebhook manager outputType outputFields reqHeaders confHeaders
+callWebhook env manager outputType outputFields reqHeaders confHeaders
 forwardClientHeaders resolvedWebhook actionWebhookPayload = do
-resolvedConfHeaders <- makeHeadersFromConf confHeaders
+resolvedConfHeaders <- makeHeadersFromConf env confHeaders
 let clientHeaders = if forwardClientHeaders then mkClientHeadersForward reqHeaders else []
 contentType = ("Content-Type", "application/json")
-options = wreqOptions manager $
--- Using HashMap to avoid duplicate headers between configuration headers
--- and client headers where configuration headers are preferred
-contentType : (Map.toList . Map.fromList) (resolvedConfHeaders <> clientHeaders)
+-- Using HashMap to avoid duplicate headers between configuration headers
+-- and client headers where configuration headers are preferred
+hdrs = contentType : (Map.toList . Map.fromList) (resolvedConfHeaders <> clientHeaders)
 postPayload = J.toJSON actionWebhookPayload
 url = unResolvedWebhook resolvedWebhook
-httpResponse <- liftIO $ try $ Wreq.postWith options (T.unpack url) postPayload
+httpResponse <- do
+initReq <- liftIO $ HTTP.parseRequest (T.unpack url)
+let req = initReq { HTTP.method = "POST"
+, HTTP.requestHeaders = addDefaultHeaders hdrs
+, HTTP.requestBody = HTTP.RequestBodyLBS (J.encode postPayload)
+}
+liftIO . try $ HTTP.httpLbs req manager
 let requestInfo = ActionRequestInfo url postPayload $
 confHeaders <> toHeadersConf clientHeaders
 case httpResponse of
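The callWebhook hunk above drops Wreq.postWith in favour of building the POST request directly with http-client. The same pattern as a standalone program; the URL and payload here are placeholders:

    {-# LANGUAGE OverloadedStrings #-}
    import qualified Data.Aeson              as J
    import qualified Network.HTTP.Client     as HTTP
    import           Network.HTTP.Client.TLS (newTlsManager)

    main :: IO ()
    main = do
      manager <- newTlsManager
      initReq <- HTTP.parseRequest "https://example.com/webhook"  -- placeholder URL
      let req = initReq
            { HTTP.method         = "POST"
            , HTTP.requestHeaders = [("Content-Type", "application/json")]
            , HTTP.requestBody    = HTTP.RequestBodyLBS (J.encode (J.object []))
            }
      resp <- HTTP.httpLbs req manager
      print (HTTP.responseStatus resp)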
@@ -28,10 +28,8 @@ module Hasura.GraphQL.Resolve.InputValue
 import Hasura.Prelude
 import qualified Text.Builder as TB
 import qualified Language.GraphQL.Draft.Syntax as G
 import qualified Hasura.RQL.Types as RQL
 import Hasura.GraphQL.Resolve.Context
@ -26,6 +26,7 @@ import qualified Data.Sequence as Seq
import qualified Data.Text as T import qualified Data.Text as T
import qualified Database.PG.Query as Q import qualified Database.PG.Query as Q
import qualified Language.GraphQL.Draft.Syntax as G import qualified Language.GraphQL.Draft.Syntax as G
import qualified Data.Environment as Env
import qualified Hasura.GraphQL.Parser as P import qualified Hasura.GraphQL.Parser as P
import qualified Hasura.RQL.DML.Delete.Types as RQL import qualified Hasura.RQL.DML.Delete.Types as RQL
@ -591,26 +592,28 @@ fmapAnnInsert f (annIns, mutationOutput) =
convertToSQLTransaction convertToSQLTransaction
:: (HasVersion, MonadTx m, MonadIO m) :: (HasVersion, MonadTx m, MonadIO m)
=> AnnMultiInsert S.SQLExp => Env.Environment
-> AnnMultiInsert S.SQLExp
-> RQL.MutationRemoteJoinCtx -> RQL.MutationRemoteJoinCtx
-> Seq.Seq Q.PrepArg -> Seq.Seq Q.PrepArg
-> Bool -> Bool
-> m EncJSON -> m EncJSON
convertToSQLTransaction (annIns, mutationOutput) rjCtx planVars stringifyNum = convertToSQLTransaction env (annIns, mutationOutput) rjCtx planVars stringifyNum =
if null $ _aiInsObj annIns if null $ _aiInsObj annIns
then pure $ buildEmptyMutResp mutationOutput then pure $ buildEmptyMutResp mutationOutput
else insertMultipleObjects annIns [] rjCtx mutationOutput planVars stringifyNum else insertMultipleObjects env annIns [] rjCtx mutationOutput planVars stringifyNum
insertMultipleObjects insertMultipleObjects
:: (HasVersion, MonadTx m, MonadIO m) :: (HasVersion, MonadTx m, MonadIO m)
=> MultiObjIns S.SQLExp => Env.Environment
-> MultiObjIns S.SQLExp
-> [(PGCol, S.SQLExp)] -> [(PGCol, S.SQLExp)]
-> RQL.MutationRemoteJoinCtx -> RQL.MutationRemoteJoinCtx
-> RQL.MutationOutput -> RQL.MutationOutput
-> Seq.Seq Q.PrepArg -> Seq.Seq Q.PrepArg
-> Bool -> Bool
-> m EncJSON -> m EncJSON
insertMultipleObjects multiObjIns additionalColumns rjCtx mutationOutput planVars stringifyNum = insertMultipleObjects env multiObjIns additionalColumns rjCtx mutationOutput planVars stringifyNum =
bool withoutRelsInsert withRelsInsert anyRelsToInsert bool withoutRelsInsert withRelsInsert anyRelsToInsert
where where
AnnIns insObjs table conflictClause checkCondition columnInfos defVals = multiObjIns AnnIns insObjs table conflictClause checkCondition columnInfos defVals = multiObjIns
@ -631,33 +634,34 @@ insertMultipleObjects multiObjIns additionalColumns rjCtx mutationOutput planVar
checkCondition checkCondition
mutationOutput mutationOutput
columnInfos columnInfos
RQL.execInsertQuery stringifyNum (Just rjCtx) (insertQuery, planVars) RQL.execInsertQuery env stringifyNum (Just rjCtx) (insertQuery, planVars)
withRelsInsert = do withRelsInsert = do
insertRequests <- for insObjs \obj -> do insertRequests <- for insObjs \obj -> do
let singleObj = AnnIns obj table conflictClause checkCondition columnInfos defVals let singleObj = AnnIns obj table conflictClause checkCondition columnInfos defVals
insertObject singleObj additionalColumns rjCtx planVars stringifyNum insertObject env singleObj additionalColumns rjCtx planVars stringifyNum
let affectedRows = sum $ map fst insertRequests let affectedRows = sum $ map fst insertRequests
columnValues = catMaybes $ map snd insertRequests columnValues = catMaybes $ map snd insertRequests
selectExpr <- RQL.mkSelCTEFromColVals table columnInfos columnValues selectExpr <- RQL.mkSelCTEFromColVals table columnInfos columnValues
let (mutOutputRJ, remoteJoins) = RQL.getRemoteJoinsMutationOutput mutationOutput let (mutOutputRJ, remoteJoins) = RQL.getRemoteJoinsMutationOutput mutationOutput
sqlQuery = Q.fromBuilder $ toSQL $ sqlQuery = Q.fromBuilder $ toSQL $
RQL.mkMutationOutputExp table columnInfos (Just affectedRows) selectExpr mutOutputRJ stringifyNum RQL.mkMutationOutputExp table columnInfos (Just affectedRows) selectExpr mutOutputRJ stringifyNum
RQL.executeMutationOutputQuery sqlQuery [] $ (,rjCtx) <$> remoteJoins RQL.executeMutationOutputQuery env sqlQuery [] $ (,rjCtx) <$> remoteJoins
insertObject insertObject
:: (HasVersion, MonadTx m, MonadIO m) :: (HasVersion, MonadTx m, MonadIO m)
=> SingleObjIns S.SQLExp => Env.Environment
-> SingleObjIns S.SQLExp
-> [(PGCol, S.SQLExp)] -> [(PGCol, S.SQLExp)]
   -> RQL.MutationRemoteJoinCtx
   -> Seq.Seq Q.PrepArg
   -> Bool
   -> m (Int, Maybe (ColumnValues TxtEncodedPGVal))
-insertObject singleObjIns additionalColumns rjCtx planVars stringifyNum = do
+insertObject env singleObjIns additionalColumns rjCtx planVars stringifyNum = do
   validateInsert (map fst columns) (map _riRelInfo objectRels) (map fst additionalColumns)
   -- insert all object relations and fetch this insert dependent column values
-  objInsRes <- forM objectRels $ insertObjRel planVars rjCtx stringifyNum
+  objInsRes <- forM objectRels $ insertObjRel env planVars rjCtx stringifyNum
   -- prepare final insert columns
   let objRelAffRows = sum $ map fst objInsRes
@@ -683,7 +687,7 @@ insertObject singleObjIns additionalColumns rjCtx planVars stringifyNum = do
     withArrRels colValM = do
       colVal <- onNothing colValM $ throw400 NotSupported cannotInsArrRelErr
      arrDepColsWithVal <- fetchFromColVals colVal arrRelDepCols
-      arrInsARows <- forM arrayRels $ insertArrRel arrDepColsWithVal rjCtx planVars stringifyNum
+      arrInsARows <- forM arrayRels $ insertArrRel env arrDepColsWithVal rjCtx planVars stringifyNum
      return $ sum arrInsARows
    asSingleObject = \case
@@ -697,13 +701,14 @@ insertObject singleObjIns additionalColumns rjCtx planVars stringifyNum = do
 insertObjRel
   :: (HasVersion, MonadTx m, MonadIO m)
-  => Seq.Seq Q.PrepArg
+  => Env.Environment
+  -> Seq.Seq Q.PrepArg
   -> RQL.MutationRemoteJoinCtx
   -> Bool
   -> ObjRelIns S.SQLExp
   -> m (Int, [(PGCol, S.SQLExp)])
-insertObjRel planVars rjCtx stringifyNum objRelIns = do
-  (affRows, colValM) <- insertObject singleObjIns [] rjCtx planVars stringifyNum
+insertObjRel env planVars rjCtx stringifyNum objRelIns = do
+  (affRows, colValM) <- insertObject env singleObjIns [] rjCtx planVars stringifyNum
   colVal <- onNothing colValM $ throw400 NotSupported errMsg
   retColsWithVals <- fetchFromColVals colVal rColInfos
   let columns = flip mapMaybe (Map.toList mapCols) \(column, target) -> do
@@ -724,17 +729,18 @@ insertObjRel planVars rjCtx stringifyNum objRelIns = do
 insertArrRel
   :: (HasVersion, MonadTx m, MonadIO m)
-  => [(PGCol, S.SQLExp)]
+  => Env.Environment
+  -> [(PGCol, S.SQLExp)]
   -> RQL.MutationRemoteJoinCtx
   -> Seq.Seq Q.PrepArg
   -> Bool
   -> ArrRelIns S.SQLExp
   -> m Int
-insertArrRel resCols rjCtx planVars stringifyNum arrRelIns = do
+insertArrRel env resCols rjCtx planVars stringifyNum arrRelIns = do
   let additionalColumns = flip mapMaybe resCols \(column, value) -> do
         target <- Map.lookup column mapping
         Just (target, value)
-  resBS <- insertMultipleObjects multiObjIns additionalColumns rjCtx mutOutput planVars stringifyNum
+  resBS <- insertMultipleObjects env multiObjIns additionalColumns rjCtx mutOutput planVars stringifyNum
   resObj <- decodeEncJSON resBS
   onNothing (Map.lookup ("affected_rows" :: T.Text) resObj) $
     throw500 "affected_rows not returned in array rel insert"
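
The `Env.Environment` being threaded through these signatures is the data structure named in the commit title. Its defining module is not part of this excerpt, so the following is only a minimal sketch of the shape these call sites assume; the real `Data.Environment` may carry more operations:

    -- Sketch only: a pure snapshot of the process environment.
    module Data.Environment
      ( Environment
      , getEnvironment
      , mkEnvironment
      , lookupEnv
      ) where

    import qualified Data.HashMap.Strict as M
    import qualified System.Environment

    -- The process environment captured once as plain data.
    newtype Environment = Environment (M.HashMap String String)

    -- Capture the real environment; typically called once, at startup.
    getEnvironment :: IO Environment
    getEnvironment = mkEnvironment <$> System.Environment.getEnvironment

    -- Build an environment from pairs, e.g. a fabricated one in tests.
    mkEnvironment :: [(String, String)] -> Environment
    mkEnvironment = Environment . M.fromList

    -- Pure lookup: no IO (and no MonadIO constraint) at use sites.
    lookupEnv :: Environment -> String -> Maybe String
    lookupEnv (Environment m) k = M.lookup k m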
@@ -25,6 +25,7 @@ import Hasura.Session
 import qualified Data.Aeson as J
 import qualified Data.HashMap.Strict as Map
+import qualified Data.Environment as Env
 import qualified Database.PG.Query as Q
 import qualified Hasura.GraphQL.Execute as E
 import qualified Hasura.GraphQL.Execute.Query as EQ
@@ -42,14 +43,15 @@ runGQ
   , E.MonadGQLExecutionCheck m
   , MonadQueryLog m
   )
-  => RequestId
+  => Env.Environment
+  -> RequestId
   -> UserInfo
   -> Wai.IpAddress
   -> [HTTP.Header]
   -> E.GraphQLQueryType
   -> GQLReqUnparsed
   -> m (HttpResponse EncJSON)
-runGQ reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
+runGQ env reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
   -- The response and misc telemetry data:
   let telemTransport = Telem.HTTP
   (telemTimeTot_DT, (telemCacheHit, telemLocality, (telemTimeIO_DT, telemQueryType, !resp))) <- withElapsedTime $ do
@@ -59,7 +61,7 @@ runGQ reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
     reqParsed <- E.checkGQLExecution userInfo (reqHeaders, ipAddress) enableAL sc reqUnparsed
                  >>= flip onLeft throwError
-    (telemCacheHit, execPlan) <- E.getResolvedExecPlan pgExecCtx planCache
+    (telemCacheHit, execPlan) <- E.getResolvedExecPlan env pgExecCtx planCache
                                  userInfo sqlGenCtx sc scVer queryType
                                  httpManager reqHeaders (reqUnparsed, reqParsed)
     case execPlan of
@@ -89,7 +91,7 @@ runGQ reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
       throw400 UnexpectedPayload "subscriptions are not supported over HTTP, use websockets instead"
   {-
     E.GExPHasura resolvedOp -> do
-      (telemTimeIO, telemQueryType, respHdrs, resp) <- runHasuraGQ reqId reqUnparsed userInfo resolvedOp
+      (telemTimeIO, telemQueryType, respHdrs, resp) <- runHasuraGQ reqId (reqUnparsed, reqParsed) userInfo resolvedOp
       return (telemCacheHit, Telem.Local, (telemTimeIO, telemQueryType, HttpResponse resp respHdrs))
     E.GExPRemote rsi opDef -> do
       let telemQueryType | G._todType opDef == G.OperationTypeMutation = Telem.Mutation
@@ -105,7 +107,7 @@ runGQ reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
     runRemoteGQ telemCacheHit rsi opDef = do
       let telemQueryType | G._todType opDef == G.OperationTypeMutation = Telem.Mutation
                          | otherwise = Telem.Query
-      (telemTimeIO, resp) <- E.execRemoteGQ reqId userInfo reqHeaders reqUnparsed rsi opDef
+      (telemTimeIO, resp) <- E.execRemoteGQ env reqId userInfo reqHeaders reqUnparsed rsi opDef
       return (telemCacheHit, Telem.Remote, (telemTimeIO, telemQueryType, resp))

 -- | Run (execute) a batched GraphQL query (see 'GQLBatchedReqs')
@@ -117,7 +119,8 @@ runGQBatched
   , E.MonadGQLExecutionCheck m
   , MonadQueryLog m
   )
-  => RequestId
+  => Env.Environment
+  -> RequestId
   -> ResponseInternalErrorsConfig
   -> UserInfo
   -> Wai.IpAddress
@@ -126,10 +129,10 @@ runGQBatched
   -> GQLBatchedReqs GQLQueryText
   -- ^ the batched request with unparsed GraphQL query
   -> m (HttpResponse EncJSON)
-runGQBatched reqId responseErrorsConfig userInfo ipAddress reqHdrs queryType query = do
+runGQBatched env reqId responseErrorsConfig userInfo ipAddress reqHdrs queryType query = do
   case query of
     GQLSingleRequest req ->
-      runGQ reqId userInfo ipAddress reqHdrs queryType req
+      runGQ env reqId userInfo ipAddress reqHdrs queryType req
     GQLBatchedReqs reqs -> do
       -- It's unclear what we should do if we receive multiple
       -- responses with distinct headers, so just do the simplest thing
@@ -140,7 +143,7 @@ runGQBatched reqId responseErrorsConfig userInfo ipAddress reqHdrs queryType que
            . encJFromList
            . map (either (encJFromJValue . encodeGQErr includeInternal) _hrBody)
-      removeHeaders <$> traverse (try . runGQ reqId userInfo ipAddress reqHdrs queryType) reqs
+      removeHeaders <$> traverse (try . runGQ env reqId userInfo ipAddress reqHdrs queryType) reqs
   where
     try = flip catchError (pure . Left) . fmap Right
@@ -201,21 +204,21 @@ runHasuraGQ
   , MonadQueryLog m
   )
   => RequestId
-  -> GQLReqUnparsed
+  -> (GQLReqUnparsed, GQLReqParsed)
   -> UserInfo
-  -> E.ExecOp
+  -> E.ExecOp (LazyTx QErr)
   -> m (DiffTime, Telem.QueryType, HTTP.ResponseHeaders, EncJSON)
   -- ^ Also return 'Mutation' when the operation was a mutation, and the time
   -- spent in the PG query; for telemetry.
-runHasuraGQ reqId query userInfo resolvedOp = do
+runHasuraGQ reqId (query, _queryParsed) userInfo resolvedOp = do
   (E.ExecutionCtx logger _ pgExecCtx _ _ _ _ _) <- ask
-  logQuery' logger
-  (telemTimeIO, respE) <- withElapsedTime $ liftIO $ runExceptT $ case resolvedOp of
-    E.ExOpQuery tx _genSql -> do
+  (telemTimeIO, respE) <- withElapsedTime $ runExceptT $ case resolvedOp of
+    E.ExOpQuery tx genSql _asts -> do
       -- log the generated SQL and the graphql query
-      -- L.unLogger logger $ QueryLog query genSql reqId
+      logQueryLog logger query genSql reqId
       ([],) <$> runQueryTx pgExecCtx tx
     E.ExOpMutation respHeaders tx -> do
+      logQueryLog logger query Nothing reqId
       (respHeaders,) <$> runLazyTx pgExecCtx Q.ReadWrite (withUserInfo userInfo tx)
     E.ExOpSubs _ ->
       throw400 UnexpectedPayload
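
Across these handlers the refactor is mechanical: the environment captured at startup becomes an ordinary first argument instead of an ambient `MonadIO` capability. A toy illustration of the resulting call-site style, using the hypothetical `Data.Environment` sketch above:

    import Data.Maybe (fromMaybe)
    import qualified Data.Environment as Env

    -- A handler in the runGQ style: plain data in, so no IO is needed to
    -- read configuration, and tests can pass a fabricated environment.
    greeting :: Env.Environment -> String
    greeting env = fromMaybe "hello" (Env.lookupEnv env "GREETING")

    -- The environment is read exactly once at the top level and passed down.
    main :: IO ()
    main = do
      env <- Env.getEnvironment
      putStrLn (greeting env)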
@@ -37,7 +37,7 @@ newtype GQLExecDoc
   deriving (Ord, Show, Eq, Hashable)

 instance J.FromJSON GQLExecDoc where
-  parseJSON v = (GQLExecDoc . G.getExecutableDefinitions) <$> J.parseJSON v
+  parseJSON v = GQLExecDoc . G.getExecutableDefinitions <$> J.parseJSON v

 instance J.ToJSON GQLExecDoc where
   toJSON = J.toJSON . G.ExecutableDocument . unGQLExecDoc
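
The dropped parentheses are redundant: `(.)` is `infixr 9` while `(<$>)` is `infixl 4`, so the composition groups first either way. For instance:

    example :: Maybe Int
    example = abs . negate <$> Just 3  -- parses as (abs . negate) <$> Just 3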
@@ -6,6 +6,7 @@ module Hasura.GraphQL.Transport.WebSocket
   , createWSServerEnv
   , stopWSServerApp
   , WSServerEnv
+  , WSLog(..)
   ) where

 -- NOTE!:
@@ -33,6 +34,7 @@ import qualified Network.HTTP.Types as H
 import qualified Network.Wai.Extended as Wai
 import qualified Network.WebSockets as WS
 import qualified StmContainers.Map as STMMap
+import qualified Data.Environment as Env

 import Control.Concurrent.Extended (sleep)
 import Control.Exception.Lifted
@@ -49,13 +51,15 @@ import Hasura.RQL.Types
 import Hasura.Server.Auth (AuthMode, UserAuthentication,
                            resolveUserInfo)
 import Hasura.Server.Cors
-import Hasura.Server.Utils (RequestId, getRequestId)
+import Hasura.Server.Utils (RequestId,
+                            getRequestId)
 import Hasura.Server.Version (HasVersion)
 import Hasura.Session

 import qualified Hasura.GraphQL.Execute as E
 import qualified Hasura.GraphQL.Execute.LiveQuery as LQ
 import qualified Hasura.GraphQL.Execute.LiveQuery.Poll as LQ
+import qualified Hasura.GraphQL.Execute.Query as EQ
 import qualified Hasura.GraphQL.Transport.WebSocket.Server as WS
 import qualified Hasura.Logging as L
 import qualified Hasura.Server.Telemetry.Counters as Telem
@@ -218,7 +222,7 @@ data WSServerEnv

 onConn :: (MonadIO m)
        => L.Logger L.Hasura -> CorsPolicy -> WS.OnConnH m WSConnData
-onConn (L.Logger logger) corsPolicy wsId requestHead ipAdress = do
+onConn (L.Logger logger) corsPolicy wsId requestHead ipAddress = do
   res <- runExceptT $ do
     (errType, queryType) <- checkPath
     let reqHdrs = WS.requestHeaders requestHead
@@ -244,7 +248,7 @@ onConn (L.Logger logger) corsPolicy wsId requestHead ipAdress = do
     accept (hdrs, errType, queryType) = do
       logger $ mkWsInfoLog Nothing (WsConnInfo wsId Nothing Nothing) EAccepted
       connData <- liftIO $ WSConnData
-                  <$> STM.newTVarIO (CSNotInitialised hdrs ipAdress)
+                  <$> STM.newTVarIO (CSNotInitialised hdrs ipAddress)
                   <*> STMMap.newIO
                   <*> pure errType
                   <*> pure queryType
@@ -302,8 +306,8 @@ onConn (L.Logger logger) corsPolicy wsId requestHead ipAdress = do

 onStart
   :: forall m. (HasVersion, MonadIO m, E.MonadGQLExecutionCheck m, MonadQueryLog m)
-  => WSServerEnv -> WSConn -> StartMsg -> m ()
-onStart serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
+  => Env.Environment -> WSServerEnv -> WSConn -> StartMsg -> m ()
+onStart env serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
   timerTot <- startTimer
   opM <- liftIO $ STM.atomically $ STMMap.lookup opId opMap
@@ -327,7 +331,7 @@ onStart serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
   reqParsedE <- lift $ E.checkGQLExecution userInfo (reqHdrs, ipAddress) enableAL sc q
   reqParsed <- either (withComplete . preExecErr requestId) return reqParsedE
-  execPlanE <- runExceptT $ E.getResolvedExecPlan pgExecCtx
+  execPlanE <- runExceptT $ E.getResolvedExecPlan env pgExecCtx
                planCache userInfo sqlGenCtx sc scVer queryType httpMgr reqHdrs (q, reqParsed)
   (telemCacheHit, execPlan) <- either (withComplete . preExecErr requestId) return execPlanE
@@ -401,15 +405,14 @@ onStart serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
       -> Telem.CacheHit -> RequestId -> GQLReqUnparsed -> UserInfo -> E.ExecOp
       -> ExceptT () m ()
     runHasuraGQ timerTot telemCacheHit reqId query userInfo = \case
-      E.ExOpQuery opTx genSql ->
+      E.ExOpQuery opTx genSql _asts ->
         execQueryOrMut Telem.Query genSql $ runQueryTx pgExecCtx opTx
       -- Response headers discarded over websockets
-      E.ExOpMutation _ opTx ->
+      E.ExOpMutation _ opTx -> do
         execQueryOrMut Telem.Mutation Nothing $
           runLazyTx pgExecCtx Q.ReadWrite $ withUserInfo userInfo opTx
       E.ExOpSubs lqOp -> do
         -- log the graphql query
-        -- L.unLogger logger $ QueryLog query Nothing reqId
         logQueryLog logger query Nothing reqId
         let subscriberMetadata = LQ.mkSubscriberMetadata $ J.object
               [ "websocket_id" J..= WS.getWSId wsConn
@@ -428,6 +431,11 @@ onStart serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
       where
         telemLocality = Telem.Local

+    execQueryOrMut
+      :: Telem.QueryType
+      -> Maybe EQ.GeneratedSqlMap
+      -> ExceptT QErr (ExceptT () m) EncJSON
+      -> ExceptT () m ()
     execQueryOrMut telemQueryType genSql action = do
       logOpEv ODStarted (Just reqId)
       -- log the generated SQL and the graphql query
@@ -459,7 +467,7 @@ onStart serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
       -- if it's not a subscription, use HTTP to execute the query on the remote
       (runExceptT $ flip runReaderT execCtx $
-        E.execRemoteGQ reqId userInfo reqHdrs q rsi opDef) >>= \case
+        E.execRemoteGQ env reqId userInfo reqHdrs q rsi opDef) >>= \case
           Left err -> postExecErr reqId err
           Right (telemTimeIO_DT, !val) -> do
             -- Telemetry. NOTE: don't time network IO:
@@ -546,11 +554,17 @@ onStart serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
     catchAndIgnore m = void $ runExceptT m

 onMessage
-  :: (HasVersion, MonadIO m, UserAuthentication m, E.MonadGQLExecutionCheck m, MonadQueryLog m)
-  => AuthMode
+  :: ( HasVersion
+     , MonadIO m
+     , UserAuthentication m
+     , E.MonadGQLExecutionCheck m
+     , MonadQueryLog m
+     )
+  => Env.Environment
+  -> AuthMode
   -> WSServerEnv
   -> WSConn -> BL.ByteString -> m ()
-onMessage authMode serverEnv wsConn msgRaw =
+onMessage env authMode serverEnv wsConn msgRaw = do
   case J.eitherDecode msgRaw of
     Left e -> do
       let err = ConnErrMsg $ "parsing ClientMessage failed: " <> T.pack e
@@ -561,7 +575,7 @@ onMessage authMode serverEnv wsConn msgRaw =
       CMConnInit params -> onConnInit (_wseLogger serverEnv)
                            (_wseHManager serverEnv)
                            wsConn authMode params
-      CMStart startMsg  -> onStart serverEnv wsConn startMsg
+      CMStart startMsg  -> onStart env serverEnv wsConn startMsg
       CMStop stopMsg    -> liftIO $ onStop serverEnv wsConn stopMsg
       -- The idea is cleanup will be handled by 'onClose', but...
       -- NOTE: we need to close the websocket connection when we receive the
@@ -571,6 +585,7 @@ onMessage authMode serverEnv wsConn msgRaw =
   where
     logger = _wseLogger serverEnv

+onStop :: WSServerEnv -> WSConn -> StopMsg -> IO ()
 onStop serverEnv wsConn (StopMsg opId) = do
   -- When a stop message is received for an operation, it may not be present in OpMap
@@ -642,7 +657,7 @@ onConnInit logger manager wsConn authMode connParamsM = do
   let headers = mkHeaders connState
   res <- resolveUserInfo logger manager headers authMode
   case res of
     Left e -> do
       let !initErr = CSInitError $ qeError e
       liftIO $ do
         -- TODO(PDV) disabled for now; printing odd errors: $assertNFHere initErr -- so we don't write thunks to mutable vars
@@ -651,8 +666,9 @@ onConnInit logger manager wsConn authMode connParamsM = do
       let connErr = ConnErrMsg $ qeError e
       logWSEvent logger wsConn $ EConnErr connErr
       sendMsg wsConn $ SMConnErr connErr

     Right (userInfo, expTimeM) -> do
       let !csInit = CSInitialised $ WsClientState userInfo expTimeM paramHeaders ipAddress
       liftIO $ do
         -- TODO(PDV) disabled for now; printing odd errors: $assertNFHere csInit -- so we don't write thunks to mutable vars
         STM.atomically $ STM.writeTVar (_wscUser $ WS.getData wsConn) csInit
@@ -710,11 +726,11 @@ createWSServerEnv
   -> Bool
   -> E.PlanCache
   -> m WSServerEnv
-createWSServerEnv logger pgExecCtx lqState getSchemaCache httpManager
+createWSServerEnv logger isPgCtx lqState getSchemaCache httpManager
   corsPolicy sqlGenCtx enableAL planCache = do
   wsServer <- liftIO $ STM.atomically $ WS.createWSServer logger
   return $
-    WSServerEnv logger pgExecCtx lqState getSchemaCache httpManager corsPolicy
+    WSServerEnv logger isPgCtx lqState getSchemaCache httpManager corsPolicy
     sqlGenCtx planCache wsServer enableAL

 createWSServerApp
@@ -723,22 +739,23 @@ createWSServerApp
   , MC.MonadBaseControl IO m
   , LA.Forall (LA.Pure m)
   , UserAuthentication m
-  , WS.MonadWSLog m
   , E.MonadGQLExecutionCheck m
+  , WS.MonadWSLog m
   , MonadQueryLog m
   )
-  => AuthMode
+  => Env.Environment
+  -> AuthMode
   -> WSServerEnv
   -> WS.HasuraServerApp m
   -- ^ aka generalized 'WS.ServerApp'
-createWSServerApp authMode serverEnv = \ !ipAddress !pendingConn ->
+createWSServerApp env authMode serverEnv = \ !ipAddress !pendingConn ->
   WS.createServerApp (_wseServer serverEnv) handlers ipAddress pendingConn
   where
     handlers =
       WS.WSHandlers
      -- Mask async exceptions during event processing to help maintain integrity of mutable vars:
      (\rid rh ip -> mask_ $ onConn (_wseLogger serverEnv) (_wseCorsPolicy serverEnv) rid rh ip)
-      (\conn bs -> mask_ $ onMessage authMode serverEnv conn bs)
+      (\conn bs -> mask_ $ onMessage env authMode serverEnv conn bs)
      (\conn -> mask_ $ onClose (_wseLogger serverEnv) (_wseLiveQMap serverEnv) conn)

 stopWSServerApp :: WSServerEnv -> IO ()
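
The explicit signature given to `execQueryOrMut` in `onStart` above pins down the two error layers the action runs in: the inner `QErr` layer carries query failures, while the enclosing `ExceptT ()` is the early-exit signal used throughout `onStart`. A reduced illustration of peeling such a stack:

    import Control.Monad.Except (ExceptT, runExceptT)

    -- Unwrap both layers: first the error e, then the () exit signal.
    runBoth :: Monad m => ExceptT e (ExceptT () m) a -> m (Either () (Either e a))
    runBoth = runExceptT . runExceptT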
@@ -8,6 +8,7 @@ module Hasura.GraphQL.Transport.WebSocket.Protocol
   , ServerMsg(..)
   , ServerMsgType(..)
   , encodeServerMsg
+  , serverMsgType
   , DataMsg(..)
   , ErrorMsg(..)
   , ConnErrMsg(..)
@@ -115,6 +116,14 @@ instance Show ServerMsgType where
 instance J.ToJSON ServerMsgType where
   toJSON = J.toJSON . show

+serverMsgType :: ServerMsg -> ServerMsgType
+serverMsgType SMConnAck       = SMT_GQL_CONNECTION_ACK
+serverMsgType SMConnKeepAlive = SMT_GQL_CONNECTION_KEEP_ALIVE
+serverMsgType (SMConnErr _)   = SMT_GQL_CONNECTION_ERROR
+serverMsgType (SMData _)      = SMT_GQL_DATA
+serverMsgType (SMErr _)       = SMT_GQL_ERROR
+serverMsgType (SMComplete _)  = SMT_GQL_COMPLETE
+
 encodeServerMsg :: ServerMsg -> BL.ByteString
 encodeServerMsg msg =
   encJToLBS $ encJFromAssocList $ case msg of
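
A plausible consumer of the new total `serverMsgType` (a hypothetical helper, not part of the commit; the `Show` instance is the one visible in the hunk above):

    -- Label a message for logs without matching on ServerMsg at every call
    -- site and without serializing its payload.
    describeServerMsg :: ServerMsg -> String
    describeServerMsg = show . serverMsgType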
@@ -3,7 +3,8 @@
 module Hasura.GraphQL.Transport.WebSocket.Server
   ( WSId(..)
+  , WSLog(..)
+  , WSEvent(..)
   , WSConn
   , getData
   , getWSId
@@ -17,6 +18,7 @@ module Hasura.GraphQL.Transport.WebSocket.Server
   , WSHandlers(..)
   , WSServer
+  , HasuraServerApp
   , WSEventInfo(..)
   , WSQueueResponse(..)
   , ServerMsgType(..)
@@ -26,9 +28,6 @@ module Hasura.GraphQL.Transport.WebSocket.Server
   , shutdown
   , MonadWSLog (..)
-  , HasuraServerApp
-  , WSEvent(..)
-  , WSLog(..)
   ) where

 import qualified Control.Concurrent.Async as A
@@ -225,6 +224,9 @@ type OnConnH m a    = WSId -> WS.RequestHead -> IpAddress -> m (Either WS.Reject
 type OnCloseH m a   = WSConn a -> m ()
 type OnMessageH m a = WSConn a -> BL.ByteString -> m ()

+-- | aka generalized 'WS.ServerApp' over @m@, which takes an IPAddress
+type HasuraServerApp m = IpAddress -> WS.PendingConnection -> m ()
+
 data WSHandlers m a
   = WSHandlers
   { _hOnConn    :: OnConnH m a
@@ -232,16 +234,13 @@ data WSHandlers m a
   , _hOnClose   :: OnCloseH m a
   }

--- | aka generalized 'WS.ServerApp' over @m@, which takes an IPAddress
-type HasuraServerApp m = IpAddress -> WS.PendingConnection -> m ()

 createServerApp
   :: (MonadIO m, MC.MonadBaseControl IO m, LA.Forall (LA.Pure m), MonadWSLog m)
   => WSServer a
-  -- user provided handlers
   -> WSHandlers m a
-  -- aka WS.ServerApp
+  -- ^ user provided handlers
   -> HasuraServerApp m
+  -- ^ aka WS.ServerApp
 {-# INLINE createServerApp #-}
 createServerApp (WSServer logger@(L.Logger writeLog) serverStatus) wsHandlers !ipAddress !pendingConn = do
   wsId <- WSId <$> liftIO UUID.nextRandom
@@ -261,7 +260,7 @@ createServerApp (WSServer logger@(L.Logger writeLog) serverStatus) wsHandlers !i
     -- least log properly and re-raise:
     logUnexpectedExceptions = handle $ \(e :: SomeException) -> do
       writeLog $ L.UnstructuredLog L.LevelError $ fromString $
-        "Unexpected exception raised in websocket. Please report this as a bug: "<>show e
+        "Unexpected exception raised in websocket. Please report this as a bug: " <> show e
       throwIO e

     shuttingDownReject =
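
The comment shuffle around `createServerApp` is not cosmetic: plain `--` comments are invisible to Haddock, whereas `-- ^` attaches documentation to the item immediately preceding it, which is why each note now follows its argument. For example:

    connect
      :: String  -- ^ host name
      -> Int     -- ^ port
      -> IO ()
    connect _host _port = pure ()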
@@ -795,7 +795,7 @@ instance (MonadReusability m) => MonadReusability (StateT s m) where
   markNotReusable = lift markNotReusable

 newtype ReusabilityT m a = ReusabilityT { unReusabilityT :: StateT QueryReusability m a }
-  deriving (Functor, Applicative, Monad, MonadError e, MonadReader r, MonadIO)
+  deriving (Functor, Applicative, Monad, MonadError e, MonadReader r, MonadIO, MonadTrans)

 instance (Monad m) => MonadReusability (ReusabilityT m) where
   recordVariableUse varName varType = ReusabilityT $
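
Deriving `MonadTrans` (newtype-derived from the underlying `StateT QueryReusability`) lets callers lift base-monad actions without manual wrapping; a sketch of what it enables:

    import Control.Monad.Trans.Class (lift)

    -- Any base-monad action can now be lifted directly into ReusabilityT.
    liftBase :: Monad m => m a -> ReusabilityT m a
    liftBase = lift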
@@ -27,15 +27,14 @@ import Hasura.EncJSON
 import Hasura.GraphQL.Utils
 import Hasura.Prelude
 import Hasura.RQL.Types
-import Data.URL.Template
 import Hasura.Session
 import Hasura.SQL.Types

 import qualified Data.Aeson as J
 import qualified Data.Aeson.Casing as J
 import qualified Data.Aeson.TH as J
+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as Map
-import qualified Data.Text as T
 import qualified Database.PG.Query as Q
 import qualified Language.GraphQL.Draft.Syntax as G
@@ -76,7 +75,7 @@ persistCreateAction (CreateAction actionName actionDefinition comment) = do
       VALUES ($1, $2, $3)
   |] (actionName, Q.AltJ actionDefinition, comment) True

-{- Note [Postgres scalars in action input arguments]
+{-| Note [Postgres scalars in action input arguments]
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 It's very comfortable to be able to reference Postgres scalars in actions
 input arguments. For example, see the following action mutation:
@@ -95,14 +94,15 @@ referred scalars.
 -}
 resolveAction
-  :: (QErrM m, MonadIO m)
-  => AnnotatedCustomTypes
+  :: QErrM m
+  => Env.Environment
+  -> AnnotatedCustomTypes
   -> ActionDefinitionInput
   -> HashSet PGScalarType -- See Note [Postgres scalars in custom types]
   -> m ( ResolvedActionDefinition
        , AnnotatedObjectType
        )
-resolveAction AnnotatedCustomTypes{..} ActionDefinition{..} allPGScalars = do
+resolveAction env AnnotatedCustomTypes{..} ActionDefinition{..} allPGScalars = do
   resolvedArguments <- forM _adArguments $ \argumentDefinition -> do
     forM argumentDefinition $ \argumentType -> do
       let gType = unGraphQLType argumentType
@@ -123,16 +123,12 @@ resolveAction AnnotatedCustomTypes{..} ActionDefinition{..} allPGScalars = do
   outputObject <- onNothing (Map.lookup outputBaseType _actObjects) $
     throw400 NotExists $ "the type: " <> showName outputBaseType
     <> " is not an object type defined in custom types"
-  resolvedWebhook <- resolveWebhook _adHandler
+  resolvedWebhook <- resolveWebhook env _adHandler
   pure ( ActionDefinition resolvedArguments _adOutputType _adType
          _adHeaders _adForwardClientHeaders resolvedWebhook
       , outputObject
       )
   where
-    resolveWebhook (InputWebhook urlTemplate) = do
-      eitherRenderedTemplate <- renderURLTemplate urlTemplate
-      either (throw400 Unexpected . T.pack) (pure . ResolvedWebhook) eitherRenderedTemplate
     lookupPGScalar baseType = -- see Note [Postgres scalars in custom types]
       fmap (flip ScalarTypeDefinition Nothing) $
      find ((==) baseType) $ mapMaybe (G.mkName . toSQLTxt) $
@@ -225,8 +221,9 @@ resolveAction
   -> m ( ResolvedActionDefinition
        , AnnotatedObjectType
        , HashSet PGScalarType
-       ) -- ^ see Note [Postgres scalars in action input arguments].
+       -- ^ see Note [Postgres scalars in action input arguments].
+       )
-resolveAction customTypes allPGScalars actionDefinition = do
+resolveAction env customTypes allPGScalars actionDefinition = do
   let responseType = unGraphQLType $ _adOutputType actionDefinition
       responseBaseType = G.getBaseType responseType
@@ -253,7 +250,7 @@ resolveAction customTypes allPGScalars actionDefinition = do
   -- Check if the response type is an object
   outputObject <- getObjectTypeInfo responseBaseType
-  resolvedDef <- traverse resolveWebhook actionDefinition
+  resolvedDef <- traverse (resolveWebhook env) actionDefinition
   pure (resolvedDef, outputObject, reusedPGScalars)
   where
     getNonObjectTypeInfo typeName =
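
The local `resolveWebhook` helper deleted here is superseded by a shared, environment-aware one whose definition lies outside this excerpt. A sketch consistent with the deleted code, assuming the shared helper has this shape and that `renderURLTemplate` now takes the environment and returns an `Either` purely:

    resolveWebhook :: QErrM m => Env.Environment -> InputWebhook -> m ResolvedWebhook
    resolveWebhook env (InputWebhook urlTemplate) = do
      -- Rendering consults only the captured environment; no MonadIO needed.
      let eitherRenderedTemplate = renderURLTemplate env urlTemplate
      either (throw400 Unexpected . T.pack) (pure . ResolvedWebhook) eitherRenderedTemplate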
@@ -20,7 +20,6 @@ module Hasura.RQL.DDL.EventTrigger
   ) where

 import Data.Aeson
-import System.Environment (lookupEnv)

 import Hasura.EncJSON
 import Hasura.Prelude
@@ -32,6 +31,7 @@ import Hasura.SQL.Types
 import qualified Hasura.SQL.DML as S

 import qualified Data.Text as T
+import qualified Data.Environment as Env
 import qualified Data.Text.Lazy as TL
 import qualified Database.PG.Query as Q
 import qualified Text.Shakespeare.Text as ST
@@ -208,16 +208,19 @@ subTableP1 (CreateEventTriggerQuery name qt insert update delete enableManual re
       SubCArray pgcols -> forM_ pgcols (assertPGCol (_tciFieldInfoMap ti) "")

 subTableP2Setup
-  :: (QErrM m, MonadIO m)
-  => QualifiedTable -> EventTriggerConf -> m (EventTriggerInfo, [SchemaDependency])
-subTableP2Setup qt (EventTriggerConf name def webhook webhookFromEnv rconf mheaders) = do
+  :: QErrM m
+  => Env.Environment
+  -> QualifiedTable
+  -> EventTriggerConf
+  -> m (EventTriggerInfo, [SchemaDependency])
+subTableP2Setup env qt (EventTriggerConf name def webhook webhookFromEnv rconf mheaders) = do
   webhookConf <- case (webhook, webhookFromEnv) of
     (Just w, Nothing)    -> return $ WCValue w
     (Nothing, Just wEnv) -> return $ WCEnv wEnv
     _                    -> throw500 "expected webhook or webhook_from_env"
   let headerConfs = fromMaybe [] mheaders
-  webhookInfo <- getWebhookInfoFromConf webhookConf
-  headerInfos <- getHeaderInfosFromConf headerConfs
+  webhookInfo <- getWebhookInfoFromConf env webhookConf
+  headerInfos <- getHeaderInfosFromConf env headerConfs
   let eTrigInfo = EventTriggerInfo name def rconf webhookInfo headerInfos
       tabDep = SchemaDependency (SOTable qt) DRParent
   pure (eTrigInfo, tabDep:getTrigDefDeps qt def)
@@ -310,30 +313,35 @@ runInvokeEventTrigger (InvokeEventTriggerQuery name payload) = do
     _ -> throw400 NotSupported "manual mode is not enabled for event trigger"

 getHeaderInfosFromConf
-  :: (QErrM m, MonadIO m)
-  => [HeaderConf] -> m [EventHeaderInfo]
-getHeaderInfosFromConf = mapM getHeader
+  :: QErrM m
+  => Env.Environment
+  -> [HeaderConf]
+  -> m [EventHeaderInfo]
+getHeaderInfosFromConf env = mapM getHeader
   where
-    getHeader :: (QErrM m, MonadIO m) => HeaderConf -> m EventHeaderInfo
+    getHeader :: QErrM m => HeaderConf -> m EventHeaderInfo
     getHeader hconf = case hconf of
       (HeaderConf _ (HVValue val)) -> return $ EventHeaderInfo hconf val
       (HeaderConf _ (HVEnv val))   -> do
-        envVal <- getEnv val
+        envVal <- getEnv env val
        return $ EventHeaderInfo hconf envVal

 getWebhookInfoFromConf
-  :: (QErrM m, MonadIO m) => WebhookConf -> m WebhookConfInfo
-getWebhookInfoFromConf wc = case wc of
+  :: QErrM m
+  => Env.Environment
+  -> WebhookConf
+  -> m WebhookConfInfo
+getWebhookInfoFromConf env wc = case wc of
   WCValue w -> return $ WebhookConfInfo wc w
   WCEnv we -> do
-    envVal <- getEnv we
+    envVal <- getEnv env we
    return $ WebhookConfInfo wc envVal

-getEnv :: (QErrM m, MonadIO m) => T.Text -> m T.Text
-getEnv env = do
-  mEnv <- liftIO $ lookupEnv (T.unpack env)
+getEnv :: QErrM m => Env.Environment -> T.Text -> m T.Text
+getEnv env k = do
+  let mEnv = Env.lookupEnv env (T.unpack k)
   case mEnv of
-    Nothing -> throw400 NotFound $ "environment variable '" <> env <> "' not set"
+    Nothing     -> throw400 NotFound $ "environment variable '" <> k <> "' not set"
     Just envVal -> return (T.pack envVal)

 getEventTriggerDef
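
With `getEnv` now a pure function over the captured environment, webhook and header resolution can be exercised without the real process environment. A hedged sketch (`mkEnvironment` is the hypothetical constructor from the sketch near the top of this diff; assumes `QErrM` reduces to `MonadError QErr` and OverloadedStrings):

    import Control.Monad.Except (runExcept)

    testWebhook :: Either QErr WebhookConfInfo
    testWebhook = runExcept $ getWebhookInfoFromConf env (WCEnv "WEBHOOK_URL")
      where
        env = Env.mkEnvironment [("WEBHOOK_URL", "https://example.com/hook")]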
@@ -6,10 +6,10 @@ import Hasura.Prelude
 import Hasura.RQL.Instances ()
 import Hasura.RQL.Types.Error
 import Language.Haskell.TH.Syntax (Lift)
-import System.Environment (lookupEnv)

 import qualified Data.CaseInsensitive as CI
 import qualified Data.Text as T
+import qualified Data.Environment as Env
 import qualified Network.HTTP.Types as HTTP
@@ -46,15 +46,15 @@ instance ToJSON HeaderConf where

 -- | Resolve configuration headers
 makeHeadersFromConf
-  :: (MonadError QErr m, MonadIO m) => [HeaderConf] -> m [HTTP.Header]
-makeHeadersFromConf = mapM getHeader
+  :: MonadError QErr m => Env.Environment -> [HeaderConf] -> m [HTTP.Header]
+makeHeadersFromConf env = mapM getHeader
   where
     getHeader hconf =
       ((CI.mk . txtToBs) *** txtToBs) <$>
       case hconf of
         (HeaderConf name (HVValue val)) -> return (name, val)
         (HeaderConf name (HVEnv val))   -> do
-          mEnv <- liftIO $ lookupEnv (T.unpack val)
+          let mEnv = Env.lookupEnv env (T.unpack val)
           case mEnv of
             Nothing -> throw400 NotFound $ "environment variable '" <> val <> "' not set"
             Just envval -> pure (name, T.pack envval)
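
Same idea here: with the explicit `MonadError QErr m` constraint and no `MonadIO`, header resolution becomes a pure computation runnable with `runExcept`. A sketch mixing literal and environment-sourced headers (again using the hypothetical `mkEnvironment`; assumes OverloadedStrings):

    import Control.Monad.Except (runExcept)

    resolvedHeaders :: Either QErr [HTTP.Header]
    resolvedHeaders = runExcept $ makeHeadersFromConf env
      [ HeaderConf "X-Static"      (HVValue "1")
      , HeaderConf "Authorization" (HVEnv "AUTH_TOKEN")
      ]
      where
        env = Env.mkEnvironment [("AUTH_TOKEN", "Bearer secret")]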
@@ -6,7 +6,6 @@ where
 import Hasura.Prelude

 import qualified Data.Aeson as J
-import Data.List.Extended (duplicates)
 import qualified Data.Text as T
 import qualified Data.Vector as V
 import qualified Language.GraphQL.Draft.Parser as G
@@ -15,6 +14,8 @@ import qualified Language.Haskell.TH.Syntax as TH
 import qualified Network.URI as N
 import qualified System.Cron.Parser as Cr

+import Data.List.Extended (duplicates)
+
 import Data.Scientific
 import System.Cron.Types
 import Test.QuickCheck
@@ -28,6 +28,8 @@ import Hasura.RQL.Types
 import Hasura.Server.Version (HasVersion)
 import Hasura.SQL.Types

+import qualified Data.Environment as Env
+
 runAddRemoteSchema
   :: ( HasVersion
      , QErrM m
@@ -37,10 +39,12 @@ runAddRemoteSchema
      , MonadUnique m
      , HasHttpManager m
      )
-  => AddRemoteSchemaQuery -> m EncJSON
-runAddRemoteSchema q = do
+  => Env.Environment
+  -> AddRemoteSchemaQuery
+  -> m EncJSON
+runAddRemoteSchema env q = do
   addRemoteSchemaP1 name
-  addRemoteSchemaP2 q
+  addRemoteSchemaP2 env q
   buildSchemaCacheFor $ MORemoteSchema name
   pure successMsg
   where
@@ -57,16 +61,17 @@ addRemoteSchemaP1 name = do

 addRemoteSchemaP2Setup
   :: (HasVersion, QErrM m, MonadIO m, MonadUnique m, HasHttpManager m)
-  => AddRemoteSchemaQuery -> m RemoteSchemaCtx
-addRemoteSchemaP2Setup (AddRemoteSchemaQuery name def _) = do
+  => Env.Environment
+  -> AddRemoteSchemaQuery -> m RemoteSchemaCtx
+addRemoteSchemaP2Setup env (AddRemoteSchemaQuery name def _) = do
   httpMgr <- askHttpManager
-  rsi <- validateRemoteSchemaDef def
-  fetchRemoteSchema httpMgr name rsi
+  rsi <- validateRemoteSchemaDef env def
+  fetchRemoteSchema env httpMgr name rsi

 addRemoteSchemaP2
-  :: (HasVersion, MonadTx m, MonadIO m, MonadUnique m, HasHttpManager m) => AddRemoteSchemaQuery -> m ()
-addRemoteSchemaP2 q = do
-  void $ addRemoteSchemaP2Setup q
+  :: (HasVersion, MonadTx m, MonadIO m, MonadUnique m, HasHttpManager m) => Env.Environment -> AddRemoteSchemaQuery -> m ()
+addRemoteSchemaP2 env q = do
+  void $ addRemoteSchemaP2Setup env q
   liftTx $ addRemoteSchemaToCatalog q

 runRemoveRemoteSchema
@@ -18,6 +18,7 @@ import Hasura.Eventing.ScheduledTrigger
 import qualified Database.PG.Query as Q
 import qualified Data.Time.Clock as C
 import qualified Data.HashMap.Strict as Map
+import qualified Data.Environment as Env

 -- | runCreateCronTrigger will update a existing cron trigger when the 'replace'
 -- value is set to @true@ and when replace is @false@ a new cron trigger will
@@ -61,11 +62,13 @@ addCronTriggerToCatalog CronTriggerMetadata {..} = liftTx $ do
   insertCronEvents $ map (CronEventSeed ctName) scheduleTimes

 resolveCronTrigger
-  :: (QErrM m, MonadIO m)
-  => CatalogCronTrigger -> m CronTriggerInfo
-resolveCronTrigger CatalogCronTrigger {..} = do
-  webhookInfo <- resolveWebhook _cctWebhookConf
-  headerInfo <- getHeaderInfosFromConf headers
+  :: (QErrM m)
+  => Env.Environment
+  -> CatalogCronTrigger
+  -> m CronTriggerInfo
+resolveCronTrigger env CatalogCronTrigger {..} = do
+  webhookInfo <- resolveWebhook env _cctWebhookConf
+  headerInfo <- getHeaderInfosFromConf env headers
   pure $
     CronTriggerInfo _cctName
                     _cctCronSchedule
@@ -23,6 +23,7 @@ import Hasura.Prelude
 import qualified Data.HashMap.Strict.Extended as M
 import qualified Data.HashSet as HS
 import qualified Data.Text as T
+import qualified Data.Environment as Env
 import qualified Database.PG.Query as Q

 import Control.Arrow.Extended
@@ -59,11 +60,12 @@ import Hasura.SQL.Types

 buildRebuildableSchemaCache
   :: (HasVersion, MonadIO m, MonadUnique m, MonadTx m, HasHttpManager m, HasSQLGenCtx m)
-  => m (RebuildableSchemaCache m)
-buildRebuildableSchemaCache = do
+  => Env.Environment
+  -> m (RebuildableSchemaCache m)
+buildRebuildableSchemaCache env = do
   catalogMetadata <- liftTx fetchCatalogData
   result <- flip runReaderT CatalogSync $
-    Inc.build buildSchemaCacheRule (catalogMetadata, initialInvalidationKeys)
+    Inc.build (buildSchemaCacheRule env) (catalogMetadata, initialInvalidationKeys)
   pure $ RebuildableSchemaCache (Inc.result result) initialInvalidationKeys (Inc.rebuildRule result)

 newtype CacheRWT m a
@@ -113,8 +115,9 @@ buildSchemaCacheRule
   :: ( HasVersion, ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
      , MonadIO m, MonadUnique m, MonadTx m
      , MonadReader BuildReason m, HasHttpManager m, HasSQLGenCtx m )
-  => (CatalogMetadata, InvalidationKeys) `arr` SchemaCache
-buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
+  => Env.Environment
+  -> (CatalogMetadata, InvalidationKeys) `arr` SchemaCache
+buildSchemaCacheRule env = proc (catalogMetadata, invalidationKeys) -> do
   invalidationKeysDep <- Inc.newDependency -< invalidationKeys

   -- Step 1: Process metadata and collect dependency information.
@@ -318,7 +321,7 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
     buildTableEventTriggers
       :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
-         , Inc.ArrowCache m arr, MonadIO m, MonadTx m, MonadReader BuildReason m, HasSQLGenCtx m )
+         , Inc.ArrowCache m arr, MonadTx m, MonadReader BuildReason m, HasSQLGenCtx m )
      => (TableCoreInfo, [CatalogEventTrigger]) `arr` EventTriggerInfoMap
     buildTableEventTriggers = buildInfoMap _cetName mkEventTriggerMetadataObject buildEventTrigger
       where
@@ -330,7 +333,7 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
          (| withRecordInconsistency (
            (| modifyErrA (do
                 etc <- bindErrorA -< decodeValue configuration
-                 (info, dependencies) <- bindErrorA -< subTableP2Setup qt etc
+                 (info, dependencies) <- bindErrorA -< subTableP2Setup env qt etc
                 let tableColumns = M.mapMaybe (^? _FIColumn) (_tciFieldInfoMap tableInfo)
                 recreateViewIfNeeded -< (qt, tableColumns, trn, etcDefinition etc)
                 recordDependencies -< (metadataObject, schemaObjectId, dependencies)
@@ -345,6 +348,45 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
          liftTx $ delTriggerQ triggerName -- executes DROP IF EXISTS.. sql
          mkAllTriggersQ triggerName tableName (M.elems tableColumns) triggerDefinition

+    buildCronTriggers
+      :: ( ArrowChoice arr
+         , Inc.ArrowDistribute arr
+         , ArrowWriter (Seq CollectedInfo) arr
+         , Inc.ArrowCache m arr
+         , MonadTx m)
+      => ((),[CatalogCronTrigger])
+         `arr` HashMap TriggerName CronTriggerInfo
+    buildCronTriggers = buildInfoMap _cctName mkCronTriggerMetadataObject buildCronTrigger
+      where
+        buildCronTrigger = proc (_,cronTrigger) -> do
+          let triggerName = triggerNameToTxt $ _cctName cronTrigger
+              addCronTriggerContext e = "in cron trigger " <> triggerName <> ": " <> e
+          (| withRecordInconsistency (
+            (| modifyErrA (bindErrorA -< resolveCronTrigger env cronTrigger)
+             |) addCronTriggerContext)
+           |) (mkCronTriggerMetadataObject cronTrigger)
+
+    buildActions
+      :: ( ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
+         , ArrowWriter (Seq CollectedInfo) arr)
+      => ( (AnnotatedCustomTypes, HashSet PGScalarType)
+         , [ActionMetadata]
+         ) `arr` HashMap ActionName ActionInfo
+    buildActions = buildInfoMap _amName mkActionMetadataObject buildAction
+      where
+        buildAction = proc ((resolvedCustomTypes, pgScalars), action) -> do
+          let ActionMetadata name comment def actionPermissions = action
+              addActionContext e = "in action " <> name <<> "; " <> e
+          (| withRecordInconsistency (
+            (| modifyErrA (do
+                 (resolvedDef, outObject) <- liftEitherA <<< bindA -<
+                   runExceptT $ resolveAction env resolvedCustomTypes def pgScalars
+                 let permissionInfos = map (ActionPermissionInfo . _apmRole) actionPermissions
+                     permissionMap = mapFromL _apiRole permissionInfos
+                 returnA -< ActionInfo name outObject resolvedDef permissionMap comment)
+             |) addActionContext)
+           |) (mkActionMetadataObject action)
+
     buildRemoteSchemas
       :: ( ArrowChoice arr, Inc.ArrowDistribute arr, ArrowWriter (Seq CollectedInfo) arr
          , Inc.ArrowCache m arr , MonadIO m, MonadUnique m, HasHttpManager m )
@@ -359,48 +401,9 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
        buildRemoteSchema = Inc.cache proc (invalidationKeys, remoteSchema) -> do
          Inc.dependOn -< Inc.selectKeyD (_arsqName remoteSchema) invalidationKeys
          (| withRecordInconsistency (liftEitherA <<< bindA -<
-            runExceptT $ addRemoteSchemaP2Setup remoteSchema)
+            runExceptT $ addRemoteSchemaP2Setup env remoteSchema)
           |) (mkRemoteSchemaMetadataObject remoteSchema)

-    buildActions
-      :: ( ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
-         , ArrowWriter (Seq CollectedInfo) arr, MonadIO m )
-      => ( (AnnotatedCustomTypes, HashSet PGScalarType)
-         , [ActionMetadata]
-         ) `arr` HashMap ActionName ActionInfo
-    buildActions = buildInfoMap _amName mkActionMetadataObject buildAction
-      where
-        buildAction = proc ((resolvedCustomTypes, pgScalars), action) -> do
-          let ActionMetadata name comment def actionPermissions = action
-              addActionContext e = "in action " <> name <<> "; " <> e
-          (| withRecordInconsistency (
-            (| modifyErrA (do
-                 (resolvedDef, outObject) <- liftEitherA <<< bindA -<
-                   runExceptT $ resolveAction resolvedCustomTypes def pgScalars
-                 let permissionInfos = map (ActionPermissionInfo . _apmRole) actionPermissions
-                     permissionMap = mapFromL _apiRole permissionInfos
-                 returnA -< ActionInfo name outObject resolvedDef permissionMap comment)
-             |) addActionContext)
-           |) (mkActionMetadataObject action)
-
-    buildCronTriggers
-      :: ( ArrowChoice arr
-         , Inc.ArrowDistribute arr
-         , ArrowWriter (Seq CollectedInfo) arr
-         , Inc.ArrowCache m arr
-         , MonadIO m
-         , MonadTx m)
-      => ((),[CatalogCronTrigger])
-         `arr` HashMap TriggerName CronTriggerInfo
-    buildCronTriggers = buildInfoMap _cctName mkCronTriggerMetadataObject buildCronTrigger
-      where
-        buildCronTrigger = proc (_,cronTrigger) -> do
-          let triggerName = triggerNameToTxt $ _cctName cronTrigger
-              addCronTriggerContext e = "in cron trigger " <> triggerName <> ": " <> e
-          (| withRecordInconsistency (
-            (| modifyErrA (bindErrorA -< resolveCronTrigger cronTrigger)
-             |) addCronTriggerContext)
-           |) (mkCronTriggerMetadataObject cronTrigger)

 -- | @'withMetadataCheck' cascade action@ runs @action@ and checks if the schema changed as a
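
Note how the environment enters the incremental build: `buildSchemaCacheRule env` closes over it as a plain function argument rather than receiving it as arrow input, so a changed environment is invisible to the `Inc.*` dependency tracking and only takes effect on a full rebuild. A reduced, self-contained illustration of the closure pattern (names illustrative, not Hasura APIs):

    {-# LANGUAGE Arrows #-}
    import Control.Arrow (Kleisli (..), returnA)
    import qualified Data.Environment as Env

    -- The rule is constructed once per environment; arrows inside the proc
    -- block consult env without it appearing in the arrow's input type.
    buildRule :: Env.Environment -> Kleisli (Either String) String String
    buildRule env = proc key -> do
      value <- Kleisli lookupOrFail -< key
      returnA -< key <> "=" <> value
      where
        lookupOrFail k =
          maybe (Left ("unset: " <> k)) Right (Env.lookupEnv env k)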
@@ -12,6 +12,8 @@ import Data.Aeson
 import Instances.TH.Lift ()

 import qualified Data.Sequence as DS
+import qualified Data.Environment as Env
+
 import Hasura.EncJSON
 import Hasura.Prelude
@@ -105,13 +107,18 @@ validateDeleteQ =
   runDMLP1T . validateDeleteQWith sessVarFromCurrentSetting binRHSBuilder

 execDeleteQuery
-  :: (HasVersion, MonadTx m, MonadIO m)
-  => Bool
+  ::
+  ( HasVersion
+  , MonadTx m
+  , MonadIO m
+  )
+  => Env.Environment
+  -> Bool
   -> Maybe MutationRemoteJoinCtx
   -> (AnnDel, DS.Seq Q.PrepArg)
   -> m EncJSON
-execDeleteQuery strfyNum remoteJoinCtx (u, p) =
-  runMutation $ mkMutation remoteJoinCtx (dqp1Table u) (deleteCTE, p)
+execDeleteQuery env strfyNum remoteJoinCtx (u, p) =
+  runMutation env $ mkMutation remoteJoinCtx (dqp1Table u) (deleteCTE, p)
                (dqp1Output u) (dqp1AllCols u) strfyNum
   where
     deleteCTE = mkDeleteCTE u
@@ -120,7 +127,9 @@ runDelete
   :: ( HasVersion, QErrM m, UserInfoM m, CacheRM m
      , MonadTx m, HasSQLGenCtx m, MonadIO m
      )
-  => DeleteQuery -> m EncJSON
-runDelete q = do
+  => Env.Environment
+  -> DeleteQuery
+  -> m EncJSON
+runDelete env q = do
   strfyNum <- stringifyNum <$> askSQLGenCtx
-  validateDeleteQ q >>= execDeleteQuery strfyNum Nothing
+  validateDeleteQ q >>= execDeleteQuery env strfyNum Nothing
@@ -30,6 +30,7 @@ import Hasura.Server.Version (HasVersion)
 import Hasura.Session
 import Hasura.SQL.Types

+import qualified Data.Environment as Env

 mkInsertCTE :: InsertQueryP1 -> S.CTE
 mkInsertCTE (InsertQueryP1 tn cols vals conflict (insCheck, updCheck) _ _) =
@@ -240,13 +241,17 @@ convInsQ =
     binRHSBuilder

 execInsertQuery
-  :: (HasVersion, MonadTx m, MonadIO m)
-  => Bool
+  :: ( HasVersion
+     , MonadTx m
+     , MonadIO m
+     )
+  => Env.Environment
+  -> Bool
   -> Maybe MutationRemoteJoinCtx
   -> (InsertQueryP1, DS.Seq Q.PrepArg)
   -> m EncJSON
-execInsertQuery strfyNum remoteJoinCtx (u, p) =
-  runMutation
+execInsertQuery env strfyNum remoteJoinCtx (u, p) =
+  runMutation env
     $ mkMutation remoteJoinCtx (iqp1Table u) (insertCTE, p)
       (iqp1Output u) (iqp1AllCols u) strfyNum
   where
@@ -329,8 +334,8 @@ runInsert
   :: ( HasVersion, QErrM m, UserInfoM m
      , CacheRM m, MonadTx m, HasSQLGenCtx m, MonadIO m
      )
-  => InsertQuery -> m EncJSON
-runInsert q = do
+  => Env.Environment -> InsertQuery -> m EncJSON
+runInsert env q = do
   res <- convInsQ q
   strfyNum <- stringifyNum <$> askSQLGenCtx
-  execInsertQuery strfyNum Nothing res
+  execInsertQuery env strfyNum Nothing res
@@ -11,11 +11,12 @@ where
 import Hasura.Prelude

+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as Map
 import qualified Data.Sequence as DS
 import qualified Database.PG.Query as Q
 import qualified Network.HTTP.Client as HTTP
 import qualified Network.HTTP.Types as N

 import qualified Hasura.SQL.DML as S
@@ -58,17 +59,29 @@ mkMutation ctx table query output' allCols strfyNum =
   in Mutation table query output allCols remoteJoinsCtx strfyNum

 runMutation
-  :: (HasVersion, MonadTx m, MonadIO m)
-  => Mutation -> m EncJSON
-runMutation mut =
-  bool (mutateAndReturn mut) (mutateAndSel mut) $
+  ::
+    ( HasVersion
+    , MonadTx m
+    , MonadIO m
+    )
+  => Env.Environment
+  -> Mutation
+  -> m EncJSON
+runMutation env mut =
+  bool (mutateAndReturn env mut) (mutateAndSel env mut) $
     hasNestedFld $ _mOutput mut

 mutateAndReturn
-  :: (HasVersion, MonadTx m, MonadIO m)
-  => Mutation -> m EncJSON
-mutateAndReturn (Mutation qt (cte, p) mutationOutput allCols remoteJoins strfyNum) =
-  executeMutationOutputQuery sqlQuery (toList p) remoteJoins
+  ::
+    ( HasVersion
+    , MonadTx m
+    , MonadIO m
+    )
+  => Env.Environment
+  -> Mutation
+  -> m EncJSON
+mutateAndReturn env (Mutation qt (cte, p) mutationOutput allCols remoteJoins strfyNum) =
+  executeMutationOutputQuery env sqlQuery (toList p) remoteJoins
   where
     sqlQuery = Q.fromBuilder $ toSQL $
       mkMutationOutputExp qt allCols Nothing cte mutationOutput strfyNum
@@ -88,29 +101,40 @@ conditions **might** see some degradation.
 -}

 mutateAndSel
-  :: (HasVersion, MonadTx m, MonadIO m)
-  => Mutation -> m EncJSON
-mutateAndSel (Mutation qt q mutationOutput allCols remoteJoins strfyNum) = do
+  ::
+    ( HasVersion
+    , MonadTx m
+    , MonadIO m
+    )
+  => Env.Environment
+  -> Mutation
+  -> m EncJSON
+mutateAndSel env (Mutation qt q mutationOutput allCols remoteJoins strfyNum) = do
   -- Perform mutation and fetch unique columns
   MutateResp _ columnVals <- liftTx $ mutateAndFetchCols qt allCols q strfyNum
   selCTE <- mkSelCTEFromColVals qt allCols columnVals
   let selWith = mkMutationOutputExp qt allCols Nothing selCTE mutationOutput strfyNum
   -- Perform select query and fetch returning fields
-  executeMutationOutputQuery (Q.fromBuilder $ toSQL selWith) [] remoteJoins
+  executeMutationOutputQuery env (Q.fromBuilder $ toSQL selWith) [] remoteJoins

 executeMutationOutputQuery
-  :: (HasVersion, MonadTx m, MonadIO m)
-  => Q.Query -- ^ SQL query
+  ::
+    ( HasVersion
+    , MonadTx m
+    , MonadIO m
+    )
+  => Env.Environment
+  -> Q.Query -- ^ SQL query
   -> [Q.PrepArg] -- ^ Prepared params
   -> Maybe (RemoteJoins, MutationRemoteJoinCtx) -- ^ Remote joins context
   -> m EncJSON
-executeMutationOutputQuery query prepArgs = \case
+executeMutationOutputQuery env query prepArgs = \case
   Nothing ->
     runIdentity . Q.getRow
       -- See Note [Prepared statements in Mutations]
       <$> liftTx (Q.rawQE dmlTxErrorHandler query prepArgs False)
   Just (remoteJoins, (httpManager, reqHeaders, userInfo)) ->
-    executeQueryWithRemoteJoins httpManager reqHeaders userInfo query prepArgs remoteJoins
+    executeQueryWithRemoteJoins env httpManager reqHeaders userInfo query prepArgs remoteJoins

 mutateAndFetchCols
   :: QualifiedTable
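All of the mutation runners above now take the environment as their first argument. One practical consequence: callers can inject a synthetic environment, for example in tests, instead of mutating the process environment. A small illustrative sketch (the variable name and Env.mkEnvironment follow the interface sketched earlier, not code shown in this diff):

    import qualified Data.Environment as Env

    -- Illustrative only: a synthetic environment for exercising the
    -- mutation code paths without touching the real process environment.
    testEnv :: Env.Environment
    testEnv = Env.mkEnvironment [("WEBHOOK_BASE_URL", "http://localhost:8080")]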

View File

@@ -30,6 +30,7 @@ import qualified Hasura.SQL.DML as S
 import qualified Data.Aeson as A
 import qualified Data.Aeson.Ordered as AO
+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as Map
 import qualified Data.HashMap.Strict.Extended as Map
 import qualified Data.HashMap.Strict.InsOrd as OMap
@@ -44,15 +45,19 @@ import qualified Network.HTTP.Types as N
 -- | Executes given query and fetch response JSON from Postgres. Substitutes remote relationship fields.
 executeQueryWithRemoteJoins
-  :: (HasVersion, MonadTx m, MonadIO m)
-  => HTTP.Manager
+  :: ( HasVersion
+     , MonadTx m
+     , MonadIO m
+     )
+  => Env.Environment
+  -> HTTP.Manager
   -> [N.Header]
   -> UserInfo
   -> Q.Query
   -> [Q.PrepArg]
   -> RemoteJoins
   -> m EncJSON
-executeQueryWithRemoteJoins manager reqHdrs userInfo q prepArgs rjs = do
+executeQueryWithRemoteJoins env manager reqHdrs userInfo q prepArgs rjs = do
   -- Step 1: Perform the query on database and fetch the response
   pgRes <- runIdentity . Q.getRow <$> liftTx (Q.rawQE dmlTxErrorHandler q prepArgs True)
   jsonRes <- either (throw500 . T.pack) pure $ AO.eitherDecode pgRes
@@ -60,7 +65,7 @@ executeQueryWithRemoteJoins manager reqHdrs userInfo q prepArgs rjs = do
   compositeJson <- traverseQueryResponseJSON rjMap jsonRes
   let remoteJoins = collectRemoteFields compositeJson
   -- Step 3: Make queries to remote server and fetch graphql response
-  remoteServerResp <- fetchRemoteJoinFields manager reqHdrs userInfo remoteJoins
+  remoteServerResp <- fetchRemoteJoinFields env manager reqHdrs userInfo remoteJoins
   -- Step 4: Replace remote fields in composite json with remote join values
   AO.toEncJSON <$> replaceRemoteFields compositeJson remoteServerResp
   where
@@ -406,19 +411,20 @@ fetchRemoteJoinFields
   , MonadError QErr m
   , MonadIO m
   )
-  => HTTP.Manager
+  => Env.Environment
+  -> HTTP.Manager
   -> [N.Header]
   -> UserInfo
   -> [RemoteJoinField]
   -> m AO.Object
-fetchRemoteJoinFields manager reqHdrs userInfo remoteJoins = do
+fetchRemoteJoinFields env manager reqHdrs userInfo remoteJoins = do
   results <- forM (Map.toList remoteSchemaBatch) $ \(rsi, batch) -> do
     let batchList = toList batch
         gqlReq = fieldsToRequest G.OperationTypeQuery
                  (map _rjfField batchList)
         gqlReqUnparsed = (GQLQueryText . G.renderExecutableDoc . G.ExecutableDocument . unGQLExecDoc) <$> gqlReq
     -- NOTE: discard remote headers (for now):
-    (_, _, respBody) <- execRemoteGQ' manager userInfo reqHdrs gqlReqUnparsed rsi G.OperationTypeQuery
+    (_, _, respBody) <- execRemoteGQ' env manager userInfo reqHdrs gqlReqUnparsed rsi G.OperationTypeQuery
     case AO.eitherDecode respBody of
       Left e -> throw500 $ "Remote server response is not valid JSON: " <> T.pack e
       Right r -> do
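fetchRemoteJoinFields itself only forwards the environment to execRemoteGQ', presumably so that remote-schema configuration (such as header values named by environment variables) can be resolved from the injected snapshot rather than from the process environment. A standalone, hedged illustration of that style of pure resolution (resolveHeaderFromEnv is invented for this example):

    {-# LANGUAGE OverloadedStrings #-}

    import qualified Data.Environment as Env
    import           Data.Text        (Text)
    import qualified Data.Text        as T

    -- Invented helper: resolve a header whose value is named by an env var.
    resolveHeaderFromEnv :: Env.Environment -> Text -> Either Text Text
    resolveHeaderFromEnv env var =
      case Env.lookupEnv env (T.unpack var) of
        Nothing  -> Left ("environment variable '" <> var <> "' not set")
        Just val -> Right (T.pack val)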

View File

@@ -6,6 +6,7 @@ module Hasura.RQL.DML.Select.Internal
   )
 where

+import Instances.TH.Lift ()
 import Control.Lens hiding (op)
 import Control.Monad.Writer.Strict

View File

@@ -28,6 +28,7 @@ import Hasura.SQL.Types
 import qualified Database.PG.Query as Q
 import qualified Hasura.SQL.DML as S
+import qualified Data.Environment as Env

 -- NOTE: This function can be improved, because we use
@@ -76,7 +77,6 @@ mkUpdateCTE (AnnUpd tn opExps (permFltr, wc) chk _ columnsInfo) =
     tableFltrExpr = toSQLBoolExp (S.QualTable tn) $ andAnnBoolExps permFltr wc
     checkExpr = toSQLBoolExp (S.QualTable tn) chk
-
 expandOperator :: [PGColumnInfo] -> (PGCol, UpdOpExpG S.SQLExp) -> S.SetExpItem
 expandOperator infos (column, op) = S.SetExpItem $ (column,) $ case op of
   UpdSet e -> e
@@ -97,18 +97,6 @@ expandOperator infos (column, op) = S.SetExpItem $ (column,) $ case op of
     Just (PGColumnScalar s) -> S.mkTypeAnn $ PGTypeScalar s
     _ -> S.numericTypeAnn

-execUpdateQuery
-  :: (HasVersion, MonadTx m, MonadIO m)
-  => Bool
-  -> Maybe MutationRemoteJoinCtx
-  -> (AnnUpd, DS.Seq Q.PrepArg)
-  -> m EncJSON
-execUpdateQuery strfyNum remoteJoinCtx (u, p) =
-  runMutation $ mkMutation remoteJoinCtx (uqp1Table u) (updateCTE, p)
-    (uqp1Output u) (uqp1AllCols u) strfyNum
-  where
-    updateCTE = mkUpdateCTE u
-
 convInc
   :: (QErrM m)
   => (PGColumnType -> Value -> m S.SQLExp)
@@ -259,11 +247,28 @@ validateUpdateQuery
 validateUpdateQuery =
   runDMLP1T . validateUpdateQueryWith sessVarFromCurrentSetting binRHSBuilder

+execUpdateQuery
+  ::
+    ( HasVersion
+    , MonadTx m
+    , MonadIO m
+    )
+  => Env.Environment
+  -> Bool
+  -> Maybe MutationRemoteJoinCtx
+  -> (AnnUpd, DS.Seq Q.PrepArg)
+  -> m EncJSON
+execUpdateQuery env strfyNum remoteJoinCtx (u, p) =
+  runMutation env $ mkMutation remoteJoinCtx (uqp1Table u) (updateCTE, p)
+    (uqp1Output u) (uqp1AllCols u) strfyNum
+  where
+    updateCTE = mkUpdateCTE u
+
 runUpdate
   :: ( HasVersion, QErrM m, UserInfoM m, CacheRM m
      , MonadTx m, HasSQLGenCtx m, MonadIO m
      )
-  => UpdateQuery -> m EncJSON
-runUpdate q = do
+  => Env.Environment -> UpdateQuery -> m EncJSON
+runUpdate env q = do
   strfyNum <- stringifyNum <$> askSQLGenCtx
-  validateUpdateQuery q >>= execUpdateQuery strfyNum Nothing
+  validateUpdateQuery q >>= execUpdateQuery env strfyNum Nothing

View File

@@ -44,7 +44,6 @@ import Hasura.Prelude
 import Hasura.Session
 import Hasura.SQL.Types
-
 import Hasura.Db as R
 import Hasura.RQL.Types.Action as R
 import Hasura.RQL.Types.BoolExp as R

View File

@@ -63,6 +63,7 @@ import Language.Haskell.TH.Syntax (Lift, Q, TExp)
 import qualified Data.HashMap.Strict as HM
 import qualified Data.Text as T
+import qualified Data.Environment as Env
 import qualified Database.PG.Query as Q
 import qualified Language.GraphQL.Draft.Syntax as G
 import qualified Language.Haskell.TH.Syntax as TH
@@ -304,8 +305,8 @@ instance FromJSON InputWebhook where
       Left e -> fail $ "Parsing URL template failed: " ++ e
       Right v -> pure $ InputWebhook v

-resolveWebhook :: (QErrM m, MonadIO m) => InputWebhook -> m ResolvedWebhook
-resolveWebhook (InputWebhook urlTemplate) = do
-  eitherRenderedTemplate <- renderURLTemplate urlTemplate
+resolveWebhook :: QErrM m => Env.Environment -> InputWebhook -> m ResolvedWebhook
+resolveWebhook env (InputWebhook urlTemplate) = do
+  let eitherRenderedTemplate = renderURLTemplate env urlTemplate
   either (throw400 Unexpected . T.pack)
     (pure . ResolvedWebhook) eitherRenderedTemplate
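Because the environment is now a value, resolveWebhook drops its MonadIO constraint and renderURLTemplate becomes a pure function of the environment. A minimal sketch of what such pure rendering can look like, assuming templates of the form {{VAR}}/path (this is not the real URLTemplate implementation):

    {-# LANGUAGE OverloadedStrings #-}

    import qualified Data.Environment as Env
    import           Data.Text        (Text)
    import qualified Data.Text        as T

    -- Hypothetical: substitute a single leading {{VAR}} marker, the common
    -- shape of webhook URLs such as "{{WEBHOOK_BASE}}/event".
    renderSimpleTemplate :: Env.Environment -> Text -> Either String Text
    renderSimpleTemplate env t =
      case T.stripPrefix "{{" t >>= breakVar of
        Nothing          -> Right t  -- no variable to substitute
        Just (var, rest) ->
          case Env.lookupEnv env (T.unpack var) of
            Nothing  -> Left ("unbound template variable: " <> T.unpack var)
            Just val -> Right (T.pack val <> rest)
      where
        breakVar s =
          let (var, rest) = T.breakOn "}}" s
          in if T.null rest then Nothing else Just (var, T.drop 2 rest)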

View File

@@ -3,7 +3,6 @@ module Hasura.RQL.Types.RemoteSchema where
 import Hasura.Prelude
 import Hasura.RQL.Types.Common (NonEmptyText)
 import Language.Haskell.TH.Syntax (Lift)
-import System.Environment (lookupEnv)

 import qualified Data.Aeson as J
 import qualified Data.Aeson.Casing as J
@@ -11,6 +10,7 @@ import qualified Data.Aeson.TH as J
 import qualified Data.Text as T
 import qualified Database.PG.Query as Q
 import qualified Network.URI.Extended as N
+import qualified Data.Environment as Env

 import Hasura.Incremental (Cacheable)
 import Hasura.RQL.DDL.Headers (HeaderConf (..))
@@ -77,27 +77,26 @@ newtype RemoteSchemaNameQuery
 $(J.deriveJSON (J.aesonDrop 5 J.snakeCase) ''RemoteSchemaNameQuery)

-getUrlFromEnv :: (MonadIO m, MonadError QErr m) => Text -> m N.URI
-getUrlFromEnv urlFromEnv = do
-  mEnv <- liftIO . lookupEnv $ T.unpack urlFromEnv
-  env <- maybe (throw400 InvalidParams $ envNotFoundMsg urlFromEnv) return
-         mEnv
-  maybe (throw400 InvalidParams $ invalidUri env) return $ N.parseURI env
+getUrlFromEnv :: (MonadIO m, MonadError QErr m) => Env.Environment -> Text -> m N.URI
+getUrlFromEnv env urlFromEnv = do
+  let mEnv = Env.lookupEnv env $ T.unpack urlFromEnv
+  uri <- maybe (throw400 InvalidParams $ envNotFoundMsg urlFromEnv) return mEnv
+  maybe (throw400 InvalidParams $ invalidUri uri) return $ N.parseURI uri
   where
-    invalidUri uri = "not a valid URI: " <> T.pack uri
-    envNotFoundMsg e =
-      "environment variable '" <> e <> "' not set"
+    invalidUri x = "not a valid URI: " <> T.pack x
+    envNotFoundMsg e = "environment variable '" <> e <> "' not set"

 validateRemoteSchemaDef
   :: (MonadError QErr m, MonadIO m)
-  => RemoteSchemaDef
+  => Env.Environment
+  -> RemoteSchemaDef
   -> m RemoteSchemaInfo
-validateRemoteSchemaDef (RemoteSchemaDef mUrl mUrlEnv hdrC fwdHdrs mTimeout) =
+validateRemoteSchemaDef env (RemoteSchemaDef mUrl mUrlEnv hdrC fwdHdrs mTimeout) =
   case (mUrl, mUrlEnv) of
     (Just url, Nothing) ->
       return $ RemoteSchemaInfo url hdrs fwdHdrs timeout
     (Nothing, Just urlEnv) -> do
-      url <- getUrlFromEnv urlEnv
+      url <- getUrlFromEnv env urlEnv
       return $ RemoteSchemaInfo url hdrs fwdHdrs timeout
     (Nothing, Nothing) ->
       throw400 InvalidParams "both `url` and `url_from_env` can't be empty"
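The rewritten getUrlFromEnv is now a pure lookup followed by URI parsing. The same two-step pattern, written as a standalone function against the plain network-uri API (names here are illustrative, and Either stands in for Hasura's QErr machinery):

    import qualified Data.Environment as Env
    import qualified Network.URI      as N

    -- Illustrative analogue of getUrlFromEnv: pure lookup, then URI parsing.
    uriFromEnv :: Env.Environment -> String -> Either String N.URI
    uriFromEnv env var = do
      raw <- maybe (Left ("environment variable '" <> var <> "' not set")) Right
                   (Env.lookupEnv env var)
      maybe (Left ("not a valid URI: " <> raw)) Right (N.parseURI raw)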

View File

@@ -139,7 +139,6 @@ import Hasura.RQL.Types.Table
 import Hasura.Session
 import Hasura.SQL.Types
-
 import Data.Aeson
 import Data.Aeson.Casing
 import Data.Aeson.TH

View File

@@ -9,6 +9,7 @@ import Data.Aeson.Casing
 import Data.Aeson.TH
 import Data.Time (UTCTime)

+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as HM
 import qualified Data.Text as T
 import qualified Database.PG.Query as Q
@@ -41,7 +42,6 @@ import Hasura.Server.Utils
 import Hasura.Server.Version (HasVersion)
 import Hasura.Session
-
 data RQLQueryV1
   = RQAddExistingTableOrView !TrackTable
   | RQTrackTable !TrackTable
@@ -191,12 +191,12 @@ recordSchemaUpdate instanceId invalidations =
 runQuery
   :: (HasVersion, MonadIO m, MonadError QErr m)
-  => PGExecCtx -> InstanceId
+  => Env.Environment -> PGExecCtx -> InstanceId
   -> UserInfo -> RebuildableSchemaCache Run -> HTTP.Manager
   -> SQLGenCtx -> SystemDefined -> RQLQuery -> m (EncJSON, RebuildableSchemaCache Run)
-runQuery pgExecCtx instanceId userInfo sc hMgr sqlGenCtx systemDefined query = do
+runQuery env pgExecCtx instanceId userInfo sc hMgr sqlGenCtx systemDefined query = do
   accessMode <- getQueryAccessMode query
-  resE <- runQueryM query
+  resE <- runQueryM env query
     & runHasSystemDefinedT systemDefined
     & runCacheRWT sc
     & peelRun runCtx pgExecCtx accessMode
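runQuery now expects the environment to be handed in from above; the intended usage pattern appears to be capturing the process environment once at startup and passing the snapshot down. A minimal sketch of that capture-once pattern (the lookup key is only an example):

    import qualified Data.Environment as Env

    main :: IO ()
    main = do
      -- Capture the process environment exactly once; everything downstream
      -- (runQuery, mkWaiApp, ...) receives this pure snapshot.
      env <- Env.getEnvironment
      print (Env.lookupEnv env "HASURA_GRAPHQL_SERVER_PORT")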
@@ -221,85 +221,85 @@ runQuery pgExecCtx instanceId userInfo sc hMgr sqlGenCtx systemDefined query = d
 -- by hand.
 queryModifiesSchemaCache :: RQLQuery -> Bool
 queryModifiesSchemaCache (RQV1 qi) = case qi of
   RQAddExistingTableOrView _      -> True
   RQTrackTable _                  -> True
   RQUntrackTable _                -> True
   RQTrackFunction _               -> True
   RQUntrackFunction _             -> True
   RQSetTableIsEnum _              -> True
   RQCreateObjectRelationship _    -> True
   RQCreateArrayRelationship  _    -> True
   RQDropRelationship _            -> True
   RQSetRelationshipComment _      -> False
   RQRenameRelationship _          -> True
   RQAddComputedField _            -> True
   RQDropComputedField _           -> True
   RQCreateRemoteRelationship _    -> True
   RQUpdateRemoteRelationship _    -> True
   RQDeleteRemoteRelationship _    -> True
   RQCreateInsertPermission _      -> True
   RQCreateSelectPermission _      -> True
   RQCreateUpdatePermission _      -> True
   RQCreateDeletePermission _      -> True
   RQDropInsertPermission _        -> True
   RQDropSelectPermission _        -> True
   RQDropUpdatePermission _        -> True
   RQDropDeletePermission _        -> True
   RQSetPermissionComment _        -> False
   RQGetInconsistentMetadata _     -> False
   RQDropInconsistentMetadata _    -> True
   RQInsert _                      -> False
   RQSelect _                      -> False
   RQUpdate _                      -> False
   RQDelete _                      -> False
   RQCount _                       -> False
   RQAddRemoteSchema _             -> True
   RQRemoveRemoteSchema _          -> True
   RQReloadRemoteSchema _          -> True
   RQIntrospectRemoteSchema _      -> False
   RQCreateEventTrigger _          -> True
   RQDeleteEventTrigger _          -> True
   RQRedeliverEvent _              -> False
   RQInvokeEventTrigger _          -> False
   RQCreateCronTrigger _           -> True
   RQDeleteCronTrigger _           -> True
   RQCreateScheduledEvent _        -> False
   RQCreateQueryCollection _       -> True
   RQDropQueryCollection _         -> True
   RQAddQueryToCollection _        -> True
   RQDropQueryFromCollection _     -> True
   RQAddCollectionToAllowlist _    -> True
   RQDropCollectionFromAllowlist _ -> True
   RQRunSql q                      -> isSchemaCacheBuildRequiredRunSQL q
   RQReplaceMetadata _             -> True
   RQExportMetadata _              -> False
   RQClearMetadata _               -> True
   RQReloadMetadata _              -> True
   RQCreateAction _                -> True
   RQDropAction _                  -> True
   RQUpdateAction _                -> True
   RQCreateActionPermission _      -> True
   RQDropActionPermission _        -> True
   RQDumpInternalState _           -> False
   RQSetCustomTypes _              -> True
   RQBulk qs                       -> any queryModifiesSchemaCache qs

 queryModifiesSchemaCache (RQV2 qi) = case qi of
   RQV2TrackTable _ -> True
@@ -346,9 +346,10 @@ runQueryM
   , MonadIO m, MonadUnique m, HasHttpManager m, HasSQLGenCtx m
   , HasSystemDefined m
   )
-  => RQLQuery
+  => Env.Environment
+  -> RQLQuery
   -> m EncJSON
-runQueryM rq = withPathK "args" $ case rq of
+runQueryM env rq = withPathK "args" $ case rq of
   RQV1 q -> runQueryV1M q
   RQV2 q -> runQueryV2M q
   where
@@ -384,13 +385,13 @@ runQueryM rq = withPathK "args" $ case rq of
     RQGetInconsistentMetadata q -> runGetInconsistentMetadata q
     RQDropInconsistentMetadata q -> runDropInconsistentMetadata q

-    RQInsert q -> runInsert q
+    RQInsert q -> runInsert env q
     RQSelect q -> runSelect q
-    RQUpdate q -> runUpdate q
-    RQDelete q -> runDelete q
+    RQUpdate q -> runUpdate env q
+    RQDelete q -> runDelete env q
     RQCount q -> runCount q

-    RQAddRemoteSchema q -> runAddRemoteSchema q
+    RQAddRemoteSchema q -> runAddRemoteSchema env q
     RQRemoveRemoteSchema q -> runRemoveRemoteSchema q
     RQReloadRemoteSchema q -> runReloadRemoteSchema q
     RQIntrospectRemoteSchema q -> runIntrospectRemoteSchema q
@@ -433,7 +434,7 @@ runQueryM rq = withPathK "args" $ case rq of
     RQSetCustomTypes q -> runSetCustomTypes q

-    RQBulk qs -> encJFromList <$> indexedMapM runQueryM qs
+    RQBulk qs -> encJFromList <$> indexedMapM (runQueryM env) qs

   runQueryV2M = \case
     RQV2TrackTable q -> runTrackTableV2Q q
@@ -444,86 +445,86 @@ runQueryM rq = withPathK "args" $ case rq of
 requiresAdmin :: RQLQuery -> Bool
 requiresAdmin = \case
   RQV1 q -> case q of
     RQAddExistingTableOrView _      -> True
     RQTrackTable _                  -> True
     RQUntrackTable _                -> True
     RQSetTableIsEnum _              -> True
     RQTrackFunction _               -> True
     RQUntrackFunction _             -> True
     RQCreateObjectRelationship _    -> True
     RQCreateArrayRelationship  _    -> True
     RQDropRelationship _            -> True
     RQSetRelationshipComment _      -> True
     RQRenameRelationship _          -> True
     RQAddComputedField _            -> True
     RQDropComputedField _           -> True
     RQCreateRemoteRelationship _    -> True
     RQUpdateRemoteRelationship _    -> True
     RQDeleteRemoteRelationship _    -> True
     RQCreateInsertPermission _      -> True
     RQCreateSelectPermission _      -> True
     RQCreateUpdatePermission _      -> True
     RQCreateDeletePermission _      -> True
     RQDropInsertPermission _        -> True
     RQDropSelectPermission _        -> True
     RQDropUpdatePermission _        -> True
     RQDropDeletePermission _        -> True
     RQSetPermissionComment _        -> True
     RQGetInconsistentMetadata _     -> True
     RQDropInconsistentMetadata _    -> True
     RQInsert _                      -> False
     RQSelect _                      -> False
     RQUpdate _                      -> False
     RQDelete _                      -> False
     RQCount _                       -> False
     RQAddRemoteSchema _             -> True
     RQRemoveRemoteSchema _          -> True
     RQReloadRemoteSchema _          -> True
     RQIntrospectRemoteSchema _      -> True
     RQCreateEventTrigger _          -> True
     RQDeleteEventTrigger _          -> True
     RQRedeliverEvent _              -> True
     RQInvokeEventTrigger _          -> True
     RQCreateCronTrigger _           -> True
     RQDeleteCronTrigger _           -> True
     RQCreateScheduledEvent _        -> True
     RQCreateQueryCollection _       -> True
     RQDropQueryCollection _         -> True
     RQAddQueryToCollection _        -> True
     RQDropQueryFromCollection _     -> True
     RQAddCollectionToAllowlist _    -> True
     RQDropCollectionFromAllowlist _ -> True
     RQReplaceMetadata _             -> True
     RQClearMetadata _               -> True
     RQExportMetadata _              -> True
     RQReloadMetadata _              -> True
     RQCreateAction _                -> True
     RQDropAction _                  -> True
     RQUpdateAction _                -> True
     RQCreateActionPermission _      -> True
     RQDropActionPermission _        -> True
     RQDumpInternalState _           -> True
     RQSetCustomTypes _              -> True
     RQRunSql _                      -> True
     RQBulk qs                       -> any requiresAdmin qs

   RQV2 q -> case q of
     RQV2TrackTable _ -> True

View File

@@ -3,10 +3,24 @@
 module Hasura.Server.App where

 import Hasura.Prelude hiding (get, put)

+import Control.Concurrent.MVar.Lifted
+import Control.Exception (IOException, try)
+import Control.Monad.Trans.Control (MonadBaseControl)
+import Control.Monad.Stateless
+import Data.Aeson hiding (json)
+import Data.Int (Int64)
+import Data.IORef
+import Data.Time.Clock (UTCTime)
+import Data.Time.Clock.POSIX (getPOSIXTime)
+import Network.Mime (defaultMimeLookup)
+import System.FilePath (joinPath, takeFileName)
+import Web.Spock.Core ((<//>))
+
 import qualified Control.Concurrent.Async.Lifted.Safe as LA
 import qualified Data.ByteString.Lazy as BL
 import qualified Data.CaseInsensitive as CI
+import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as M
 import qualified Data.HashSet as S
 import qualified Data.Text as T
@@ -20,21 +34,6 @@ import qualified System.Metrics.Json as EKG
 import qualified Text.Mustache as M
 import qualified Web.Spock.Core as Spock

-import Control.Concurrent.MVar.Lifted
-import Control.Exception (IOException, try)
-import Control.Monad.Stateless
-import Control.Monad.Trans.Control (MonadBaseControl)
-import Control.Monad.Unique
-import Data.Aeson hiding (json)
-import Data.Int (Int64)
-import Data.IORef
-import Data.Time.Clock (UTCTime)
-import Data.Time.Clock.POSIX (getPOSIXTime)
-import Network.Mime (defaultMimeLookup)
-import System.Exit (exitFailure)
-import System.FilePath (joinPath, takeFileName)
-import Web.Spock.Core ((<//>))
-
 import qualified Hasura.GraphQL.Execute as E
 import qualified Hasura.GraphQL.Execute.LiveQuery as EL
 import qualified Hasura.GraphQL.Explain as GE
@@ -104,6 +103,7 @@ data ServerCtx
   , scEnableAllowlist :: !Bool
   , scEkgStore :: !EKG.Store
   , scResponseInternalErrorsConfig :: !ResponseInternalErrorsConfig
+  , scEnvironment :: !Env.Environment
   }

 data HandlerCtx
@@ -144,7 +144,7 @@ withSCUpdate
   :: (MonadIO m, MonadBaseControl IO m)
   => SchemaCacheRef -> L.Logger L.Hasura -> m (a, RebuildableSchemaCache Run) -> m a
 withSCUpdate scr logger action = do
-  withMVarMasked lk $ \()-> do
+  withMVarMasked lk $ \() -> do
     (!res, !newSC) <- action
     liftIO $ do
       -- update schemacache in IO reference
@@ -197,10 +197,10 @@ onlyAdmin = do
 buildQCtx :: (MonadIO m) => Handler m QCtx
 buildQCtx = do
-  scRef <- scCacheRef . hcServerCtx <$> ask
+  scRef <- asks (scCacheRef . hcServerCtx)
   userInfo <- asks hcUser
   cache <- getSCFromRef scRef
-  sqlGenCtx <- scSQLGenCtx . hcServerCtx <$> ask
+  sqlGenCtx <- asks (scSQLGenCtx . hcServerCtx)
   return $ QCtx userInfo cache sqlGenCtx

 setHeader :: MonadIO m => HTTP.Header -> Spock.ActionT m ()
@@ -208,7 +208,7 @@ setHeader (headerName, headerValue) =
   Spock.setHeader (bsToTxt $ CI.original headerName) (bsToTxt headerValue)

 -- | Typeclass representing the metadata API authorization effect
-class MetadataApiAuthorization m where
+class Monad m => MetadataApiAuthorization m where
   authorizeMetadataApi :: HasVersion => RQLQuery -> UserInfo -> Handler m ()

 -- | The config API (/v1alpha1/config) handler
@@ -242,7 +242,7 @@ mkSpockAction serverCtx qErrEncoder qErrModifier apiHandler = do
   userInfoE <- fmap fst <$> lift (resolveUserInfo logger manager headers authMode)
   userInfo <- either (logErrorAndResp Nothing requestId req (Left reqBody) False headers . qErrModifier)
               return userInfoE

   let handlerState = HandlerCtx serverCtx userInfo headers requestId ipAddress
       includeInternal = shouldIncludeInternal (_uiRole userInfo) $
@@ -265,7 +265,7 @@ mkSpockAction serverCtx qErrEncoder qErrModifier apiHandler = do
   -- log and return result
   case modResult of
     Left err -> let jErr = maybe (Left reqBody) (Right . toJSON) q
                 in logErrorAndResp (Just userInfo) requestId req jErr includeInternal headers err
     Right res -> logSuccessAndResp (Just userInfo) requestId req (fmap toJSON q) res (Just (ioWaitTime, serviceTime)) headers
   where
@@ -304,50 +304,55 @@
       mapM_ setHeader allRespHeaders
       Spock.lazyBytes compressedResp

 v1QueryHandler
-  :: (HasVersion, MonadIO m, MonadUnique m, MonadBaseControl IO m, MetadataApiAuthorization m)
-  => RQLQuery -> Handler m (HttpResponse EncJSON)
+  :: (HasVersion, MonadIO m, MonadBaseControl IO m, MetadataApiAuthorization m)
+  => RQLQuery
+  -> Handler m (HttpResponse EncJSON)
 v1QueryHandler query = do
   userInfo <- asks hcUser
   authorizeMetadataApi query userInfo
-  scRef <- scCacheRef . hcServerCtx <$> ask
-  logger <- scLogger . hcServerCtx <$> ask
-  res <- bool (fst <$> dbAction) (withSCUpdate scRef logger dbAction) $
-         queryModifiesSchemaCache query
+  scRef <- asks (scCacheRef . hcServerCtx)
+  logger <- asks (scLogger . hcServerCtx)
+  res <- bool (fst <$> dbAction) (withSCUpdate scRef logger dbAction) $ queryModifiesSchemaCache query
   return $ HttpResponse res []
   where
     -- Hit postgres
     dbAction = do
       userInfo <- asks hcUser
-      scRef <- scCacheRef . hcServerCtx <$> ask
+      scRef <- asks (scCacheRef . hcServerCtx)
       schemaCache <- fmap fst $ liftIO $ readIORef $ _scrCache scRef
-      httpMgr <- scManager . hcServerCtx <$> ask
-      sqlGenCtx <- scSQLGenCtx . hcServerCtx <$> ask
-      pgExecCtx <- scPGExecCtx . hcServerCtx <$> ask
-      instanceId <- scInstanceId . hcServerCtx <$> ask
-      runQuery pgExecCtx instanceId userInfo schemaCache httpMgr sqlGenCtx (SystemDefined False) query
+      httpMgr <- asks (scManager . hcServerCtx)
+      sqlGenCtx <- asks (scSQLGenCtx . hcServerCtx)
+      pgExecCtx <- asks (scPGExecCtx . hcServerCtx)
+      instanceId <- asks (scInstanceId . hcServerCtx)
+      env <- asks (scEnvironment . hcServerCtx)
+      runQuery env pgExecCtx instanceId userInfo schemaCache httpMgr sqlGenCtx (SystemDefined False) query

 v1Alpha1GQHandler
   :: (HasVersion, MonadIO m, E.MonadGQLExecutionCheck m, MonadQueryLog m)
   => E.GraphQLQueryType -> GH.GQLBatchedReqs GH.GQLQueryText -> Handler m (HttpResponse EncJSON)
 v1Alpha1GQHandler queryType query = do
   userInfo <- asks hcUser
   reqHeaders <- asks hcReqHeaders
   ipAddress <- asks hcSourceIpAddress
   requestId <- asks hcRequestId
-  manager <- scManager . hcServerCtx <$> ask
-  scRef <- scCacheRef . hcServerCtx <$> ask
+  manager <- asks (scManager . hcServerCtx)
+  scRef <- asks (scCacheRef . hcServerCtx)
   (sc, scVer) <- liftIO $ readIORef $ _scrCache scRef
-  pgExecCtx <- scPGExecCtx . hcServerCtx <$> ask
-  sqlGenCtx <- scSQLGenCtx . hcServerCtx <$> ask
-  planCache <- scPlanCache . hcServerCtx <$> ask
-  enableAL <- scEnableAllowlist . hcServerCtx <$> ask
-  logger <- scLogger . hcServerCtx <$> ask
+  pgExecCtx <- asks (scPGExecCtx . hcServerCtx)
+  sqlGenCtx <- asks (scSQLGenCtx . hcServerCtx)
+  planCache <- asks (scPlanCache . hcServerCtx)
+  enableAL <- asks (scEnableAllowlist . hcServerCtx)
+  logger <- asks (scLogger . hcServerCtx)
   responseErrorsConfig <- asks (scResponseInternalErrorsConfig . hcServerCtx)
+  env <- asks (scEnvironment . hcServerCtx)

   let execCtx = E.ExecutionCtx logger sqlGenCtx pgExecCtx planCache
                 (lastBuiltSchemaCache sc) scVer manager enableAL

   flip runReaderT execCtx $
-    GH.runGQBatched requestId responseErrorsConfig userInfo ipAddress reqHeaders queryType query
+    GH.runGQBatched env requestId responseErrorsConfig userInfo ipAddress reqHeaders queryType query

 v1GQHandler
   :: (HasVersion, MonadIO m, E.MonadGQLExecutionCheck m, MonadQueryLog m)
@@ -357,12 +362,17 @@ v1GQHandler = v1Alpha1GQHandler E.QueryHasura
 v1GQRelayHandler
   :: (HasVersion, MonadIO m, E.MonadGQLExecutionCheck m, MonadQueryLog m)
-  => GH.GQLBatchedReqs GH.GQLQueryText -> Handler m (HttpResponse EncJSON)
+  => GH.GQLBatchedReqs GH.GQLQueryText
+  -> Handler m (HttpResponse EncJSON)
 v1GQRelayHandler = v1Alpha1GQHandler E.QueryRelay

 gqlExplainHandler
-  :: (HasVersion, MonadIO m)
-  => GE.GQLExplain -> Handler m (HttpResponse EncJSON)
+  :: forall m
+   . ( HasVersion
+     , MonadIO m
+     )
+  => GE.GQLExplain
+  -> Handler m (HttpResponse EncJSON)
 gqlExplainHandler query = do
   onlyAdmin
   scRef <- scCacheRef . hcServerCtx <$> ask
@@ -375,7 +385,7 @@ gqlExplainHandler query = do
 v1Alpha1PGDumpHandler :: (MonadIO m) => PGD.PGDumpReqBody -> Handler m APIResp
 v1Alpha1PGDumpHandler b = do
   onlyAdmin
-  ci <- scConnInfo . hcServerCtx <$> ask
+  ci <- asks (scConnInfo . hcServerCtx)
   output <- PGD.execPGDump b ci
   return $ RawResp $ HttpResponse output [sqlHeader]
@@ -438,7 +448,7 @@ queryParsers =
     return $ f q

 legacyQueryHandler
-  :: (HasVersion, MonadIO m, MonadUnique m, MonadBaseControl IO m, MetadataApiAuthorization m)
+  :: (HasVersion, MonadIO m, MonadBaseControl IO m, MetadataApiAuthorization m)
   => TableName -> T.Text -> Object
   -> Handler m (HttpResponse EncJSON)
 legacyQueryHandler tn queryType req =
@@ -460,58 +470,67 @@ configApiGetHandler serverCtx@ServerCtx{..} consoleAssetsDir =
               (EL._lqsOptions $ scLQState) consoleAssetsDir
       return $ JSONResp $ HttpResponse (encJFromJValue res) []

-initErrExit :: QErr -> IO a
-initErrExit e = do
-  putStrLn $
-    "failed to build schema-cache because of inconsistent metadata: "
-    <> (show e)
-  exitFailure
-
 data HasuraApp
   = HasuraApp
   { _hapApplication :: !Wai.Application
   , _hapSchemaRef :: !SchemaCacheRef
   , _hapCacheBuildTime :: !(Maybe UTCTime)
-  , _hapShutdown :: !(IO ())
+  , _hapShutdownWsServer :: !(IO ())
   }

+-- TODO: Put Env into ServerCtx?
 mkWaiApp
   :: forall m.
    ( HasVersion
    , MonadIO m
-   , MonadUnique m
+   -- , MonadUnique m
    , MonadStateless IO m
    , LA.Forall (LA.Pure m)
    , ConsoleRenderer m
    , HttpLog m
+   , MonadQueryLog m
    , UserAuthentication m
    , MetadataApiAuthorization m
    , E.MonadGQLExecutionCheck m
    , MonadConfigApiHandler m
-   , MonadQueryLog m
    , WS.MonadWSLog m
    )
-  => Q.TxIsolation
+  => Env.Environment
+  -- ^ Set of environment variables for reference in UIs
+  -> Q.TxIsolation
+  -- ^ postgres transaction isolation to be used in the entire app
   -> L.Logger L.Hasura
+  -- ^ a 'L.Hasura' specific logger
   -> SQLGenCtx
   -> Bool
+  -- ^ is AllowList enabled - TODO: change this boolean to sumtype
   -> Q.PGPool
   -> Maybe PGExecCtx
   -> Q.ConnInfo
+  -- ^ postgres connection parameters
   -> HTTP.Manager
+  -- ^ HTTP manager so that we can re-use sessions
   -> AuthMode
+  -- ^ 'AuthMode' in which the application should operate in
   -> CorsConfig
   -> Bool
+  -- ^ is console enabled - TODO: better type
   -> Maybe Text
+  -- ^ filepath to the console static assets directory - TODO: better type
   -> Bool
+  -- ^ is telemetry enabled
   -> InstanceId
+  -- ^ each application, when run, gets an 'InstanceId'. this is used at various places including
+  -- schema syncing and telemetry
   -> S.HashSet API
+  -- ^ set of the enabled 'API's
   -> EL.LiveQueriesOptions
   -> E.PlanCacheOptions
   -> ResponseInternalErrorsConfig
   -> (RebuildableSchemaCache Run, Maybe UTCTime)
   -> m HasuraApp
-mkWaiApp isoLevel logger sqlGenCtx enableAL pool pgExecCtxCustom ci httpManager mode corsCfg enableConsole consoleAssetsDir
+mkWaiApp env isoLevel logger sqlGenCtx enableAL pool pgExecCtxCustom ci httpManager mode corsCfg enableConsole consoleAssetsDir
   enableTelemetry instanceId apis lqOpts planCacheOptions responseErrorsConfig (schemaCache, cacheBuiltTime) = do

   (planCache, schemaCacheRef) <- initialiseCache
@@ -540,6 +559,7 @@ mkWaiApp isoLevel logger sqlGenCtx enableAL pool pgExecCtxCustom ci httpManager
         , scLQState = lqState
         , scEnableAllowlist = enableAL
         , scEkgStore = ekgStore
+        , scEnvironment = env
        , scResponseInternalErrorsConfig = responseErrorsConfig
        }
@@ -551,7 +571,7 @@ mkWaiApp isoLevel logger sqlGenCtx enableAL pool pgExecCtxCustom ci httpManager
   Spock.spockAsApp $ Spock.spockT lowerIO $
     httpApp corsCfg serverCtx enableConsole consoleAssetsDir enableTelemetry

-  let wsServerApp = WS.createWSServerApp mode wsServerEnv
+  let wsServerApp = WS.createWSServerApp env mode wsServerEnv -- TODO: Lyndon: Can we pass environment through wsServerEnv?
       stopWSServer = WS.stopWSServerApp wsServerEnv

   waiApp <- liftWithStateless $ \lowerIO ->
@@ -570,10 +590,11 @@
     let cacheRef = SchemaCacheRef cacheLock cacheCell (E.clearPlanCache planCache)
     pure (planCache, cacheRef)
 httpApp
   :: ( HasVersion
      , MonadIO m
-     , MonadUnique m
+     -- , MonadUnique m
      , MonadBaseControl IO m
      , ConsoleRenderer m
      , HttpLog m
@@ -626,7 +647,6 @@ httpApp corsCfg serverCtx enableConsole consoleAssetsDir enableTelemetry = do
         mkAPIRespHandler $ legacyQueryHandler (TableName tableName) queryType
-
     when enablePGDump $
       Spock.post "v1alpha1/pg_dump" $ spockAction encodeQErr id $
         mkPostHandler v1Alpha1PGDumpHandler
@@ -640,7 +660,7 @@
         mkPostHandler $ mkAPIRespHandler v1GQHandler

       Spock.post "v1beta1/relay" $ spockAction GH.encodeGQErr allMod200 $
-        mkPostHandler $ mkAPIRespHandler v1GQRelayHandler
+        mkPostHandler $ mkAPIRespHandler $ v1GQRelayHandler

     when (isDeveloperAPIEnabled serverCtx) $ do
       Spock.get "dev/ekg" $ spockAction encodeQErr id $
@@ -679,18 +699,13 @@
       -> (QErr -> QErr) -> APIHandler m a -> Spock.ActionT m ()
     spockAction = mkSpockAction serverCtx

     -- all graphql errors should be of type 200
     allMod200 qe = qe { qeStatus = HTTP.status200 }
-
-    gqlExplainAction =
-      spockAction encodeQErr id $ mkPostHandler $
-        mkAPIRespHandler gqlExplainHandler
-
-    enableGraphQL = isGraphQLEnabled serverCtx
-    enableMetadata = isMetadataEnabled serverCtx
-    enablePGDump = isPGDumpEnabled serverCtx
-    enableConfig = isConfigEnabled serverCtx
+    gqlExplainAction = spockAction encodeQErr id $ mkPostHandler $ mkAPIRespHandler gqlExplainHandler
+    enableGraphQL = isGraphQLEnabled serverCtx
+    enableMetadata = isMetadataEnabled serverCtx
+    enablePGDump = isPGDumpEnabled serverCtx
+    enableConfig = isConfigEnabled serverCtx

     serveApiConsole = do
       -- redirect / to /console
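With scEnvironment stored in ServerCtx, handlers retrieve the environment with the same asks pattern used for the other fields above. A self-contained sketch of that reader pattern (the Ctx record below is a stand-in for ServerCtx/HandlerCtx, not the real types):

    import           Control.Monad.Reader
    import qualified Data.Environment     as Env

    -- Stand-in context record; the real code nests Env inside ServerCtx.
    newtype Ctx = Ctx { ctxEnvironment :: Env.Environment }

    lookupInHandler :: MonadReader Ctx m => String -> m (Maybe String)
    lookupInHandler var = do
      env <- asks ctxEnvironment
      pure (Env.lookupEnv env var)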

View File

@@ -1,4 +1,4 @@
-{-# LANGUAGE RecordWildCards #-}
+{-# LANGUAGE DerivingStrategies #-}

 module Hasura.Server.Auth
   ( getUserInfo
@@ -24,22 +24,24 @@ module Hasura.Server.Auth
   , getUserInfoWithExpTime_
   ) where

+import qualified Control.Concurrent.Async.Lifted.Safe as LA
 import Control.Concurrent.Extended (forkImmortal)
+import Control.Monad.Trans.Control (MonadBaseControl)
 import Data.IORef (newIORef)
 import Data.Time.Clock (UTCTime)
 import Hasura.Server.Version (HasVersion)

 import qualified Crypto.Hash as Crypto
 import qualified Data.Text as T
 import qualified Data.Text.Encoding as T
 import qualified Network.HTTP.Client as H
 import qualified Network.HTTP.Types as N

 import Hasura.Logging
 import Hasura.Prelude
 import Hasura.RQL.Types
 import Hasura.Server.Auth.JWT hiding (processJwt_)
 import Hasura.Server.Auth.WebHook
 import Hasura.Server.Utils
 import Hasura.Session
@@ -63,10 +65,10 @@ class (Monad m) => UserAuthentication m where
 --
 -- Although this exists only in memory we store only a hash of the admin secret
 -- primarily in order to:
--- --
--- -- - prevent theoretical timing attacks from a naive `==` check
--- -- - prevent misuse or inadvertent leaking of the secret
--- --
+--
+-- - prevent theoretical timing attacks from a naive `==` check
+-- - prevent misuse or inadvertent leaking of the secret
+--
 newtype AdminSecretHash = AdminSecretHash (Crypto.Digest Crypto.SHA512)
   deriving (Ord, Eq)
@@ -99,7 +101,8 @@ data AuthMode
 setupAuthMode
   :: ( HasVersion
      , MonadIO m
-     , MonadError T.Text m
+     , MonadBaseControl IO m
+     , LA.Forall (LA.Pure m)
      )
   => Maybe AdminSecretHash
   -> Maybe AuthHook
@@ -107,7 +110,7 @@ setupAuthMode
   -> Maybe RoleName
   -> H.Manager
   -> Logger Hasura
-  -> m AuthMode
+  -> ExceptT Text m AuthMode
 setupAuthMode mAdminSecretHash mWebHook mJwtSecret mUnAuthRole httpManager logger =
   case (mAdminSecretHash, mWebHook, mJwtSecret) of
     (Just hash, Nothing, Nothing) -> return $ AMAdminSecret hash mUnAuthRole
@@ -139,7 +142,15 @@ setupAuthMode mAdminSecretHash mWebHook mJwtSecret mUnAuthRole httpManager logge
     -- | Given the 'JWTConfig' (the user input of JWT configuration), create
     -- the 'JWTCtx' (the runtime JWT config used)
-    mkJwtCtx :: (HasVersion, MonadIO m, MonadError T.Text m) => JWTConfig -> m JWTCtx
+    -- mkJwtCtx :: HasVersion => JWTConfig -> m JWTCtx
+    mkJwtCtx
+      :: ( HasVersion
+         , MonadIO m
+         , MonadBaseControl IO m
+         , LA.Forall (LA.Pure m)
+         )
+      => JWTConfig
+      -> ExceptT T.Text m JWTCtx
     mkJwtCtx JWTConfig{..} = do
       jwkRef <- case jcKeyOrUrl of
         Left jwk -> liftIO $ newIORef (JWKSet [jwk])
@@ -155,7 +166,7 @@
           case maybeExpiry of
             Nothing -> return ref
             Just time -> do
-              void $ liftIO $ forkImmortal "jwkRefreshCtrl" logger $
+              void . lift $ forkImmortal "jwkRefreshCtrl" logger $
                 jwkRefreshCtrl logger httpManager url ref (convertDuration time)
               return ref
@@ -171,7 +182,7 @@
       JFEExpiryParseError _ _ -> return Nothing

 getUserInfo
-  :: (HasVersion, MonadIO m, MonadError QErr m)
+  :: (HasVersion, MonadIO m, MonadBaseControl IO m, MonadError QErr m)
   => Logger Hasura
   -> H.Manager
   -> [N.Header]
@@ -181,7 +192,7 @@ getUserInfo l m r a = fst <$> getUserInfoWithExpTime l m r a

 -- | Authenticate the request using the headers and the configured 'AuthMode'.
 getUserInfoWithExpTime
-  :: forall m. (HasVersion, MonadIO m, MonadError QErr m)
+  :: forall m. (HasVersion, MonadIO m, MonadBaseControl IO m, MonadError QErr m)
   => Logger Hasura
   -> H.Manager
   -> [N.Header]

View File

@@ -17,8 +17,9 @@ module Hasura.Server.Auth.JWT
   , defaultRoleClaim
   ) where

-import Control.Exception (try)
+import Control.Exception.Lifted (try)
 import Control.Lens
+import Control.Monad.Trans.Control (MonadBaseControl)
 import Control.Monad.Trans.Maybe
 import Data.IORef (IORef, readIORef, writeIORef)
@@ -126,21 +127,22 @@ defaultClaimNs = "https://hasura.io/jwt/claims"
 -- | An action that refreshes the JWK at intervals in an infinite loop.
 jwkRefreshCtrl
-  :: HasVersion
+  :: (HasVersion, MonadIO m, MonadBaseControl IO m)
   => Logger Hasura
   -> HTTP.Manager
   -> URI
   -> IORef Jose.JWKSet
   -> DiffTime
-  -> IO void
-jwkRefreshCtrl logger manager url ref time = liftIO $ do
-  C.sleep time
-  forever $ do
+  -> m void
+jwkRefreshCtrl logger manager url ref time = do
+  liftIO $ C.sleep time
+  forever do
     res <- runExceptT $ updateJwkRef logger manager url ref
     mTime <- either (const $ logNotice >> return Nothing) return res
     -- if can't parse time from header, defaults to 1 min
+    -- let delay = maybe (minutes 1) fromUnits mTime
     let delay = maybe (minutes 1) (convertDuration) mTime
-    C.sleep delay
+    liftIO $ C.sleep delay
   where
     logNotice = do
       let err = JwkRefreshLog LevelInfo (Just "retrying again in 60 secs") Nothing
@@ -150,6 +152,7 @@ jwkRefreshCtrl logger manager url ref time = liftIO $ do
 updateJwkRef
   :: ( HasVersion
      , MonadIO m
+     , MonadBaseControl IO m
      , MonadError JwkFetchError m
      )
   => Logger Hasura
@@ -158,11 +161,13 @@ updateJwkRef
   -> IORef Jose.JWKSet
   -> m (Maybe NominalDiffTime)
 updateJwkRef (Logger logger) manager url jwkRef = do
-  let options = wreqOptions manager []
-      urlT = T.pack $ show url
+  let urlT = T.pack $ show url
       infoMsg = "refreshing JWK from endpoint: " <> urlT
   liftIO $ logger $ JwkRefreshLog LevelInfo (Just infoMsg) Nothing
-  res <- liftIO $ try $ Wreq.getWith options $ show url
+  res <- try do
+    initReq <- liftIO $ HTTP.parseRequest $ show url
+    let req = initReq { HTTP.requestHeaders = addDefaultHeaders (HTTP.requestHeaders initReq) }
+    liftIO $ HTTP.httpLbs req manager
   resp <- either logAndThrowHttp return res
   let status = resp ^. Wreq.responseStatus
       respBody = resp ^. Wreq.responseBody
@@ -311,9 +316,9 @@ processAuthZHeader jwtCtx@JWTCtx{jcxClaimNs, jcxClaimsFormat} authzHeader = do
     ClaimNsPath path -> parseIValueJsonValue $ executeJSONPath path (J.toJSON $ claims ^. Jose.unregisteredClaims)
   hasuraClaimsV <- maybe claimsNotFound return mHasuraClaims

   -- return hasura claims value as an object. parse from string possibly
   (, expTimeM) <$> parseObjectFromString hasuraClaimsV

   where
     parseAuthzHeader = do
       let tokenParts = BLC.words authzHeader

View File

@@ -5,13 +5,12 @@ module Hasura.Server.Auth.JWT.Logging
 where

 import Data.Aeson
-import Network.URI (URI)

 import Hasura.HTTP
 import Hasura.Logging (EngineLogType (..), Hasura, InternalLogTypes (..),
                        LogLevel (..), ToEngineLog (..))
 import Hasura.Prelude
 import Hasura.Server.Logging ()
+import Network.URI (URI)

 import qualified Data.ByteString.Lazy as BL
 import qualified Data.Text as T

View File

@@ -5,20 +5,21 @@ module Hasura.Server.Auth.WebHook
   , userInfoFromAuthHook
   ) where

-import Control.Exception (try)
+import Control.Exception.Lifted (try)
 import Control.Lens
+import Control.Monad.Trans.Control (MonadBaseControl)
 import Control.Monad.Trans.Maybe
 import Data.Aeson
 import Data.Time.Clock (UTCTime, addUTCTime, getCurrentTime)
 import Hasura.Server.Version (HasVersion)

 import qualified Data.Aeson as J
 import qualified Data.ByteString.Lazy as BL
 import qualified Data.HashMap.Strict as Map
 import qualified Data.Text as T
 import qualified Network.HTTP.Client as H
 import qualified Network.HTTP.Types as N
 import qualified Network.Wreq as Wreq

 import Data.Parser.CacheControl
 import Data.Parser.Expires
@@ -58,31 +59,38 @@ hookMethod authHook = case ahType authHook of
 -- UserInfo parsed from the response, plus an expiration time if one
 -- was returned.
 userInfoFromAuthHook
-  :: (HasVersion, MonadIO m, MonadError QErr m)
+  :: forall m
+   . (HasVersion, MonadIO m, MonadBaseControl IO m, MonadError QErr m)
   => Logger Hasura
   -> H.Manager
   -> AuthHook
   -> [N.Header]
   -> m (UserInfo, Maybe UTCTime)
 userInfoFromAuthHook logger manager hook reqHeaders = do
-  resp <- (`onLeft` logAndThrow) =<< liftIO (try performHTTPRequest)
+  resp <- (`onLeft` logAndThrow) =<< try performHTTPRequest
   let status = resp ^. Wreq.responseStatus
       respBody = resp ^. Wreq.responseBody
   mkUserInfoFromResp logger (ahUrl hook) (hookMethod hook) status respBody
   where
+    performHTTPRequest :: m (Wreq.Response BL.ByteString)
     performHTTPRequest = do
       let url = T.unpack $ ahUrl hook
-          mkOptions = wreqOptions manager
-      case ahType hook of
-        AHTGet -> do
-          let isCommonHeader = (`elem` commonClientHeadersIgnored)
-              filteredHeaders = filter (not . isCommonHeader . fst) reqHeaders
-          Wreq.getWith (mkOptions filteredHeaders) url
-        AHTPost -> do
-          let contentType = ("Content-Type", "application/json")
-              headersPayload = J.toJSON $ Map.fromList $ hdrsToText reqHeaders
-          Wreq.postWith (mkOptions [contentType]) url $ object ["headers" J..= headersPayload]
+      req <- liftIO $ H.parseRequest url
+      liftIO do
+        case ahType hook of
+          AHTGet -> do
+            let isCommonHeader = (`elem` commonClientHeadersIgnored)
+                filteredHeaders = filter (not . isCommonHeader . fst) reqHeaders
+            H.httpLbs (req { H.requestHeaders = addDefaultHeaders filteredHeaders }) manager
+          AHTPost -> do
+            let contentType = ("Content-Type", "application/json")
+                headersPayload = J.toJSON $ Map.fromList $ hdrsToText reqHeaders
+            H.httpLbs (req { H.method = "POST"
+                           , H.requestHeaders = addDefaultHeaders [contentType]
+                           , H.requestBody = H.RequestBodyLBS . J.encode $ object ["headers" J..= headersPayload]
+                           }) manager

+    logAndThrow :: H.HttpException -> m a
     logAndThrow err = do
       unLogger logger $
         WebHookLog LevelError Nothing (ahUrl hook) (hookMethod hook)
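
Swapping Control.Exception.try for Control.Exception.Lifted.try is what removes the explicit liftIO around the whole request above: with a MonadBaseControl IO constraint, IO exceptions can be caught directly in the application monad m. A minimal sketch of that pattern, assuming the lifted-base and monad-control packages (the names below are illustrative, not from this commit):

    import Control.Exception.Lifted (try)
    import Control.Monad.Trans.Control (MonadBaseControl)
    import Network.HTTP.Client (HttpException)

    -- With MonadBaseControl IO m, 'try' catches IO exceptions thrown by an
    -- action already living in m; no liftIO wrapper around the call is needed.
    fetchOrReport
      :: MonadBaseControl IO m
      => m a                      -- the HTTP action, e.g. performHTTPRequest
      -> (HttpException -> m a)   -- the handler, e.g. logAndThrow
      -> m a
    fetchOrReport action handler = either handler pure =<< try action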

View File

@@ -17,6 +17,7 @@ import Hasura.Prelude
 import Hasura.Server.Utils (fmapL)

 import Control.Applicative (optional)
+import Data.Aeson

 import qualified Data.Aeson as J
 import qualified Data.Aeson.Casing as J
@@ -33,7 +34,7 @@ data DomainParts =
   , wdPort :: !(Maybe Int)
   } deriving (Show, Eq, Generic, Hashable)

-$(J.deriveToJSON (J.aesonDrop 2 J.snakeCase) ''DomainParts)
+$(J.deriveJSON (J.aesonDrop 2 J.snakeCase) ''DomainParts)

 data Domains
   = Domains
@@ -41,7 +42,7 @@ data Domains
   , dmWildcards :: !(Set.HashSet DomainParts)
   } deriving (Show, Eq)

-$(J.deriveToJSON (J.aesonDrop 2 J.snakeCase) ''Domains)
+$(J.deriveJSON (J.aesonDrop 2 J.snakeCase) ''Domains)

 data CorsConfig
   = CCAllowAll
@@ -62,6 +63,16 @@ instance J.ToJSON CorsConfig where
     , "allowed_origins" J..= origs
     ]

+instance J.FromJSON CorsConfig where
+  parseJSON = J.withObject "cors config" \o -> do
+    let parseAllowAll "*" = pure CCAllowAll
+        parseAllowAll _   = fail "unexpected string"
+    o .: "disabled" >>= \case
+      True  -> CCDisabled <$> o .: "ws_read_cookie"
+      False -> o .: "allowed_origins" >>= \v ->
+        J.withText "origins" parseAllowAll v
+        <|> CCAllowedOrigins <$> J.parseJSON v
+
 isCorsDisabled :: CorsConfig -> Bool
 isCorsDisabled = \case
   CCDisabled _ -> True
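
The new FromJSON instance makes CorsConfig round-trip with its existing ToJSON encoding: "disabled": true selects CCDisabled with the ws_read_cookie flag, while allowed_origins is either the literal string "*" (CCAllowAll) or a structured origins value. A self-contained sketch mirroring the instance; the real CCAllowedOrigins carries a Domains value, so the [Text] payload and decode examples below are illustrative stand-ins:

    {-# LANGUAGE BlockArguments, LambdaCase, OverloadedStrings #-}
    import Control.Applicative ((<|>))
    import Data.Aeson
    import Data.Text (Text)

    -- Simplified stand-in for Hasura's CorsConfig (Domains replaced by [Text]).
    data CorsConfig
      = CCAllowAll
      | CCDisabled Bool          -- ws_read_cookie
      | CCAllowedOrigins [Text]
      deriving Show

    instance FromJSON CorsConfig where
      parseJSON = withObject "cors config" \o -> do
        let parseAllowAll "*" = pure CCAllowAll
            parseAllowAll _   = fail "unexpected string"
        o .: "disabled" >>= \case
          True  -> CCDisabled <$> o .: "ws_read_cookie"
          False -> o .: "allowed_origins" >>= \v ->
            withText "origins" parseAllowAll v
            <|> CCAllowedOrigins <$> parseJSON v

    main :: IO ()
    main = do
      print (decode "{\"disabled\": true, \"ws_read_cookie\": false}" :: Maybe CorsConfig)
      -- Just (CCDisabled False)
      print (decode "{\"disabled\": false, \"allowed_origins\": \"*\"}" :: Maybe CorsConfig)
      -- Just CCAllowAll
      print (decode "{\"disabled\": false, \"allowed_origins\": [\"https://example.com\"]}" :: Maybe CorsConfig)
      -- Just (CCAllowedOrigins ["https://example.com"])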

View File

@@ -165,11 +165,16 @@ mkServeOptions rso = do
           | adminInternalErrors -> InternalErrorsAdminOnly
           | otherwise -> InternalErrorsDisabled

+  eventsHttpPoolSize <- withEnv (rsoEventsHttpPoolSize rso) (fst eventsHttpPoolSizeEnv)
+  eventsFetchInterval <- withEnv (rsoEventsFetchInterval rso) (fst eventsFetchIntervalEnv)
+  logHeadersFromEnv <- withEnvBool (rsoLogHeadersFromEnv rso) (fst logHeadersFromEnvEnv)
+
   return $ ServeOptions port host connParams txIso adminScrt authHook jwtSecret
                         unAuthRole corsCfg enableConsole consoleAssetsDir
                         enableTelemetry strfyNum enabledAPIs lqOpts enableAL
                         enabledLogs serverLogLevel planCacheOptions
-                        internalErrorsConfig
+                        internalErrorsConfig eventsHttpPoolSize eventsFetchInterval
+                        logHeadersFromEnv
   where
 #ifdef DeveloperAPIs
     defaultAPIs = [METADATA,GRAPHQL,PGDUMP,CONFIG,DEVELOPER]
@@ -218,7 +223,6 @@ mkServeOptions rso = do
       mxBatchSizeM <- withEnv (rsoMxBatchSize rso) $ fst mxBatchSizeEnv
       return $ LQ.mkLiveQueriesOptions mxBatchSizeM mxRefetchIntM
-
 mkExamplesDoc :: [[String]] -> PP.Doc
 mkExamplesDoc exampleLines =
   PP.text "Examples: " PP.<$> PP.indent 2 (PP.vsep examples)
@@ -312,15 +316,25 @@ serveCmdFooter =
       , adminInternalErrorsEnv
       ]

-    eventEnvs =
-      [ ( "HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE"
-        , "Max event threads"
-        )
-      , ( "HASURA_GRAPHQL_EVENTS_FETCH_INTERVAL"
-        , "Interval in milliseconds to sleep before trying to fetch events again after a "
-          <> "fetch returned no events from postgres."
-        )
-      ]
+    eventEnvs = [ eventsHttpPoolSizeEnv, eventsFetchIntervalEnv ]
+
+eventsHttpPoolSizeEnv :: (String, String)
+eventsHttpPoolSizeEnv =
+  ( "HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE"
+  , "Max event threads"
+  )
+
+eventsFetchIntervalEnv :: (String, String)
+eventsFetchIntervalEnv =
+  ( "HASURA_GRAPHQL_EVENTS_FETCH_INTERVAL"
+  , "Interval in milliseconds to sleep before trying to fetch events again after a fetch returned no events from postgres."
+  )
+
+logHeadersFromEnvEnv :: (String, String)
+logHeadersFromEnvEnv =
+  ( "HASURA_GRAPHQL_LOG_HEADERS_FROM_ENV"
+  , "Log headers sent instead of logging referenced environment variables."
+  )

 retriesNumEnv :: (String, String)
 retriesNumEnv =
@@ -785,6 +799,28 @@ parseGraphqlAdminInternalErrors = optional $
            help (snd adminInternalErrorsEnv)
          )

+parseGraphqlEventsHttpPoolSize :: Parser (Maybe Int)
+parseGraphqlEventsHttpPoolSize = optional $
+  option (eitherReader fromEnv)
+  ( long "events-http-pool-size" <>
+    metavar (fst eventsHttpPoolSizeEnv) <>
+    help (snd eventsHttpPoolSizeEnv)
+  )
+
+parseGraphqlEventsFetchInterval :: Parser (Maybe Milliseconds)
+parseGraphqlEventsFetchInterval = optional $
+  option (eitherReader readEither)
+  ( long "events-fetch-interval" <>
+    metavar (fst eventsFetchIntervalEnv) <>
+    help (snd eventsFetchIntervalEnv)
+  )
+
+parseLogHeadersFromEnv :: Parser Bool
+parseLogHeadersFromEnv =
+  switch ( long "log-headers-from-env" <>
+           help (snd devModeEnv)
+         )
+
 mxRefetchDelayEnv :: (String, String)
 mxRefetchDelayEnv =
   ( "HASURA_GRAPHQL_LIVE_QUERIES_MULTIPLEXED_REFETCH_INTERVAL"
@@ -929,6 +965,9 @@ serveOptionsParser =
     <*> parsePlanCacheSize
     <*> parseGraphqlDevMode
     <*> parseGraphqlAdminInternalErrors
+    <*> parseGraphqlEventsHttpPoolSize
+    <*> parseGraphqlEventsFetchInterval
+    <*> parseLogHeadersFromEnv

 -- | This implements the mapping between application versions
 -- and catalog schema versions.
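
Each of the three new serve options follows the file's existing pattern: an optparse-applicative parser feeds RawServeOptions, and withEnv/withEnvBool fall back to the named environment variable when the flag is absent. A standalone sketch of that precedence under simplified assumptions (withEnvFallback is a hypothetical stand-in, not the real withEnv from Init.hs):

    import System.Environment (lookupEnv)
    import Text.Read (readMaybe)

    -- Prefer the CLI-supplied value; otherwise read and parse the env var.
    withEnvFallback :: Read a => Maybe a -> String -> IO (Maybe a)
    withEnvFallback (Just cli) _   = pure (Just cli)
    withEnvFallback Nothing    var = (>>= readMaybe) <$> lookupEnv var

    main :: IO ()
    main = do
      -- e.g. run with HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE=100 and no flag
      poolSize <- withEnvFallback (Nothing :: Maybe Int)
                    "HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE"
      print poolSize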

View File

@@ -2,6 +2,7 @@
 module Hasura.Server.Init.Config where

 import qualified Data.Aeson as J
+import qualified Data.Aeson.Casing as J
 import qualified Data.Aeson.TH as J
 import qualified Data.HashSet as Set
 import qualified Data.String as DataString
@@ -60,6 +61,9 @@ data RawServeOptions impl
   , rsoPlanCacheSize :: !(Maybe Cache.CacheSize)
   , rsoDevMode :: !Bool
   , rsoAdminInternalErrors :: !(Maybe Bool)
+  , rsoEventsHttpPoolSize :: !(Maybe Int)
+  , rsoEventsFetchInterval :: !(Maybe Milliseconds)
+  , rsoLogHeadersFromEnv :: !Bool
   }

 -- | @'ResponseInternalErrorsConfig' represents the encoding of the internal
@@ -99,6 +103,9 @@ data ServeOptions impl
   , soLogLevel :: !L.LogLevel
   , soPlanCacheOptions :: !E.PlanCacheOptions
   , soResponseInternalErrorsConfig :: !ResponseInternalErrorsConfig
+  , soEventsHttpPoolSize :: !(Maybe Int)
+  , soEventsFetchInterval :: !(Maybe Milliseconds)
+  , soLogHeadersFromEnv :: !Bool
   }

 data DowngradeOptions
@@ -135,11 +142,14 @@ data API
   | DEVELOPER
   | CONFIG
   deriving (Show, Eq, Read, Generic)
 $(J.deriveJSON (J.defaultOptions { J.constructorTagModifier = map toLower })
   ''API)

 instance Hashable API

+$(J.deriveJSON (J.aesonDrop 4 J.camelCase){J.omitNothingFields=True} ''RawConnInfo)
+
 type HGECommand impl = HGECommandG (ServeOptions impl)
 type RawHGECommand impl = HGECommandG (RawServeOptions impl)
@@ -252,6 +262,9 @@ instance FromEnv LQ.BatchSize where
 instance FromEnv LQ.RefetchInterval where
   fromEnv = fmap (LQ.RefetchInterval . milliseconds . fromInteger) . readEither

+instance FromEnv Milliseconds where
+  fromEnv = fmap fromInteger . readEither
+
 instance FromEnv JWTConfig where
   fromEnv = readJson
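
The new FromEnv Milliseconds instance parses the raw string as an Integer and converts via fromInteger, relying on the Num instance of Milliseconds (from Data.Time.Clock.Units in this codebase). A rough, self-contained equivalent using a local stand-in type:

    {-# LANGUAGE GeneralizedNewtypeDeriving #-}
    import Text.Read (readEither)

    -- Local stand-in; the real Milliseconds lives in Data.Time.Clock.Units.
    newtype Milliseconds = Milliseconds Integer
      deriving (Show, Eq, Num)

    fromEnvMs :: String -> Either String Milliseconds
    fromEnvMs = fmap fromInteger . readEither

    -- fromEnvMs "1000"  ==  Right (Milliseconds 1000)
    -- fromEnvMs "abc"   ==  Left "Prelude.read: no parse"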

View File

@@ -153,7 +153,6 @@ class (Monad m) => HttpLog m where
     -- ^ list of request headers
     -> m ()
-
 -- | Log information about the HTTP request
 data HttpInfoLog
   = HttpInfoLog

View File

@@ -28,6 +28,7 @@ import Hasura.Prelude
 import qualified Data.Aeson as A
 import qualified Data.HashMap.Strict as HM
 import qualified Data.Text as T
+import qualified Data.Environment as Env
 import qualified Data.Text.IO as TIO
 import qualified Database.PG.Query as Q
 import qualified Database.PG.Query.Connection as Q
@@ -92,9 +93,10 @@ migrateCatalog
      , HasHttpManager m
      , HasSQLGenCtx m
      )
-  => UTCTime
+  => Env.Environment
+  -> UTCTime
   -> m (MigrationResult, RebuildableSchemaCache m)
-migrateCatalog migrationTime = do
+migrateCatalog env migrationTime = do
   doesSchemaExist (SchemaName "hdb_catalog") >>= \case
     False -> initialize True
     True -> doesTableExist (SchemaName "hdb_catalog") (TableName "hdb_version") >>= \case
@@ -144,7 +146,7 @@ migrateCatalog migrationTime = do
     migrateFrom :: T.Text -> m (MigrationResult, RebuildableSchemaCache m)
     migrateFrom previousVersion
       | previousVersion == latestCatalogVersionString = do
-          schemaCache <- buildRebuildableSchemaCache
+          schemaCache <- buildRebuildableSchemaCache env
           pure (MRNothingToDo, schemaCache)
       | [] <- neededMigrations =
           throw400 NotSupported $
@@ -163,7 +165,7 @@ migrateCatalog migrationTime = do
     buildCacheAndRecreateSystemMetadata :: m (RebuildableSchemaCache m)
     buildCacheAndRecreateSystemMetadata = do
-      schemaCache <- buildRebuildableSchemaCache
+      schemaCache <- buildRebuildableSchemaCache env
       view _2 <$> runCacheRWT schemaCache recreateSystemMetadata

     doesSchemaExist schemaName =
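
migrateCatalog now receives the environment explicitly instead of reading it ambiently, which is the core of this commit: capture the process environment once, then pass it around as data. The diff does not show Data.Environment itself (only its addition to the cabal file); inferred from the call sites Env.getEnvironment and Env.mkEnvironment, its shape is presumably along these lines, though the real module may differ:

    -- Presumed shape of the new Data.Environment module, inferred from the
    -- call sites in this diff; illustrative, not the committed source.
    module Data.Environment
      ( Environment
      , getEnvironment
      , mkEnvironment
      , lookupEnv
      ) where

    import qualified Data.Map.Strict as M
    import qualified System.Environment

    newtype Environment = Environment (M.Map String String)

    -- Capture the real process environment once, at startup.
    getEnvironment :: IO Environment
    getEnvironment = mkEnvironment <$> System.Environment.getEnvironment

    -- Build an environment from an association list (handy in tests).
    mkEnvironment :: [(String, String)] -> Environment
    mkEnvironment = Environment . M.fromList

    -- Pure lookup against the captured environment.
    lookupEnv :: Environment -> String -> Maybe String
    lookupEnv (Environment m) k = M.lookup k m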

View File

@@ -45,7 +45,7 @@ import qualified Database.PG.Query as Q
 import qualified Network.HTTP.Types as HTTP

 newtype RoleName
-  = RoleName {getRoleTxt :: NonEmptyText}
+  = RoleName { getRoleTxt :: NonEmptyText }
   deriving ( Show, Eq, Ord, Hashable, FromJSONKey, ToJSONKey, FromJSON
            , ToJSON, Q.FromCol, Q.ToPrepArg, Lift, Generic, Arbitrary, NFData, Cacheable )

View File

@@ -14,6 +14,7 @@ import Test.Hspec.Core.Spec
 import Test.Hspec.Expectations.Lifted

 import qualified Database.PG.Query as Q
+import qualified Data.Environment as Env

 import Hasura.RQL.DDL.Metadata (ClearMetadata (..), runClearMetadata)
 import Hasura.RQL.DDL.Schema
@@ -24,11 +25,6 @@ import Hasura.Server.Migrate
 import Hasura.Server.Version (HasVersion)

 -- -- NOTE: downgrade test disabled for now (see #5273)
--- import Data.List.Split (splitOn)
--- import Data.List (isPrefixOf, stripPrefix)
--- import System.Process (readProcess)
--- import qualified Safe
--- import Hasura.Server.Init (downgradeShortcuts)

 newtype CacheRefT m a
   = CacheRefT { runCacheRefT :: MVar (RebuildableSchemaCache m) -> m a }
@@ -69,33 +65,37 @@ spec
      )
   => Q.ConnInfo -> SpecWithCache m
 spec pgConnInfo = do
-  let dropAndInit time = CacheRefT $ flip modifyMVar \_ ->
-        dropCatalog *> (swap <$> migrateCatalog time)
+  let dropAndInit env time = CacheRefT $ flip modifyMVar \_ ->
+        dropCatalog *> (swap <$> migrateCatalog env time)

   describe "migrateCatalog" $ do
     it "initializes the catalog" $ singleTransaction do
-      (dropAndInit =<< liftIO getCurrentTime) `shouldReturn` MRInitialized
+      env <- liftIO Env.getEnvironment
+      time <- liftIO getCurrentTime
+      (dropAndInit env time) `shouldReturn` MRInitialized

     it "is idempotent" \(NT transact) -> do
       let dumpSchema = execPGDump (PGDumpReqBody ["--schema-only"] (Just False)) pgConnInfo
+      env <- Env.getEnvironment
       time <- getCurrentTime
-      transact (dropAndInit time) `shouldReturn` MRInitialized
+      transact (dropAndInit env time) `shouldReturn` MRInitialized
       firstDump <- transact dumpSchema
-      transact (dropAndInit time) `shouldReturn` MRInitialized
+      transact (dropAndInit env time) `shouldReturn` MRInitialized
       secondDump <- transact dumpSchema
       secondDump `shouldBe` firstDump

     it "supports upgrades after downgrade to version 12" \(NT transact) -> do
       let downgradeTo v = downgradeCatalog DowngradeOptions{ dgoDryRun = False, dgoTargetVersion = v }
-          upgradeToLatest time = CacheRefT $ flip modifyMVar \_ ->
-            swap <$> migrateCatalog time
+          upgradeToLatest env time = CacheRefT $ flip modifyMVar \_ ->
+            swap <$> migrateCatalog env time
+      env <- Env.getEnvironment
       time <- getCurrentTime
-      transact (dropAndInit time) `shouldReturn` MRInitialized
+      transact (dropAndInit env time) `shouldReturn` MRInitialized
       downgradeResult <- (transact . lift) (downgradeTo "12" time)
       downgradeResult `shouldSatisfy` \case
         MRMigrated{} -> True
         _ -> False
-      transact (upgradeToLatest time) `shouldReturn` MRMigrated "12"
+      transact (upgradeToLatest env time) `shouldReturn` MRMigrated "12"

   -- -- NOTE: this has been problematic in CI and we're not quite sure how to
   -- -- make this work reliably given the way we do releases and create
@@ -114,14 +114,18 @@ spec pgConnInfo = do
     let dumpMetadata = execPGDump (PGDumpReqBody ["--schema=hdb_catalog"] (Just False)) pgConnInfo

     it "is idempotent" \(NT transact) -> do
-      (transact . dropAndInit =<< getCurrentTime) `shouldReturn` MRInitialized
+      env <- Env.getEnvironment
+      time <- getCurrentTime
+      (transact $ dropAndInit env time) `shouldReturn` MRInitialized
       firstDump <- transact dumpMetadata
       transact recreateSystemMetadata
       secondDump <- transact dumpMetadata
       secondDump `shouldBe` firstDump

     it "does not create any objects affected by ClearMetadata" \(NT transact) -> do
-      (transact . dropAndInit =<< getCurrentTime) `shouldReturn` MRInitialized
+      env <- Env.getEnvironment
+      time <- getCurrentTime
+      (transact $ dropAndInit env time) `shouldReturn` MRInitialized
       firstDump <- transact dumpMetadata
       transact (runClearMetadata ClearMetadata) `shouldReturn` successMsg
       secondDump <- transact dumpMetadata
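
A note on the reworked dropAndInit/upgradeToLatest helpers above: modifyMVar expects a callback returning (newState, answer), while migrateCatalog returns (answer, cache), so swap flips the pair to stash the fresh schema cache in the MVar and surface the MigrationResult. A minimal sketch of that shape, with illustrative types:

    import Control.Concurrent.MVar (MVar, modifyMVar)
    import Data.Tuple (swap)

    -- Replace the cached state wholesale with the result of a rebuild action.
    resetWith :: MVar cache -> IO (result, cache) -> IO result
    resetWith ref rebuild =
      -- modifyMVar wants IO (newState, answer); rebuild returns the answer
      -- first, so swap flips the pair into the expected order.
      modifyMVar ref $ \_oldCache -> swap <$> rebuild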

View File

@@ -12,6 +12,7 @@ import Test.Hspec

 import qualified Data.Aeson as A
 import qualified Data.ByteString.Lazy.Char8 as BL
+import qualified Data.Environment as Env
 import qualified Database.PG.Query as Q
 import qualified Network.HTTP.Client as HTTP
 import qualified Network.HTTP.Client.TLS as HTTP
@@ -34,7 +35,6 @@ import qualified Hasura.IncrementalSpec as IncrementalSpec
 -- import qualified Hasura.RQL.MetadataSpec as MetadataSpec
 import qualified Hasura.Server.MigrateSpec as MigrateSpec
 import qualified Hasura.Server.TelemetrySpec as TelemetrySpec
-import qualified Hasura.Server.AuthSpec as AuthSpec

 data TestSuites
   = AllSuites !RawConnInfo
@@ -65,7 +65,6 @@ unitSpecs = do
   -- describe "Hasura.RQL.Metadata" MetadataSpec.spec -- Commenting until optimizing the test in CI
   describe "Data.Time" TimeSpec.spec
   describe "Hasura.Server.Telemetry" TelemetrySpec.spec
-  describe "Hasura.Server.Auth" AuthSpec.spec

 buildPostgresSpecs :: (HasVersion) => RawConnInfo -> IO Spec
 buildPostgresSpecs pgConnOptions = do
@@ -76,10 +75,9 @@ buildPostgresSpecs pgConnOptions = do
     let setupCacheRef = do
           pgPool <- Q.initPGPool pgConnInfo Q.defaultConnParams { Q.cpConns = 1 } print
-          let pgContext = mkPGExecCtx Q.Serializable pgPool
           httpManager <- HTTP.newManager HTTP.tlsManagerSettings
           let runContext = RunCtx adminUserInfo httpManager (SQLGenCtx False)
+              pgContext = mkPGExecCtx Q.Serializable pgPool

           runAsAdmin :: Run a -> IO a
           runAsAdmin =
@@ -87,7 +85,7 @@ buildPostgresSpecs pgConnOptions = do
             >>> runExceptT
             >=> flip onLeft printErrJExit

-          schemaCache <- snd <$> runAsAdmin (migrateCatalog =<< liftIO getCurrentTime)
+          schemaCache <- snd <$> runAsAdmin (migrateCatalog (Env.mkEnvironment env) =<< liftIO getCurrentTime)
           cacheRef <- newMVar schemaCache
           pure $ NT (runAsAdmin . flip MigrateSpec.runCacheRefT cacheRef)