server: add a flag to specify header precedence when calling webhook in actions and input validations

PR-URL: https://github.com/hasura/graphql-engine-mono/pull/10594
GitOrigin-RevId: 84f58b7b957630e2fc527ece09c026bf07f3027a
This commit is contained in:
Krushan Bauva 2024-01-11 18:54:56 +05:30 committed by hasura-bot
parent 4c9189441b
commit 8b34100606
30 changed files with 216 additions and 65 deletions

View File

@ -250,6 +250,17 @@ X-Hasura-Role: admin
:::info Note
In case there are multiple headers with the same name, the order of precedence is: client headers \> resolved user
(`x-hasura-*`) variables \> configuration headers.
If you want to change the order of precedence to: configuration headers \> resolved user (`x-hasura-*`) variables
\> client headers, use the [configured header
precedence](deployment/graphql-engine-flags/reference.mdx#configured-header-precedence) flag or environment variable.
:::
:::info Note
Before creating an action via the
[create_action Metadata API](/api-reference/metadata-api/actions.mdx#metadata-create-action), all custom types need to
be defined via the [set_custom_types](/api-reference/metadata-api/custom-types.mdx#metadata-set-custom-types) Metadata

View File

@ -235,6 +235,21 @@ that all clients reconnect to the latest metadata.
| **Default** | `true` |
| **Supported in** | CE, Enterprise Edition, Cloud |
### Configured Header Precedence
Whether the metadata configured headers are given higher precedence than client headers for
[Actions](actions/action-handlers.mdx#add-a-header-to-your-action) and
[Postgres input validations](schema/postgres/input-validations.mdx#request).
| | |
| ------------------- | --------------------------------------------- |
| **Flag** | `--configured-header-precedence` |
| **Env var** | `HASURA_GRAPHQL_CONFIGURED_HEADER_PRECEDENCE` |
| **Accepted values** | Boolean |
| **Options** | `true` or `false` |
| **Default** | `false` |
| **Supported in** | CE, Enterprise Edition, Cloud |
### Connections per Read-Replica
The maximum number of Postgres connections per [read-replica](databases/database-config/read-replicas.mdx) that can be

View File

@ -234,6 +234,17 @@ The request payload is of the format:
`x-hasura-*`.
- `data.input`: The schema of `data.input` varies per mutation type. This schema is defined below.
:::info Note
In case there are multiple headers with the same name, the order of precedence is: client headers \> resolved user
(`x-hasura-*`) variables \> configuration headers.
If you want to change the order of precedence to: configuration headers \> resolved user (`x-hasura-*`) variables
\> client headers, use the [configured header
precedence](deployment/graphql-engine-flags/reference.mdx#configured-header-precedence) flag or environment variable.
:::
#### Insert Mutations
```json

View File

@ -1,6 +1,7 @@
module Data.List.Extended
( duplicates,
uniques,
uniquesOn,
getDifference,
getDifferenceOn,
getOverlapWith,
@ -10,7 +11,7 @@ module Data.List.Extended
)
where
import Data.Containers.ListUtils (nubOrd)
import Data.Containers.ListUtils (nubOrd, nubOrdOn)
import Data.Function (on)
import Data.HashMap.Strict.Extended qualified as HashMap
import Data.HashSet qualified as Set
@ -30,6 +31,13 @@ duplicates =
-- | Drop repeated elements from a list, keeping only the first
-- occurrence of each value and preserving the original relative order.
uniques :: (Ord a) => [a] -> [a]
uniques xs = nubOrd xs
-- | Remove duplicates from a list, where equality is decided not on the
-- elements themselves but on a user-supplied projection of each element.
-- The first element seen for each projected key wins, and the relative
-- order of survivors is preserved.
--
-- >>> uniquesOn fst [("foo", 1), ("bar", 1), ("bar", 2), ("foo", 2), ("auth", 1), ("auth", 2)]
-- [("foo",1),("bar",1),("auth",1)]
uniquesOn :: (Ord b) => (a -> b) -> [a] -> [a]
uniquesOn project = nubOrdOn project
-- | Set difference of two lists: the elements of the first list that do
-- not occur in the second. The result is a 'Set.HashSet', so duplicates
-- are collapsed and input order is not preserved.
getDifference :: (Hashable a) => [a] -> [a] -> Set.HashSet a
getDifference wanted excluded =
  Set.fromList wanted `Set.difference` Set.fromList excluded

View File

@ -317,7 +317,8 @@ serveOptions =
soAsyncActionsFetchBatchSize = Init._default Init.asyncActionsFetchBatchSizeOption,
soPersistedQueries = Init._default Init.persistedQueriesOption,
soPersistedQueriesTtl = Init._default Init.persistedQueriesTtlOption,
soRemoteSchemaResponsePriority = Init._default Init.remoteSchemaResponsePriorityOption
soRemoteSchemaResponsePriority = Init._default Init.remoteSchemaResponsePriorityOption,
soHeaderPrecedence = Init._default Init.configuredHeaderPrecedenceOption
}
-- | What log level should be used by the engine; this is not exported, and

View File

@ -1343,6 +1343,7 @@ mkHGEServer setupHook appStateRef consoleType ekgStore = do
(leActionEvents lockedEventsCtx)
Nothing
appEnvAsyncActionsFetchBatchSize
(acHeaderPrecedence <$> getAppContext appStateRef)
-- start a background thread to handle async action live queries
void

View File

@ -172,7 +172,8 @@ data AppContext = AppContext
acApolloFederationStatus :: ApolloFederationStatus,
acCloseWebsocketsOnMetadataChangeStatus :: CloseWebsocketsOnMetadataChangeStatus,
acSchemaSampledFeatureFlags :: SchemaSampledFeatureFlags,
acRemoteSchemaResponsePriority :: RemoteSchemaResponsePriority
acRemoteSchemaResponsePriority :: RemoteSchemaResponsePriority,
acHeaderPrecedence :: HeaderPrecedence
}
-- | Collection of the LoggerCtx, the regular Logger and the PGLogger
@ -294,7 +295,8 @@ buildAppContextRule = proc (ServeOptions {..}, env, _keys, checkFeatureFlag) ->
acApolloFederationStatus = soApolloFederationStatus,
acCloseWebsocketsOnMetadataChangeStatus = soCloseWebsocketsOnMetadataChangeStatus,
acSchemaSampledFeatureFlags = schemaSampledFeatureFlags,
acRemoteSchemaResponsePriority = soRemoteSchemaResponsePriority
acRemoteSchemaResponsePriority = soRemoteSchemaResponsePriority,
acHeaderPrecedence = soHeaderPrecedence
}
where
buildEventEngineCtx = Inc.cache proc (httpPoolSize, fetchInterval, fetchBatchSize) -> do

View File

@ -38,6 +38,7 @@ import Hasura.RQL.Types.Column
import Hasura.RQL.Types.Common
import Hasura.RQL.Types.Schema.Options qualified as Options
import Hasura.SQL.AnyBackend qualified as AB
import Hasura.Server.Types (HeaderPrecedence)
import Hasura.Session
import Language.GraphQL.Draft.Syntax qualified as G
import Network.HTTP.Client as HTTP
@ -145,8 +146,9 @@ bqDBMutationPlan ::
[HTTP.Header] ->
Maybe G.Name ->
Maybe (HashMap G.Name (G.Value G.Variable)) ->
HeaderPrecedence ->
m (DBStepInfo 'BigQuery, [ModelInfoPart])
bqDBMutationPlan _env _manager _logger _userInfo _stringifyNum _sourceName _sourceConfig _mrf _headers _gName _maybeSelSetArgs =
bqDBMutationPlan _env _manager _logger _userInfo _stringifyNum _sourceName _sourceConfig _mrf _headers _gName _maybeSelSetArgs _ =
throw500 "mutations are not supported in BigQuery; this should be unreachable"
-- explain

View File

@ -84,7 +84,7 @@ instance BackendExecute 'DataConnector where
dbsiResolvedConnectionTemplate = ()
}
mkDBMutationPlan _env _manager _logger UserInfo {..} _stringifyNum sourceName sourceConfig mutationDB _headers _gName _maybeSelSetArgs = do
mkDBMutationPlan _env _manager _logger UserInfo {..} _stringifyNum sourceName sourceConfig mutationDB _headers _gName _maybeSelSetArgs _ = do
(mutationPlan@Plan {..}, modelNames) <- flip runReaderT (API._cScalarTypes $ _scCapabilities sourceConfig, _uiSession) $ Plan.mkMutationPlan sourceName ModelSourceTypeDataConnector mutationDB
transformedSourceConfig <- transformSourceConfig sourceConfig (Just _uiSession)
let modelInfo = getModelInfoPartfromModelNames modelNames (ModelOperationType G.OperationTypeMutation)

View File

@ -55,6 +55,7 @@ import Hasura.RQL.Types.Column qualified as RQLColumn
import Hasura.RQL.Types.Common as RQLTypes
import Hasura.RQL.Types.Schema.Options qualified as Options
import Hasura.SQL.AnyBackend qualified as AB
import Hasura.Server.Types (HeaderPrecedence)
import Hasura.Session
import Language.GraphQL.Draft.Syntax qualified as G
import Network.HTTP.Client as HTTP
@ -317,8 +318,9 @@ msDBMutationPlan ::
[HTTP.Header] ->
Maybe G.Name ->
Maybe (HashMap G.Name (G.Value G.Variable)) ->
HeaderPrecedence ->
m (DBStepInfo 'MSSQL, [ModelInfoPart])
msDBMutationPlan _env _manager _logger userInfo stringifyNum sourceName sourceConfig mrf _headers _gName _maybeSelSetArgs = do
msDBMutationPlan _env _manager _logger userInfo stringifyNum sourceName sourceConfig mrf _headers _gName _maybeSelSetArgs _ = do
go <$> case mrf of
MDBInsert annInsert -> executeInsert userInfo stringifyNum sourceName ModelSourceTypeMSSQL sourceConfig annInsert
MDBDelete annDelete -> executeDelete userInfo stringifyNum sourceName ModelSourceTypeMSSQL sourceConfig annDelete

View File

@ -49,6 +49,7 @@ import Hasura.RQL.Types.Headers
import Hasura.RQL.Types.NamingCase (NamingCase)
import Hasura.RQL.Types.Relationships.Local
import Hasura.RQL.Types.Schema.Options qualified as Options
import Hasura.Server.Types (HeaderPrecedence)
import Hasura.Session
import Hasura.Tracing qualified as Tracing
import Network.HTTP.Client.Transformable qualified as HTTP
@ -452,10 +453,11 @@ validateInsertRows ::
Bool ->
[HTTP.Header] ->
[IR.AnnotatedInsertRow ('Postgres pgKind) (IR.UnpreparedValue ('Postgres pgKind))] ->
HeaderPrecedence ->
m ()
validateInsertRows env manager logger userInfo resolvedWebHook confHeaders timeout forwardClientHeaders reqHeaders rows = do
validateInsertRows env manager logger userInfo resolvedWebHook confHeaders timeout forwardClientHeaders reqHeaders rows headerPrecedence = do
let inputData = J.object ["input" J..= map convertInsertRow rows]
PGE.validateMutation env manager logger userInfo resolvedWebHook confHeaders timeout forwardClientHeaders reqHeaders inputData
PGE.validateMutation env manager logger userInfo resolvedWebHook confHeaders timeout forwardClientHeaders reqHeaders inputData headerPrecedence
where
convertInsertRow :: IR.AnnotatedInsertRow ('Postgres pgKind) (IR.UnpreparedValue ('Postgres pgKind)) -> J.Value
convertInsertRow fields = J.object $ flip mapMaybe fields $ \field ->

View File

@ -37,6 +37,7 @@ import Data.Aeson.TH qualified as J
import Data.Environment qualified as Env
import Data.HashMap.Strict qualified as HashMap
import Data.HashMap.Strict.InsOrd qualified as InsOrdHashMap
import Data.List.Extended qualified as LE
import Data.Sequence qualified as DS
import Database.PG.Query qualified as PG
import Hasura.Backends.Postgres.Connection
@ -75,6 +76,7 @@ import Hasura.RQL.Types.Headers (HeaderConf)
import Hasura.RQL.Types.NamingCase (NamingCase)
import Hasura.RQL.Types.Permission
import Hasura.RQL.Types.Schema.Options qualified as Options
import Hasura.Server.Types (HeaderPrecedence (..))
import Hasura.Server.Utils
import Hasura.Session
import Hasura.Tracing (b3TraceContextPropagator)
@ -402,8 +404,9 @@ validateUpdateMutation ::
[HTTP.Header] ->
IR.AnnotatedUpdateG ('Postgres pgKind) Void (IR.UnpreparedValue ('Postgres pgKind)) ->
Maybe (HashMap G.Name (G.Value G.Variable)) ->
HeaderPrecedence ->
m ()
validateUpdateMutation env manager logger userInfo resolvedWebHook confHeaders timeout forwardClientHeaders reqHeaders updateOperation maybeSelSetArgs = do
validateUpdateMutation env manager logger userInfo resolvedWebHook confHeaders timeout forwardClientHeaders reqHeaders updateOperation maybeSelSetArgs headerPrecedence = do
inputData <-
case maybeSelSetArgs of
Just arguments -> do
@ -429,7 +432,7 @@ validateUpdateMutation env manager logger userInfo resolvedWebHook confHeaders t
Nothing -> return $ J.Null
Just val -> (return $ J.object ["input" J..= graphQLToJSON val])
Nothing -> return J.Null
validateMutation env manager logger userInfo resolvedWebHook confHeaders timeout forwardClientHeaders reqHeaders inputData
validateMutation env manager logger userInfo resolvedWebHook confHeaders timeout forwardClientHeaders reqHeaders inputData headerPrecedence
validateDeleteMutation ::
forall m pgKind.
@ -445,8 +448,9 @@ validateDeleteMutation ::
[HTTP.Header] ->
IR.AnnDelG ('Postgres pgKind) Void (IR.UnpreparedValue ('Postgres pgKind)) ->
Maybe (HashMap G.Name (G.Value G.Variable)) ->
HeaderPrecedence ->
m ()
validateDeleteMutation env manager logger userInfo resolvedWebHook confHeaders timeout forwardClientHeaders reqHeaders deleteOperation maybeSelSetArgs = do
validateDeleteMutation env manager logger userInfo resolvedWebHook confHeaders timeout forwardClientHeaders reqHeaders deleteOperation maybeSelSetArgs headerPrecedence = do
inputData <-
case maybeSelSetArgs of
Just arguments -> do
@ -473,7 +477,7 @@ validateDeleteMutation env manager logger userInfo resolvedWebHook confHeaders t
return (J.object ["input" J..= [deleteInputValByPk]])
else return (J.object ["input" J..= [deleteInputVal]])
Nothing -> return J.Null
validateMutation env manager logger userInfo resolvedWebHook confHeaders timeout forwardClientHeaders reqHeaders inputData
validateMutation env manager logger userInfo resolvedWebHook confHeaders timeout forwardClientHeaders reqHeaders inputData headerPrecedence
validateMutation ::
forall m.
@ -491,8 +495,9 @@ validateMutation ::
Bool ->
[HTTP.Header] ->
J.Value ->
HeaderPrecedence ->
m ()
validateMutation env manager logger userInfo (ResolvedWebhook urlText) confHeaders timeout forwardClientHeaders reqHeaders inputData = do
validateMutation env manager logger userInfo (ResolvedWebhook urlText) confHeaders timeout forwardClientHeaders reqHeaders inputData headerPrecedence = do
let requestBody =
J.object
[ "version" J..= validateInputPayloadVersion,
@ -502,9 +507,12 @@ validateMutation env manager logger userInfo (ResolvedWebhook urlText) confHeade
]
resolvedConfHeaders <- makeHeadersFromConf env confHeaders
let clientHeaders = if forwardClientHeaders then mkClientHeadersForward reqHeaders else mempty
-- Using HashMap to avoid duplicate headers between configuration headers
-- and client headers where configuration headers are preferred
hdrs = (HashMap.toList . HashMap.fromList) (resolvedConfHeaders <> defaultHeaders <> clientHeaders)
hdrs = case headerPrecedence of
-- preserves old behaviour (default)
-- avoids duplicates and forwards client headers with higher precedence than configuration headers
ClientHeadersFirst -> LE.uniquesOn fst (clientHeaders <> defaultHeaders <> resolvedConfHeaders)
-- avoids duplicates and forwards configuration headers with higher precedence than client headers
ConfiguredHeadersFirst -> LE.uniquesOn fst (resolvedConfHeaders <> defaultHeaders <> clientHeaders)
initRequest <- liftIO $ HTTP.mkRequestThrow urlText
let request =
initRequest

View File

@ -93,6 +93,7 @@ import Hasura.RQL.Types.Common (FieldName (..), JsonAggSelect (..), SourceName (
import Hasura.RQL.Types.Permission (ValidateInput (..), ValidateInputHttpDefinition (..))
import Hasura.RQL.Types.Schema.Options qualified as Options
import Hasura.SQL.AnyBackend qualified as AB
import Hasura.Server.Types (HeaderPrecedence)
import Hasura.Session (UserInfo (..))
import Hasura.Tracing qualified as Tracing
import Language.GraphQL.Draft.Syntax qualified as G
@ -258,10 +259,11 @@ convertDelete ::
Options.StringifyNumbers ->
[HTTP.Header] ->
Maybe (HashMap G.Name (G.Value G.Variable)) ->
HeaderPrecedence ->
m (OnBaseMonad (PG.TxET QErr) EncJSON, [ModelNameInfo])
convertDelete sourceName modelSourceType env manager logger userInfo deleteOperation stringifyNum reqHeaders selSetArguments = do
convertDelete sourceName modelSourceType env manager logger userInfo deleteOperation stringifyNum reqHeaders selSetArguments headerPrecedence = do
for_ (_adValidateInput deleteOperation) $ \(VIHttp ValidateInputHttpDefinition {..}) -> do
PGE.validateDeleteMutation env manager logger userInfo _vihdUrl _vihdHeaders _vihdTimeout _vihdForwardClientHeaders reqHeaders deleteOperation selSetArguments
PGE.validateDeleteMutation env manager logger userInfo _vihdUrl _vihdHeaders _vihdTimeout _vihdForwardClientHeaders reqHeaders deleteOperation selSetArguments headerPrecedence
queryTags <- ask
preparedDelete <- traverse (prepareWithoutPlan userInfo) deleteOperation
let (modelName, modelType) = (qualifiedObjectToText (_adTable deleteOperation), ModelTypeTable)
@ -298,10 +300,11 @@ convertUpdate ::
Options.StringifyNumbers ->
[HTTP.Header] ->
Maybe (HashMap G.Name (G.Value G.Variable)) ->
HeaderPrecedence ->
m (OnBaseMonad (PG.TxET QErr) EncJSON, [ModelNameInfo])
convertUpdate sourceName modelSourceType env manager logger userInfo updateOperation stringifyNum reqHeaders selSetArguments = do
convertUpdate sourceName modelSourceType env manager logger userInfo updateOperation stringifyNum reqHeaders selSetArguments headerPrecedence = do
for_ (_auValidateInput updateOperation) $ \(VIHttp ValidateInputHttpDefinition {..}) -> do
PGE.validateUpdateMutation env manager logger userInfo _vihdUrl _vihdHeaders _vihdTimeout _vihdForwardClientHeaders reqHeaders updateOperation selSetArguments
PGE.validateUpdateMutation env manager logger userInfo _vihdUrl _vihdHeaders _vihdTimeout _vihdForwardClientHeaders reqHeaders updateOperation selSetArguments headerPrecedence
queryTags <- ask
preparedUpdate <- traverse (prepareWithoutPlan userInfo) updateOperation
let (modelName, modelType) = (qualifiedObjectToText (_auTable updateOperation), ModelTypeTable)
@ -353,12 +356,13 @@ convertInsert ::
IR.AnnotatedInsert ('Postgres pgKind) Void (UnpreparedValue ('Postgres pgKind)) ->
Options.StringifyNumbers ->
[HTTP.Header] ->
HeaderPrecedence ->
m (OnBaseMonad (PG.TxET QErr) EncJSON, [ModelNameInfo])
convertInsert sourceName modelSourceType env manager logger userInfo insertOperation stringifyNum reqHeaders = do
convertInsert sourceName modelSourceType env manager logger userInfo insertOperation stringifyNum reqHeaders headerPrecedence = do
-- Validate insert data
(_, res) <- flip runStateT InsOrdHashMap.empty $ validateInsertInput env manager logger userInfo (IR._aiData insertOperation) reqHeaders
for_ res $ \(rows, VIHttp ValidateInputHttpDefinition {..}) -> do
validateInsertRows env manager logger userInfo _vihdUrl _vihdHeaders _vihdTimeout _vihdForwardClientHeaders reqHeaders rows
validateInsertRows env manager logger userInfo _vihdUrl _vihdHeaders _vihdTimeout _vihdForwardClientHeaders reqHeaders rows headerPrecedence
queryTags <- ask
preparedInsert <- traverse (prepareWithoutPlan userInfo) insertOperation
argModels <- do
@ -439,8 +443,9 @@ pgDBMutationPlan ::
[HTTP.Header] ->
Maybe G.Name ->
Maybe (HashMap G.Name (G.Value G.Variable)) ->
HeaderPrecedence ->
m (DBStepInfo ('Postgres pgKind), [ModelInfoPart])
pgDBMutationPlan env manager logger userInfo stringifyNum sourceName sourceConfig mrf reqHeaders operationName selSetArguments = do
pgDBMutationPlan env manager logger userInfo stringifyNum sourceName sourceConfig mrf reqHeaders operationName selSetArguments headerPrecedence = do
resolvedConnectionTemplate <-
let connectionTemplateResolver =
connectionTemplateConfigResolver (_pscConnectionTemplateConfig sourceConfig)
@ -450,9 +455,9 @@ pgDBMutationPlan env manager logger userInfo stringifyNum sourceName sourceConfi
$ QueryOperationType G.OperationTypeMutation
in applyConnectionTemplateResolverNonAdmin connectionTemplateResolver userInfo reqHeaders queryContext
go resolvedConnectionTemplate <$> case mrf of
MDBInsert s -> convertInsert sourceName ModelSourceTypePostgres env manager logger userInfo s stringifyNum reqHeaders
MDBUpdate s -> convertUpdate sourceName ModelSourceTypePostgres env manager logger userInfo s stringifyNum reqHeaders selSetArguments
MDBDelete s -> convertDelete sourceName ModelSourceTypePostgres env manager logger userInfo s stringifyNum reqHeaders selSetArguments
MDBInsert s -> convertInsert sourceName ModelSourceTypePostgres env manager logger userInfo s stringifyNum reqHeaders headerPrecedence
MDBUpdate s -> convertUpdate sourceName ModelSourceTypePostgres env manager logger userInfo s stringifyNum reqHeaders selSetArguments headerPrecedence
MDBDelete s -> convertDelete sourceName ModelSourceTypePostgres env manager logger userInfo s stringifyNum reqHeaders selSetArguments headerPrecedence
MDBFunction returnsSet s -> convertFunction sourceName ModelSourceTypePostgres userInfo returnsSet s
where
modelInfoList v = getModelInfoPartfromModelNames (snd v) (ModelOperationType G.OperationTypeMutation)

View File

@ -63,7 +63,7 @@ import Hasura.RQL.Types.Subscription
import Hasura.SQL.AnyBackend qualified as AB
import Hasura.Server.Init qualified as Init
import Hasura.Server.Prometheus (PrometheusMetrics)
import Hasura.Server.Types (MonadGetPolicies, ReadOnlyMode (..), RequestId (..))
import Hasura.Server.Types (HeaderPrecedence, MonadGetPolicies, ReadOnlyMode (..), RequestId (..))
import Hasura.Services
import Hasura.Session (BackendOnlyFieldAccess (..), UserInfo (..))
import Hasura.Tracing qualified as Tracing
@ -363,6 +363,7 @@ getResolvedExecPlan ::
Maybe G.Name ->
RequestId ->
Init.ResponseInternalErrorsConfig ->
HeaderPrecedence ->
m (ParameterizedQueryHash, ResolvedExecutionPlan, [ModelInfoPart])
getResolvedExecPlan
env
@ -378,7 +379,8 @@ getResolvedExecPlan
queryParts -- the first step of the execution plan
maybeOperationName
reqId
responseErrorsConfig = do
responseErrorsConfig
headerPrecedence = do
let gCtx = makeGQLContext userInfo sc queryType
tracesPropagator = getOtelTracesPropagator $ scOpenTelemetryConfig sc
@ -404,6 +406,7 @@ getResolvedExecPlan
reqId
maybeOperationName
responseErrorsConfig
headerPrecedence
Tracing.attachMetadata [("graphql.operation.type", "query"), ("parameterized_query_hash", bsToTxt $ unParamQueryHash parameterizedQueryHash)]
pure (parameterizedQueryHash, QueryExecutionPlan executionPlan queryRootFields dirMap, modelInfoList)
G.TypedOperationDefinition G.OperationTypeMutation _ varDefs directives inlinedSelSet -> Tracing.newSpan "Resolve mutation execution plan" $ do
@ -426,6 +429,7 @@ getResolvedExecPlan
(scSetGraphqlIntrospectionOptions sc)
reqId
maybeOperationName
headerPrecedence
Tracing.attachMetadata [("graphql.operation.type", "mutation")]
pure (parameterizedQueryHash, MutationExecutionPlan executionPlan, modelInfoList)
G.TypedOperationDefinition G.OperationTypeSubscription _ varDefs directives inlinedSelSet -> Tracing.newSpan "Resolve subscription execution plan" $ do

View File

@ -34,6 +34,7 @@ import Data.CaseInsensitive qualified as CI
import Data.Environment qualified as Env
import Data.Has
import Data.HashMap.Strict qualified as HashMap
import Data.List.Extended qualified as LE
import Data.SerializableBlob qualified as SB
import Data.Set (Set)
import Data.Text.Extended
@ -81,6 +82,7 @@ import Hasura.RQL.Types.Schema.Options qualified as Options
import Hasura.RQL.Types.SchemaCache
import Hasura.Server.Init.Config (OptionalInterval (..), ResponseInternalErrorsConfig (..), shouldIncludeInternal)
import Hasura.Server.Prometheus (PrometheusMetrics (..))
import Hasura.Server.Types (HeaderPrecedence (..))
import Hasura.Server.Utils
( mkClientHeadersForward,
mkSetCookieHeaders,
@ -154,8 +156,9 @@ resolveActionExecution ::
IR.AnnActionExecution Void ->
ActionExecContext ->
Maybe GQLQueryText ->
HeaderPrecedence ->
ActionExecution
resolveActionExecution httpManager env logger tracesPropagator prometheusMetrics IR.AnnActionExecution {..} ActionExecContext {..} gqlQueryText =
resolveActionExecution httpManager env logger tracesPropagator prometheusMetrics IR.AnnActionExecution {..} ActionExecContext {..} gqlQueryText headerPrecedence =
ActionExecution $ first (encJFromOrderedValue . makeActionResponseNoRelations _aaeFields _aaeOutputType _aaeOutputFields True) <$> runWebhook
where
handlerPayload = ActionWebhookPayload (ActionContext _aaeName) _aecSessionVariables _aaePayload gqlQueryText
@ -181,6 +184,7 @@ resolveActionExecution httpManager env logger tracesPropagator prometheusMetrics
_aaeTimeOut
_aaeRequestTransform
_aaeResponseTransform
headerPrecedence
throwUnexpected :: (MonadError QErr m) => Text -> m ()
throwUnexpected = throw400 Unexpected
@ -484,8 +488,9 @@ asyncActionsProcessor ::
STM.TVar (Set LockedActionEventId) ->
Maybe GH.GQLQueryText ->
Int ->
IO HeaderPrecedence ->
m (Forever m)
asyncActionsProcessor getEnvHook logger getSCFromRef' getFetchInterval lockedActionEvents gqlQueryText fetchBatchSize =
asyncActionsProcessor getEnvHook logger getSCFromRef' getFetchInterval lockedActionEvents gqlQueryText fetchBatchSize getHeaderPrecedence =
return
$ Forever ()
$ const
@ -508,6 +513,7 @@ asyncActionsProcessor getEnvHook logger getSCFromRef' getFetchInterval lockedAct
-- one async action present in the schema cache
asyncInvocationsE <- fetchUndeliveredActionEvents fetchBatchSize
asyncInvocations <- liftIO $ onLeft asyncInvocationsE mempty
headerPrecedence <- liftIO getHeaderPrecedence
-- save the actions that are currently fetched from the DB to
-- be processed in a TVar (Set LockedActionEventId) and when
-- the action is processed we remove it from the set. This set
@ -517,11 +523,11 @@ asyncActionsProcessor getEnvHook logger getSCFromRef' getFetchInterval lockedAct
-- locked action events set TVar is empty, it will mean that there are
-- no events that are in the 'processing' state
saveLockedEvents (map (EventId . actionIdToText . _aliId) asyncInvocations) lockedActionEvents
LA.mapConcurrently_ (callHandler actionCache tracesPropagator) asyncInvocations
LA.mapConcurrently_ (callHandler actionCache tracesPropagator headerPrecedence) asyncInvocations
liftIO $ sleep $ milliseconds (unrefine sleepTime)
where
callHandler :: ActionCache -> Tracing.HttpPropagator -> ActionLogItem -> m ()
callHandler actionCache tracesPropagator actionLogItem =
callHandler :: ActionCache -> Tracing.HttpPropagator -> HeaderPrecedence -> ActionLogItem -> m ()
callHandler actionCache tracesPropagator headerPrecedence actionLogItem =
Tracing.newTrace Tracing.sampleAlways "async actions processor" do
let ActionLogItem
actionId
@ -562,6 +568,7 @@ asyncActionsProcessor getEnvHook logger getSCFromRef' getFetchInterval lockedAct
timeout
metadataRequestTransform
metadataResponseTransform
headerPrecedence
resE <-
setActionStatus actionId $ case eitherRes of
Left e -> AASError e
@ -591,6 +598,7 @@ callWebhook ::
Timeout ->
Maybe RequestTransform ->
Maybe MetadataResponseTransform ->
HeaderPrecedence ->
m (ActionWebhookResponse, HTTP.ResponseHeaders)
callWebhook
env
@ -606,12 +614,16 @@ callWebhook
actionWebhookPayload
timeoutSeconds
metadataRequestTransform
metadataResponseTransform = do
metadataResponseTransform
headerPrecedence = do
resolvedConfHeaders <- makeHeadersFromConf env confHeaders
let clientHeaders = if forwardClientHeaders then mkClientHeadersForward reqHeaders else mempty
-- Using HashMap to avoid duplicate headers between configuration headers
-- and client headers where configuration headers are preferred
hdrs = (HashMap.toList . HashMap.fromList) (resolvedConfHeaders <> defaultHeaders <> clientHeaders)
hdrs = case headerPrecedence of
-- preserves old behaviour (default)
-- avoids duplicates and forwards client headers with higher precedence than configuration headers
ClientHeadersFirst -> LE.uniquesOn fst (clientHeaders <> defaultHeaders <> resolvedConfHeaders)
-- avoids duplicates and forwards configuration headers with higher precedence than client headers
ConfiguredHeadersFirst -> LE.uniquesOn fst (resolvedConfHeaders <> defaultHeaders <> clientHeaders)
postPayload = J.toJSON actionWebhookPayload
requestBody = J.encode postPayload
requestBodySize = BL.length requestBody

View File

@ -102,6 +102,7 @@ class
[HTTP.Header] ->
Maybe G.Name ->
Maybe (HashMap G.Name (G.Value G.Variable)) ->
HeaderPrecedence ->
m (DBStepInfo b, [ModelInfoPart])
mkLiveQuerySubscriptionPlan ::
forall m.

View File

@ -58,12 +58,13 @@ convertMutationAction ::
HTTP.RequestHeaders ->
Maybe GH.GQLQueryText ->
ActionMutation Void ->
HeaderPrecedence ->
m ActionExecutionPlan
convertMutationAction env logger tracesPropagator prometheusMetrics userInfo reqHeaders gqlQueryText action = do
convertMutationAction env logger tracesPropagator prometheusMetrics userInfo reqHeaders gqlQueryText action headerPrecedence = do
httpManager <- askHTTPManager
case action of
AMSync s ->
pure $ AEPSync $ resolveActionExecution httpManager env logger tracesPropagator prometheusMetrics s actionExecContext gqlQueryText
pure $ AEPSync $ resolveActionExecution httpManager env logger tracesPropagator prometheusMetrics s actionExecContext gqlQueryText headerPrecedence
AMAsync s ->
AEPAsyncMutation <$> resolveActionMutationAsync s reqHeaders userSession
where
@ -96,6 +97,7 @@ convertMutationSelectionSet ::
RequestId ->
-- | Graphql Operation Name
Maybe G.Name ->
HeaderPrecedence ->
m (ExecutionPlan, ParameterizedQueryHash, [ModelInfoPart])
convertMutationSelectionSet
env
@ -112,7 +114,8 @@ convertMutationSelectionSet
gqlUnparsed
introspectionDisabledRoles
reqId
maybeOperationName = do
maybeOperationName
headerPrecedence = do
mutationParser <-
onNothing (gqlMutationParser gqlContext)
$ throw400 ValidationFailed "no mutations exist"
@ -143,7 +146,7 @@ convertMutationSelectionSet
httpManager <- askHTTPManager
let selSetArguments = getSelSetArgsFromRootField resolvedSelSet rootFieldName
(dbStepInfo, dbModelInfoList) <- flip runReaderT queryTagsComment $ mkDBMutationPlan @b env httpManager logger userInfo stringifyNum sourceName sourceConfig noRelsDBAST reqHeaders maybeOperationName selSetArguments
(dbStepInfo, dbModelInfoList) <- flip runReaderT queryTagsComment $ mkDBMutationPlan @b env httpManager logger userInfo stringifyNum sourceName sourceConfig noRelsDBAST reqHeaders maybeOperationName selSetArguments headerPrecedence
pure $ (ExecStepDB [] (AB.mkAnyBackend dbStepInfo) remoteJoins, dbModelInfoList)
RFRemote (RemoteSchemaName rName) remoteField -> do
RemoteSchemaRootField remoteSchemaInfo resultCustomizer resolvedRemoteField <- runVariableCache $ resolveRemoteField userInfo remoteField
@ -156,7 +159,7 @@ convertMutationSelectionSet
(actionName, _fch) <- pure $ case noRelsDBAST of
AMSync s -> (_aaeName s, _aaeForwardClientHeaders s)
AMAsync s -> (_aamaName s, _aamaForwardClientHeaders s)
plan <- convertMutationAction env logger tracesPropagator prometheusMetrics userInfo reqHeaders (Just (GH._grQuery gqlUnparsed)) noRelsDBAST
plan <- convertMutationAction env logger tracesPropagator prometheusMetrics userInfo reqHeaders (Just (GH._grQuery gqlUnparsed)) noRelsDBAST headerPrecedence
let actionsModel = ModelInfoPart (toTxt actionName) ModelTypeAction Nothing Nothing (ModelOperationType G.OperationTypeMutation)
pure $ (ExecStepAction plan (ActionsInfo actionName _fch) remoteJoins, [actionsModel]) -- `_fch` represents the `forward_client_headers` option from the action
-- definition which is currently being ignored for actions that are mutations

View File

@ -39,7 +39,7 @@ import Hasura.RemoteSchema.Metadata.Base (RemoteSchemaName (..))
import Hasura.SQL.AnyBackend qualified as AB
import Hasura.Server.Init.Config (ResponseInternalErrorsConfig (..))
import Hasura.Server.Prometheus (PrometheusMetrics (..))
import Hasura.Server.Types (MonadGetPolicies, RequestId (..))
import Hasura.Server.Types (HeaderPrecedence, MonadGetPolicies, RequestId (..))
import Hasura.Services.Network
import Hasura.Session
import Hasura.Tracing (MonadTrace)
@ -93,6 +93,7 @@ convertQuerySelSet ::
-- | Graphql Operation Name
Maybe G.Name ->
ResponseInternalErrorsConfig ->
HeaderPrecedence ->
m (ExecutionPlan, [QueryRootField UnpreparedValue], DirectiveMap, ParameterizedQueryHash, [ModelInfoPart])
convertQuerySelSet
env
@ -110,7 +111,8 @@ convertQuerySelSet
introspectionDisabledRoles
reqId
maybeOperationName
responseErrorsConfig = do
responseErrorsConfig
headerPrecedence = do
-- 1. Parse the GraphQL query into the 'RootFieldMap' and a 'SelectionSet'
(unpreparedQueries, normalizedDirectives, normalizedSelectionSet) <-
Tracing.newSpan "Parse query IR" $ parseGraphQLQuery nullInNonNullableVariables gqlContext varDefs (GH._grVariables gqlUnparsed) directives fields
@ -161,7 +163,8 @@ convertQuerySelSet
prometheusMetrics
s
(ActionExecContext reqHeaders (_uiSession userInfo))
(Just (GH._grQuery gqlUnparsed)),
(Just (GH._grQuery gqlUnparsed))
headerPrecedence,
_aaeName s,
_aaeForwardClientHeaders s
)

View File

@ -91,7 +91,7 @@ import Hasura.Server.Prometheus
PrometheusMetrics (..),
)
import Hasura.Server.Telemetry.Counters qualified as Telem
import Hasura.Server.Types (ModelInfoLogState (..), MonadGetPolicies (..), ReadOnlyMode (..), RemoteSchemaResponsePriority (..), RequestId (..))
import Hasura.Server.Types (HeaderPrecedence, ModelInfoLogState (..), MonadGetPolicies (..), ReadOnlyMode (..), RemoteSchemaResponsePriority (..), RequestId (..))
import Hasura.Services
import Hasura.Session (SessionVariable, SessionVariableValue, SessionVariables, UserInfo (..), filterSessionVariables)
import Hasura.Tracing (MonadTrace, attachMetadata)
@ -312,6 +312,7 @@ runGQ ::
Init.AllowListStatus ->
ReadOnlyMode ->
RemoteSchemaResponsePriority ->
HeaderPrecedence ->
PrometheusMetrics ->
L.Logger L.Hasura ->
Maybe (CredentialCache AgentLicenseKey) ->
@ -323,7 +324,7 @@ runGQ ::
GQLReqUnparsed ->
ResponseInternalErrorsConfig ->
m (GQLQueryOperationSuccessLog, HttpResponse (Maybe GQResponse, EncJSON))
runGQ env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePriority prometheusMetrics logger agentLicenseKey reqId userInfo ipAddress reqHeaders queryType reqUnparsed responseErrorsConfig = do
runGQ env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePriority headerPrecedence prometheusMetrics logger agentLicenseKey reqId userInfo ipAddress reqHeaders queryType reqUnparsed responseErrorsConfig = do
getModelInfoLogStatus' <- runGetModelInfoLogStatus
modelInfoLogStatus <- liftIO getModelInfoLogStatus'
let gqlMetrics = pmGraphQLRequestMetrics prometheusMetrics
@ -365,6 +366,7 @@ runGQ env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePriority promet
maybeOperationName
reqId
responseErrorsConfig
headerPrecedence
-- 4. Execute the execution plan producing a 'AnnotatedResponse'.
(response, queryCachedStatus, modelInfoFromExecution) <- executePlan reqParsed runLimits execPlan
@ -794,6 +796,7 @@ runGQBatched ::
RequestId ->
ResponseInternalErrorsConfig ->
RemoteSchemaResponsePriority ->
HeaderPrecedence ->
UserInfo ->
Wai.IpAddress ->
[HTTP.Header] ->
@ -801,10 +804,10 @@ runGQBatched ::
-- | the batched request with unparsed GraphQL query
GQLBatchedReqs (GQLReq GQLQueryText) ->
m (HttpLogGraphQLInfo, HttpResponse EncJSON)
runGQBatched env sqlGenCtx sc enableAL readOnlyMode prometheusMetrics logger agentLicenseKey reqId responseErrorsConfig remoteSchemaResponsePriority userInfo ipAddress reqHdrs queryType query =
runGQBatched env sqlGenCtx sc enableAL readOnlyMode prometheusMetrics logger agentLicenseKey reqId responseErrorsConfig remoteSchemaResponsePriority headerPrecedence userInfo ipAddress reqHdrs queryType query =
case query of
GQLSingleRequest req -> do
(gqlQueryOperationLog, httpResp) <- runGQ env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePriority prometheusMetrics logger agentLicenseKey reqId userInfo ipAddress reqHdrs queryType req responseErrorsConfig
(gqlQueryOperationLog, httpResp) <- runGQ env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePriority headerPrecedence prometheusMetrics logger agentLicenseKey reqId userInfo ipAddress reqHdrs queryType req responseErrorsConfig
let httpLoggingGQInfo = (CommonHttpLogMetadata L.RequestModeSingle (Just (GQLSingleRequest (GQLQueryOperationSuccess gqlQueryOperationLog))), (PQHSetSingleton (gqolParameterizedQueryHash gqlQueryOperationLog)))
pure (httpLoggingGQInfo, snd <$> httpResp)
GQLBatchedReqs reqs -> do
@ -817,7 +820,7 @@ runGQBatched env sqlGenCtx sc enableAL readOnlyMode prometheusMetrics logger age
flip HttpResponse []
. encJFromList
. map (either (encJFromJEncoding . encodeGQErr includeInternal) _hrBody)
responses <- for reqs \req -> fmap (req,) $ try $ (fmap . fmap . fmap) snd $ runGQ env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePriority prometheusMetrics logger agentLicenseKey reqId userInfo ipAddress reqHdrs queryType req responseErrorsConfig
responses <- for reqs \req -> fmap (req,) $ try $ (fmap . fmap . fmap) snd $ runGQ env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePriority headerPrecedence prometheusMetrics logger agentLicenseKey reqId userInfo ipAddress reqHdrs queryType req responseErrorsConfig
let requestsOperationLogs = map fst $ rights $ map snd responses
batchOperationLogs =
map

View File

@ -97,9 +97,10 @@ createWSServerApp enabledLogTypes serverEnv connInitTimeout licenseKeyCache = \
flip runReaderT serverEnv $ onConn rid rh ip (wsActions sp)
onMessageHandler conn bs sp = do
headerPrecedence <- liftIO $ acHeaderPrecedence <$> getAppContext (_wseAppStateRef serverEnv)
responseErrorsConfig <- liftIO $ acResponseInternalErrorsConfig <$> getAppContext (_wseAppStateRef serverEnv)
mask_
$ onMessage enabledLogTypes getAuthMode serverEnv conn bs (wsActions sp) licenseKeyCache responseErrorsConfig
$ onMessage enabledLogTypes getAuthMode serverEnv conn bs (wsActions sp) licenseKeyCache responseErrorsConfig headerPrecedence
onCloseHandler conn = mask_ do
granularPrometheusMetricsState <- runGetPrometheusMetricsGranularity

View File

@ -103,7 +103,7 @@ import Hasura.Server.Prometheus
PrometheusMetrics (..),
)
import Hasura.Server.Telemetry.Counters qualified as Telem
import Hasura.Server.Types (GranularPrometheusMetricsState (..), ModelInfoLogState (..), MonadGetPolicies (..), RemoteSchemaResponsePriority, RequestId, getRequestId)
import Hasura.Server.Types (GranularPrometheusMetricsState (..), HeaderPrecedence, ModelInfoLogState (..), MonadGetPolicies (..), RemoteSchemaResponsePriority, RequestId, getRequestId)
import Hasura.Services.Network
import Hasura.Session
import Hasura.Tracing qualified as Tracing
@ -446,8 +446,9 @@ onStart ::
StartMsg ->
WS.WSActions WSConnData ->
ResponseInternalErrorsConfig ->
HeaderPrecedence ->
m ()
onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables (StartMsg opId q) onMessageActions responseErrorsConfig = catchAndIgnore $ do
onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables (StartMsg opId q) onMessageActions responseErrorsConfig headerPrecedence = catchAndIgnore $ do
modelInfoLogStatus' <- runGetModelInfoLogStatus
modelInfoLogStatus <- liftIO modelInfoLogStatus'
timerTot <- startTimer
@ -516,6 +517,7 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
maybeOperationName
requestId
responseErrorsConfig
headerPrecedence
(parameterizedQueryHash, execPlan, modelInfoList) <- onLeft execPlanE (withComplete . preExecErr requestId (Just gqlOpType))
@ -1095,8 +1097,9 @@ onMessage ::
WS.WSActions WSConnData ->
Maybe (CredentialCache AgentLicenseKey) ->
ResponseInternalErrorsConfig ->
HeaderPrecedence ->
m ()
onMessage enabledLogTypes authMode serverEnv wsConn msgRaw onMessageActions agentLicenseKey responseErrorsConfig = do
onMessage enabledLogTypes authMode serverEnv wsConn msgRaw onMessageActions agentLicenseKey responseErrorsConfig headerPrecedence = do
Tracing.newTrace (_wseTraceSamplingPolicy serverEnv) "websocket" do
case J.eitherDecode msgRaw of
Left e -> do
@ -1120,7 +1123,7 @@ onMessage enabledLogTypes authMode serverEnv wsConn msgRaw onMessageActions agen
if _mcAnalyzeQueryVariables (scMetricsConfig schemaCache)
then CaptureQueryVariables
else DoNotCaptureQueryVariables
onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables startMsg onMessageActions responseErrorsConfig
onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables startMsg onMessageActions responseErrorsConfig headerPrecedence
CMStop stopMsg -> do
granularPrometheusMetricsState <- runGetPrometheusMetricsGranularity
onStop serverEnv wsConn stopMsg granularPrometheusMetricsState

View File

@ -588,7 +588,7 @@ v1Alpha1GQHandler queryType query = do
reqHeaders <- asks hcReqHeaders
ipAddress <- asks hcSourceIpAddress
requestId <- asks hcRequestId
GH.runGQBatched acEnvironment acSQLGenCtx schemaCache acEnableAllowlist appEnvEnableReadOnlyMode appEnvPrometheusMetrics (_lsLogger appEnvLoggers) appEnvLicenseKeyCache requestId acResponseInternalErrorsConfig acRemoteSchemaResponsePriority userInfo ipAddress reqHeaders queryType query
GH.runGQBatched acEnvironment acSQLGenCtx schemaCache acEnableAllowlist appEnvEnableReadOnlyMode appEnvPrometheusMetrics (_lsLogger appEnvLoggers) appEnvLicenseKeyCache requestId acResponseInternalErrorsConfig acRemoteSchemaResponsePriority acHeaderPrecedence userInfo ipAddress reqHeaders queryType query
v1GQHandler ::
( MonadIO m,
@ -954,7 +954,7 @@ httpApp setupHook appStateRef AppEnv {..} consoleType ekgStore closeWebsocketsOn
Spock.PATCH -> pure EP.PATCH
other -> throw400 BadRequest $ "Method " <> tshow other <> " not supported."
_ -> throw400 BadRequest $ "Nonstandard method not allowed for REST endpoints"
fmap JSONResp <$> runCustomEndpoint acEnvironment acSQLGenCtx schemaCache acEnableAllowlist appEnvEnableReadOnlyMode acRemoteSchemaResponsePriority appEnvPrometheusMetrics (_lsLogger appEnvLoggers) appEnvLicenseKeyCache requestId userInfo reqHeaders ipAddress req endpoints responseErrorsConfig
fmap JSONResp <$> runCustomEndpoint acEnvironment acSQLGenCtx schemaCache acEnableAllowlist appEnvEnableReadOnlyMode acRemoteSchemaResponsePriority acHeaderPrecedence appEnvPrometheusMetrics (_lsLogger appEnvLoggers) appEnvLicenseKeyCache requestId userInfo reqHeaders ipAddress req endpoints responseErrorsConfig
-- See Issue #291 for discussion around restified feature
Spock.hookRouteAll ("api" <//> "rest" <//> Spock.wildcard) $ \wildcard -> do

View File

@ -224,6 +224,7 @@ mkServeOptions sor@ServeOptionsRaw {..} = do
soPersistedQueries <- withOptionDefault rsoPersistedQueries persistedQueriesOption
soPersistedQueriesTtl <- withOptionDefault rsoPersistedQueriesTtl persistedQueriesTtlOption
soRemoteSchemaResponsePriority <- withOptionDefault rsoRemoteSchemaResponsePriority remoteSchemaResponsePriorityOption
soHeaderPrecedence <- withOptionDefault rsoHeaderPrecedence configuredHeaderPrecedenceOption
pure ServeOptions {..}
-- | Fetch Postgres 'Query.ConnParams' components from the environment

View File

@ -69,6 +69,7 @@ module Hasura.Server.Init.Arg.Command.Serve
persistedQueriesOption,
persistedQueriesTtlOption,
remoteSchemaResponsePriorityOption,
configuredHeaderPrecedenceOption,
-- * Pretty Printer
serveCmdFooter,
@ -164,6 +165,7 @@ serveCommandParser =
<*> parsePersistedQueries
<*> parsePersistedQueriesTtl
<*> parseRemoteSchemaResponsePriority
<*> parseConfiguredHeaderPrecedence
--------------------------------------------------------------------------------
-- Serve Options
@ -1329,6 +1331,25 @@ parseRemoteSchemaResponsePriority =
<> Opt.help (Config._helpMessage remoteSchemaResponsePriorityOption)
)
-- | CLI parser for the optional @--configured-header-precedence@ flag.
--
-- The flag is parsed via the same 'Env.fromEnv' boolean reader used for the
-- corresponding environment variable, so @true@/@false@ spellings stay
-- consistent between the two configuration channels.
parseConfiguredHeaderPrecedence :: Opt.Parser (Maybe Types.HeaderPrecedence)
parseConfiguredHeaderPrecedence =
  Opt.optional (Opt.option precedenceReader flagModifiers)
  where
    -- Reuse the FromEnv parsing so CLI and env var accept identical input.
    precedenceReader = Opt.eitherReader Env.fromEnv
    flagModifiers =
      Opt.long "configured-header-precedence"
        <> Opt.help (Config._helpMessage configuredHeaderPrecedenceOption)
-- | Option metadata for the configured-header-precedence setting.
--
-- Defaults to 'Types.ClientHeadersFirst' to preserve the historical behaviour
-- where client-supplied headers win over metadata-configured headers when both
-- carry the same header name.
configuredHeaderPrecedenceOption :: Config.Option Types.HeaderPrecedence
configuredHeaderPrecedenceOption =
  Config.Option
    { Config._default = Types.ClientHeadersFirst,
      Config._envVar = "HASURA_GRAPHQL_CONFIGURED_HEADER_PRECEDENCE",
      Config._helpMessage =
        -- NOTE: keep the trailing space on the first fragment; the two string
        -- literals are concatenated directly and would otherwise render as
        -- "client headerswhen delivering".
        "Forward configured metadata headers with higher precedence than client headers "
          <> "when delivering payload to webhook for actions and input validations. (default: false)"
    }
--------------------------------------------------------------------------------
-- Pretty Printer
@ -1434,6 +1455,7 @@ serveCmdFooter =
Config.optionPP triggersErrorLogLevelStatusOption,
Config.optionPP asyncActionsFetchBatchSizeOption,
Config.optionPP persistedQueriesOption,
Config.optionPP persistedQueriesTtlOption
Config.optionPP persistedQueriesTtlOption,
Config.optionPP configuredHeaderPrecedenceOption
]
eventEnvs = [Config.optionPP graphqlEventsHttpPoolSizeOption, Config.optionPP graphqlEventsFetchIntervalOption]

View File

@ -329,7 +329,8 @@ data ServeOptionsRaw impl = ServeOptionsRaw
rsoAsyncActionsFetchBatchSize :: Maybe Int,
rsoPersistedQueries :: Maybe Server.Types.PersistedQueriesState,
rsoPersistedQueriesTtl :: Maybe Int,
rsoRemoteSchemaResponsePriority :: Maybe Server.Types.RemoteSchemaResponsePriority
rsoRemoteSchemaResponsePriority :: Maybe Server.Types.RemoteSchemaResponsePriority,
rsoHeaderPrecedence :: Maybe Server.Types.HeaderPrecedence
}
-- | Whether or not to serve Console assets.
@ -636,7 +637,8 @@ data ServeOptions impl = ServeOptions
soAsyncActionsFetchBatchSize :: Int,
soPersistedQueries :: Server.Types.PersistedQueriesState,
soPersistedQueriesTtl :: Int,
soRemoteSchemaResponsePriority :: Server.Types.RemoteSchemaResponsePriority
soRemoteSchemaResponsePriority :: Server.Types.RemoteSchemaResponsePriority,
soHeaderPrecedence :: Server.Types.HeaderPrecedence
}
-- | 'ResponseInternalErrorsConfig' represents the encoding of the

View File

@ -390,3 +390,6 @@ instance FromEnv Server.Types.PersistedQueriesState where
instance FromEnv Server.Types.RemoteSchemaResponsePriority where
fromEnv = fmap (bool Server.Types.RemoteSchemaResponseErrors Server.Types.RemoteSchemaResponseData) . fromEnv @Bool
-- | Parse the env var as a boolean: @true@ gives configured headers
-- precedence, @false@ keeps the default client-headers-first behaviour.
instance FromEnv Server.Types.HeaderPrecedence where
  fromEnv raw = toPrecedence <$> fromEnv @Bool raw
    where
      toPrecedence True = Server.Types.ConfiguredHeadersFirst
      toPrecedence False = Server.Types.ClientHeadersFirst

View File

@ -119,6 +119,7 @@ runCustomEndpoint ::
Init.AllowListStatus ->
ReadOnlyMode ->
RemoteSchemaResponsePriority ->
HeaderPrecedence ->
PrometheusMetrics ->
L.Logger L.Hasura ->
Maybe (CredentialCache AgentLicenseKey) ->
@ -130,7 +131,7 @@ runCustomEndpoint ::
EndpointTrie GQLQueryWithText ->
Init.ResponseInternalErrorsConfig ->
m (HttpLogGraphQLInfo, HttpResponse EncJSON)
runCustomEndpoint env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePriority prometheusMetrics logger agentLicenseKey requestId userInfo reqHeaders ipAddress RestRequest {..} endpoints responseErrorsConfig = do
runCustomEndpoint env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePriority headerPrecedence prometheusMetrics logger agentLicenseKey requestId userInfo reqHeaders ipAddress RestRequest {..} endpoints responseErrorsConfig = do
-- First match the path to an endpoint.
case matchPath reqMethod (T.split (== '/') reqPath) endpoints of
MatchFound (queryx :: EndpointMetadata GQLQueryWithText) matches ->
@ -160,7 +161,7 @@ runCustomEndpoint env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePri
-- with the query string from the schema cache, and pass it
-- through to the /v1/graphql endpoint.
(httpLoggingMetadata, handlerResp) <- do
(gqlOperationLog, resp) <- GH.runGQ env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePriority prometheusMetrics logger agentLicenseKey requestId userInfo ipAddress reqHeaders E.QueryHasura (mkPassthroughRequest queryx resolvedVariables) responseErrorsConfig
(gqlOperationLog, resp) <- GH.runGQ env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePriority headerPrecedence prometheusMetrics logger agentLicenseKey requestId userInfo ipAddress reqHeaders E.QueryHasura (mkPassthroughRequest queryx resolvedVariables) responseErrorsConfig
let httpLoggingGQInfo = (CommonHttpLogMetadata RequestModeNonBatchable Nothing, (PQHSetSingleton (gqolParameterizedQueryHash gqlOperationLog)))
return (httpLoggingGQInfo, fst <$> resp)
case sequence handlerResp of

View File

@ -32,6 +32,7 @@ module Hasura.Server.Types
ExtQueryReqs (..),
MonadGetPolicies (..),
RemoteSchemaResponsePriority (..),
HeaderPrecedence (..),
)
where
@ -375,3 +376,24 @@ data RemoteSchemaResponsePriority
RemoteSchemaResponseData
| -- | Errors from the remote schema is sent
RemoteSchemaResponseErrors
-- | The precedence of headers when delivering the payload to a webhook
-- (used for actions and input validations, per the option's help text).
-- Default is `ClientHeadersFirst` to preserve the old behaviour where client headers are
-- given higher precedence than configured metadata headers.
data HeaderPrecedence
  = -- | Metadata-configured headers win on a name collision.
    ConfiguredHeadersFirst
  | -- | Client-supplied request headers win on a name collision (legacy default).
    ClientHeadersFirst
  deriving (Eq, Show, Generic)
-- | Decoded from a JSON boolean: @true@ means configured headers take
-- precedence, @false@ means client headers take precedence.
instance FromJSON HeaderPrecedence where
  parseJSON = withBool "HeaderPrecedence" $ \configuredFirst ->
    pure
      $ if configuredFirst
        then ConfiguredHeadersFirst
        else ClientHeadersFirst
-- | Serialised back to the JSON boolean form; inverse of the 'FromJSON'
-- instance.
instance ToJSON HeaderPrecedence where
  toJSON ConfiguredHeadersFirst = Bool True
  toJSON ClientHeadersFirst = Bool False

View File

@ -100,7 +100,8 @@ emptyServeOptionsRaw =
rsoAsyncActionsFetchBatchSize = Nothing,
rsoPersistedQueries = Nothing,
rsoPersistedQueriesTtl = Nothing,
rsoRemoteSchemaResponsePriority = Nothing
rsoRemoteSchemaResponsePriority = Nothing,
rsoHeaderPrecedence = Nothing
}
mkServeOptionsSpec :: Hspec.Spec

View File

@ -99,7 +99,8 @@ serveOptions =
soAsyncActionsFetchBatchSize = Init._default Init.asyncActionsFetchBatchSizeOption,
soPersistedQueries = Init._default Init.persistedQueriesOption,
soPersistedQueriesTtl = Init._default Init.persistedQueriesTtlOption,
soRemoteSchemaResponsePriority = Init._default Init.remoteSchemaResponsePriorityOption
soRemoteSchemaResponsePriority = Init._default Init.remoteSchemaResponsePriorityOption,
soHeaderPrecedence = Init._default Init.configuredHeaderPrecedenceOption
}
-- | What log level should be used by the engine; this is not exported, and