mirror of https://github.com/hasura/graphql-engine.git
synced 2024-12-15 01:12:56 +03:00

server: log additional info in the livequery poller logs

https://github.com/hasura/graphql-engine-mono/pull/1529
GitOrigin-RevId: 27060632d5ac0da3f695c6755350f4e32dc0efc1
This commit is contained in:
parent
c651c9b2ff
commit
62e7fe62db
@@ -3,6 +3,8 @@
 ## Next release

 (Add entries below in the order of server, console, cli, docs, others)

+- server: make improvements in the `livequery-poller-log`
+
 ## v2.0.0-beta.2

 ### Bug fixes and improvements
@@ -542,6 +542,7 @@ library
     , Hasura.GraphQL.Execute.Resolve
     , Hasura.GraphQL.Execute.Types
     , Hasura.GraphQL.Explain
+    , Hasura.GraphQL.ParameterizedQueryHash
     , Hasura.GraphQL.Parser
     , Hasura.GraphQL.Parser.Class
     , Hasura.GraphQL.Parser.Class.Parse
@@ -46,10 +46,10 @@ import qualified Hasura.Tracing as Tracing

 import Hasura.Base.Error
 import Hasura.EncJSON
+import Hasura.GraphQL.ParameterizedQueryHash
 import Hasura.GraphQL.Parser.Column (UnpreparedValue)
 import Hasura.GraphQL.Parser.Directives
 import Hasura.GraphQL.Parser.Monad
-import Hasura.GraphQL.Parser.Schema (Variable)
 import Hasura.GraphQL.RemoteServer (execRemoteGQ)
 import Hasura.GraphQL.Transport.HTTP.Protocol
 import Hasura.Metadata.Class
@@ -263,7 +263,7 @@ getResolvedExecPlan
   -> HTTP.Manager
   -> [HTTP.Header]
   -> (GQLReqUnparsed, GQLReqParsed)
-  -> m (Telem.CacheHit, (G.SelectionSet G.NoFragments Variable, ResolvedExecutionPlan))
+  -> m (Telem.CacheHit, (ParameterizedQueryHash, ResolvedExecutionPlan))
 getResolvedExecPlan env logger {- planCache-} userInfo sqlGenCtx
   sc _scVer queryType httpManager reqHeaders (reqUnparsed, reqParsed) = -- do
@@ -285,7 +285,7 @@ getResolvedExecPlan env logger {- planCache-} userInfo sqlGenCtx
     -- addPlanToCache plan =
     --   liftIO $ EP.addPlan scVer (userRole userInfo)
     --   opNameM queryStr plan planCache
-    noExistingPlan :: m (G.SelectionSet G.NoFragments Variable, ResolvedExecutionPlan)
+    noExistingPlan :: m (ParameterizedQueryHash, ResolvedExecutionPlan)
     noExistingPlan = do
       -- GraphQL requests may incorporate fragments which insert a pre-defined
       -- part of a GraphQL query. Here we make sure to remember those
@@ -296,42 +296,47 @@ getResolvedExecPlan env logger {- planCache-} userInfo sqlGenCtx
             mapMaybe takeFragment $ unGQLExecDoc $ _grQuery reqParsed
       (gCtx, queryParts) <- getExecPlanPartial userInfo sc queryType reqParsed

-      case queryParts of
+      (normalizedSelectionSet, resolvedExecPlan) <-
+        case queryParts of
          G.TypedOperationDefinition G.OperationTypeQuery _ varDefs directives selSet -> do
            -- (Here the above fragment inlining is actually executed.)
            inlinedSelSet <- EI.inlineSelectionSet fragments selSet
            (executionPlan, queryRootFields, normalizedSelectionSet) <-
              EQ.convertQuerySelSet env logger gCtx userInfo httpManager reqHeaders directives inlinedSelSet varDefs (_grVariables reqUnparsed) (scSetGraphqlIntrospectionOptions sc)
            pure $ (normalizedSelectionSet, QueryExecutionPlan executionPlan queryRootFields)

            -- See Note [Temporarily disabling query plan caching]
            -- traverse_ (addPlanToCache . EP.RPQuery) plan
          G.TypedOperationDefinition G.OperationTypeMutation _ varDefs directives selSet -> do
            -- (Here the above fragment inlining is actually executed.)
            inlinedSelSet <- EI.inlineSelectionSet fragments selSet
            (executionPlan, normalizedSelectionSet) <-
              EM.convertMutationSelectionSet env logger gCtx sqlGenCtx userInfo httpManager reqHeaders directives inlinedSelSet varDefs (_grVariables reqUnparsed) (scSetGraphqlIntrospectionOptions sc)
            pure $ (normalizedSelectionSet, MutationExecutionPlan executionPlan)
            -- See Note [Temporarily disabling query plan caching]
            -- traverse_ (addPlanToCache . EP.RPQuery) plan
          G.TypedOperationDefinition G.OperationTypeSubscription _ varDefs directives selSet -> do
            -- (Here the above fragment inlining is actually executed.)
            inlinedSelSet <- EI.inlineSelectionSet fragments selSet
            -- Parse as query to check correctness
            (unpreparedAST, _reusability, normalizedDirectives, normalizedSelectionSet) <-
              EQ.parseGraphQLQuery gCtx varDefs (_grVariables reqUnparsed) directives inlinedSelSet
            -- Process directives on the subscription
            (dirMap, _) <- (`onLeft` reportParseErrors) =<<
              runParseT (parseDirectives customDirectives (G.DLExecutable G.EDLSUBSCRIPTION) normalizedDirectives)
            -- A subscription should have exactly one root field.
            -- However, for testing purposes, we may allow several root fields; we check for this by
            -- looking for directive "_multiple_top_level_fields" on the subscription. THIS IS NOT A
            -- SUPPORTED FEATURE. We might remove it in the future without warning. DO NOT USE THIS.
            allowMultipleRootFields <- withDirective dirMap multipleRootFields $ pure . isJust
            case inlinedSelSet of
              [_] -> pure ()
              []  -> throw500 "empty selset for subscription"
              _   -> unless allowMultipleRootFields $
                throw400 ValidationFailed "subscriptions must select one top level field"
            subscriptionPlan <- buildSubscriptionPlan userInfo unpreparedAST
            pure (normalizedSelectionSet, SubscriptionExecutionPlan subscriptionPlan)
+      -- the parameterized query hash is calculated here because it is used in
+      -- multiple places; calculating it once in this common spot lets us thread
+      -- it to wherever it is required
+      pure $ (calculateParameterizedQueryHash normalizedSelectionSet, resolvedExecPlan)
@@ -38,45 +38,52 @@ module Hasura.GraphQL.Execute.LiveQuery.Poll (
   , LGQResponse
   , LiveQueryResponse(..)
   , LiveQueryMetadata(..)
+  , SubscriberExecutionDetails (..)
+
+  -- * Batch
+  , BatchId (..)
   ) where

 import Data.List.Split (chunksOf)
 #ifndef PROFILING
 import GHC.AssertNF
 #endif
 import Hasura.Prelude

 import qualified Control.Concurrent.Async as A
 import qualified Control.Concurrent.STM as STM
 import qualified Control.Immortal as Immortal
 import qualified Crypto.Hash as CH
 import qualified Data.Aeson.Extended as J
-import qualified Data.Aeson.TH as J
 import qualified Data.ByteString as BS
 import qualified Data.HashMap.Strict as Map
 import qualified Data.Time.Clock as Clock
 import qualified Data.UUID as UUID
 import qualified Data.UUID.V4 as UUID

 import qualified ListT
 import qualified StmContainers.Map as STMMap

 import Control.Lens
+import Data.Monoid (Sum (..))
 import Data.Text.Extended

 import qualified Hasura.GraphQL.Execute.LiveQuery.TMap as TMap
 import qualified Hasura.Logging as L

 import Hasura.Base.Error
 import Hasura.GraphQL.Execute.Backend
 import Hasura.GraphQL.Execute.LiveQuery.Options
 import Hasura.GraphQL.Execute.LiveQuery.Plan
+import Hasura.GraphQL.ParameterizedQueryHash (ParameterizedQueryHash)
 import Hasura.GraphQL.Transport.Backend
 import Hasura.GraphQL.Transport.HTTP.Protocol
+import Hasura.GraphQL.Transport.WebSocket.Protocol (OperationId)
+import qualified Hasura.GraphQL.Transport.WebSocket.Server as WS
 import Hasura.RQL.Types.Backend
 import Hasura.RQL.Types.Common (SourceName, getNonNegativeInt)
+import Hasura.Server.Types (RequestId)
 import Hasura.Session


 -- ----------------------------------------------------------------------------------------------
 -- Subscribers
@@ -96,13 +103,21 @@ newtype SubscriberMetadata
   = SubscriberMetadata { unSubscriberMetadata :: J.Value }
   deriving (Show, Eq, J.ToJSON)

-mkSubscriberMetadata :: J.Value -> SubscriberMetadata
-mkSubscriberMetadata = SubscriberMetadata
+mkSubscriberMetadata :: WS.WSId -> OperationId -> Maybe OperationName -> RequestId -> SubscriberMetadata
+mkSubscriberMetadata websocketId operationId operationName reqId =
+  SubscriberMetadata $ J.object
+    [ "websocket_id" J..= websocketId
+    , "operation_id" J..= operationId
+    , "operation_name" J..= operationName
+    , "request_id" J..= reqId
+    ]

 data Subscriber
   = Subscriber
   { _sId :: !SubscriberId
   , _sMetadata :: !SubscriberMetadata
+  , _sRequestId :: !RequestId
+  , _sOperationName :: !(Maybe OperationName)
   , _sOnChangeCallback :: !OnChange
   }
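For reference, a minimal, self-contained sketch of the JSON shape the new `mkSubscriberMetadata` produces. The identifier values below are invented placeholders, and plain strings stand in for the real `WS.WSId`, `OperationId`, and `RequestId` types:

```haskell
{-# LANGUAGE OverloadedStrings #-}
import qualified Data.Aeson as J
import Data.Aeson ((.=))
import qualified Data.ByteString.Lazy.Char8 as BL

-- placeholder values only; the key names follow mkSubscriberMetadata
subscriberMetadata :: J.Value
subscriberMetadata = J.object
  [ "websocket_id"   .= ("d3f63f06-0fe2-4d0b-b2a2-a0f3b1ce7f20" :: String)  -- stands in for WS.WSId
  , "operation_id"   .= ("1" :: String)                                     -- stands in for OperationId
  , "operation_name" .= (Nothing :: Maybe String)                           -- Maybe OperationName
  , "request_id"     .= ("req-8d261e48" :: String)                          -- stands in for RequestId
  ]

main :: IO ()
main = BL.putStrLn (J.encode subscriberMetadata)
```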
@@ -150,6 +165,12 @@ data Cohort
   -- result changed, then merge them in the map of existing subscribers
   }

+-- | The @BatchId@ is a number-based ID to uniquely identify a batch within a
+-- single poll; it identifies the batch to which a cohort belongs.
+newtype BatchId
+  = BatchId { _unBatchId :: Int }
+  deriving (Show, Eq, J.ToJSON)
+
 {- Note [Blake2b faster than SHA-256]
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 At the time of writing, from https://blake2.net, it is stated,
@@ -214,9 +235,7 @@ pushResultToCohort
   -> Maybe ResponseHash
   -> LiveQueryMetadata
   -> CohortSnapshot
-  -> IO ( [(SubscriberId, SubscriberMetadata)]
-        , [(SubscriberId, SubscriberMetadata)]
-        )
+  -> IO ( [SubscriberExecutionDetails], [SubscriberExecutionDetails])
   -- ^ subscribers to which data has been pushed, subscribers which already
   -- have this data (this information is exposed by metrics reporting)
 pushResultToCohort result !respHashM (LiveQueryMetadata dTime) cohortSnapshot = do
@@ -233,14 +252,15 @@ pushResultToCohort result !respHashM (LiveQueryMetadata dTime) cohortSnapshot =
       else
         return (newSinks, curSinks)
   pushResultToSubscribers subscribersToPush
-  pure $ over (each.each) (\Subscriber{..} -> (_sId, _sMetadata))
+  pure $ over (each.each) (\Subscriber{..}
+                            -> SubscriberExecutionDetails _sId _sMetadata)
     (subscribersToPush, subscribersToIgnore)
   where
     CohortSnapshot _ respRef curSinks newSinks = cohortSnapshot

    response = result <&> \payload -> LiveQueryResponse payload dTime
    pushResultToSubscribers =
-      A.mapConcurrently_ $ \(Subscriber _ _ action) -> action response
+      A.mapConcurrently_ $ \Subscriber {..} -> _sOnChangeCallback response

 -- -----------------------------------------------------------------------------
 -- Pollers
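Here `over (each.each)` visits every subscriber in both lists of the pair at once, which is how all `Subscriber`s are converted to `SubscriberExecutionDetails` in one pass. A self-contained lens sketch of that traversal, with a simplified element type:

```haskell
-- `each . each` composed on a pair of lists reaches every element of
-- both lists; `over` then applies the conversion function to each one.
import Control.Lens (each, over)

main :: IO ()
main = print $ over (each . each) (* 10)
  ([1, 2, 3], [4, 5])  -- prints ([10,20,30],[40,50])
```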
@@ -320,6 +340,12 @@ dumpPollerMap extended lqMap =
 newtype PollerId = PollerId { unPollerId :: UUID.UUID }
   deriving (Show, Eq, Generic, J.ToJSON)

+data SubscriberExecutionDetails
+  = SubscriberExecutionDetails
+  { _sedSubscriberId       :: !SubscriberId
+  , _sedSubscriberMetadata :: !SubscriberMetadata
+  } deriving (Show, Eq)
+
 -- | Execution information related to a cohort on a poll cycle
 data CohortExecutionDetails
   = CohortExecutionDetails
@@ -327,58 +353,66 @@ data CohortExecutionDetails
   , _cedVariables :: !CohortVariables
   , _cedResponseSize :: !(Maybe Int)
   -- ^ Nothing in case of an error
-  , _cedPushedTo :: ![(SubscriberId, SubscriberMetadata)]
+  , _cedPushedTo :: ![SubscriberExecutionDetails]
   -- ^ The response on this cycle has been pushed to these above subscribers
   -- New subscribers (those which haven't been around during the previous poll
   -- cycle) will always be part of this
-  , _cedIgnored :: ![(SubscriberId, SubscriberMetadata)]
+  , _cedIgnored :: ![SubscriberExecutionDetails]
   -- ^ The response on this cycle has *not* been pushed to these above
   -- subscribers. This happens when the response hasn't changed from the
   -- previously polled cycle
+  , _cedBatchId :: !BatchId
   } deriving (Show, Eq)

-$(J.deriveToJSON hasuraJSON ''CohortExecutionDetails)

 -- | Execution information related to a single batched execution
 data BatchExecutionDetails
   = BatchExecutionDetails
   { _bedPgExecutionTime :: !Clock.DiffTime
   -- ^ postgres execution time of each batch
   , _bedPushTime :: !Clock.DiffTime
   -- ^ time taken to push to all cohorts belonging to this batch
-  , _bedCohorts :: ![CohortExecutionDetails]
+  , _bedBatchId :: !BatchId
+  -- ^ id of the batch
+  , _bedCohorts :: ![CohortExecutionDetails]
   -- ^ execution details of the cohorts belonging to this batch
+  , _bedBatchResponseSizeBytes :: !(Maybe Int)
   } deriving (Show, Eq)
 -- | see Note [Minimal LiveQuery Poller Log]
 batchExecutionDetailMinimal :: BatchExecutionDetails -> J.Value
 batchExecutionDetailMinimal BatchExecutionDetails{..} =
-  J.object [ "pg_execution_time" J..= _bedPgExecutionTime
-           , "push_time" J..= _bedPushTime
-           ]
-
-$(J.deriveToJSON hasuraJSON ''BatchExecutionDetails)
+  let batchRespSize =
+        maybe mempty
+          (\respSize -> ["batch_response_size_bytes" J..= respSize])
+          _bedBatchResponseSizeBytes
+  in
+    J.object ([ "pg_execution_time" J..= _bedPgExecutionTime
+              , "push_time" J..= _bedPushTime
+              ]
+              -- log batch resp size only when there are no errors
+              <> batchRespSize)
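The `maybe mempty` idiom above appends the optional size key only when the batch succeeded. A minimal, self-contained sketch of the same pattern, with plain `Double` timings standing in for `Clock.DiffTime`:

```haskell
{-# LANGUAGE OverloadedStrings #-}
import qualified Data.Aeson as J
import Data.Aeson ((.=))
import qualified Data.ByteString.Lazy.Char8 as BL

-- simplified stand-in for batchExecutionDetailMinimal: the size key is
-- appended only when a response size is available (no errors occurred)
minimalLog :: Double -> Double -> Maybe Int -> J.Value
minimalLog pgTime pushTime mRespSize =
  let batchRespSize =
        maybe mempty (\s -> ["batch_response_size_bytes" .= s]) mRespSize
  in J.object ([ "pg_execution_time" .= pgTime
               , "push_time" .= pushTime
               ]
               <> batchRespSize)

main :: IO ()
main = do
  BL.putStrLn $ J.encode $ minimalLog 2.1e-3 4.0e-4 (Just 1024)  -- size logged
  BL.putStrLn $ J.encode $ minimalLog 2.1e-3 4.0e-4 Nothing      -- error case: no size key
```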
 data PollDetails
   = PollDetails
   { _pdPollerId :: !PollerId
   -- ^ the unique ID (basically a thread that runs as a 'Poller') for the
   -- 'Poller'
   , _pdGeneratedSql :: !Text
   -- ^ the multiplexed SQL query to be run against the database with all the
   -- variables together
   , _pdSnapshotTime :: !Clock.DiffTime
   -- ^ the time taken to get a snapshot of cohorts from our 'LiveQueriesState'
   -- data structure
   , _pdBatches :: ![BatchExecutionDetails]
   -- ^ list of execution batches and their details
   , _pdTotalTime :: !Clock.DiffTime
   -- ^ total time spent on a poll cycle
   , _pdLiveQueryOptions :: !LiveQueriesOptions
+  , _pdSource :: !SourceName
+  , _pdRole :: !RoleName
+  , _pdParameterizedQueryHash :: !ParameterizedQueryHash
   } deriving (Show, Eq)

-$(J.deriveToJSON hasuraJSON ''PollDetails)

 {- Note [Minimal LiveQuery Poller Log]
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 We only want to log the minimal information in the livequery-poller-log as it
@@ -396,6 +430,8 @@ pollDetailMinimal PollDetails{..} =
            , "snapshot_time" J..= _pdSnapshotTime
            , "batches" J..= map batchExecutionDetailMinimal _pdBatches
            , "total_time" J..= _pdTotalTime
+           , "source" J..= _pdSource
+           , "role" J..= _pdRole
            ]

 instance L.ToEngineLog PollDetails L.Hasura where
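Putting the pieces together, the minimal poll-level log line now carries the source and role alongside the timings. A hedged sketch of the payload, reproducing only the keys visible in the hunk above, with invented placeholder values:

```haskell
{-# LANGUAGE OverloadedStrings #-}
import qualified Data.Aeson as J
import Data.Aeson ((.=))
import qualified Data.ByteString.Lazy.Char8 as BL

main :: IO ()
main = BL.putStrLn $ J.encode $ J.object
  [ "snapshot_time" .= (3.0e-4 :: Double)     -- _pdSnapshotTime
  , "batches"       .= ([] :: [J.Value])      -- batchExecutionDetailMinimal per batch
  , "total_time"    .= (1.2e-2 :: Double)     -- _pdTotalTime
  , "source"        .= ("default" :: String)  -- new: _pdSource
  , "role"          .= ("user" :: String)     -- new: _pdRole
  ]
```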
@@ -414,12 +450,14 @@ pollQuery
    . BackendTransport b
   => PollerId
   -> LiveQueriesOptions
-  -> SourceConfig b
+  -> (SourceName, SourceConfig b)
+  -> RoleName
+  -> ParameterizedQueryHash
   -> MultiplexedQuery b
   -> CohortMap
   -> LiveQueryPostPollHook
   -> IO ()
-pollQuery pollerId lqOpts sourceConfig query cohortMap postPollHook = do
+pollQuery pollerId lqOpts (sourceName, sourceConfig) roleName parameterizedQueryHash query cohortMap postPollHook = do
   (totalTime, (snapshotTime, batchesDetails)) <- withElapsedTime $ do

     -- snapshot the current cohorts and split them into batches
@@ -429,15 +467,21 @@ pollQuery pollerId lqOpts sourceConfig query cohortMap postPollHook = do
       cohorts <- STM.atomically $ TMap.toList cohortMap
       cohortSnapshots <- mapM (STM.atomically . getCohortSnapshot) cohorts
       -- cohorts are broken down into batches specified by the batch size
-      pure $ chunksOf (getNonNegativeInt (unBatchSize batchSize)) cohortSnapshots
+      let cohortBatches = chunksOf (getNonNegativeInt (unBatchSize batchSize)) cohortSnapshots
+      -- associate every batch with its BatchId
+      pure $ zip (BatchId <$> [1 .. ]) cohortBatches

     -- concurrently process each batch
-    batchesDetails <- A.forConcurrently cohortBatches $ \cohorts -> do
+    batchesDetails <- A.forConcurrently cohortBatches $ \(batchId, cohorts) -> do
       (queryExecutionTime, mxRes) <- runDBSubscription @b sourceConfig query $ over (each._2) _csVariables cohorts

       let lqMeta = LiveQueryMetadata $ convertDuration queryExecutionTime
           operations = getCohortOperations cohorts mxRes
+          -- the batch response size is the sum of the response sizes of the cohorts
+          batchResponseSize =
+            case mxRes of
+              Left _ -> Nothing
+              Right resp -> Just $ getSum $ foldMap (Sum . BS.length . snd) resp
       (pushTime, cohortsExecutionDetails) <- withElapsedTime $
         A.forConcurrently operations $ \(res, cohortId, respData, snapshot) -> do
           (pushedSubscribers, ignoredSubscribers) <-
@@ -448,8 +492,13 @@ pollQuery pollerId lqOpts sourceConfig query cohortMap postPollHook = do
             , _cedPushedTo = pushedSubscribers
             , _cedIgnored = ignoredSubscribers
             , _cedResponseSize = snd <$> respData
+            , _cedBatchId = batchId
             }
-      pure $ BatchExecutionDetails queryExecutionTime pushTime cohortsExecutionDetails
+      pure $ BatchExecutionDetails queryExecutionTime
+                                   pushTime
+                                   batchId
+                                   cohortsExecutionDetails
+                                   batchResponseSize

     pure (snapshotTime, batchesDetails)
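A self-contained sketch of the batching bookkeeping introduced above: cohorts are chunked by batch size, each chunk is tagged with a 1-based `BatchId`, and a batch's response size is the sum of its cohorts' response byte counts. The types below are simplified stand-ins; `chunksOf` is from the split package, as in the module itself:

```haskell
import Data.List.Split (chunksOf)  -- package "split"
import Data.Monoid (Sum (..))

newtype BatchId = BatchId Int deriving (Show, Eq)

-- tag each chunk of cohorts with its id, mirroring
-- `zip (BatchId <$> [1 ..]) cohortBatches` in the patch
mkBatches :: Int -> [cohort] -> [(BatchId, [cohort])]
mkBatches batchSize = zip (BatchId <$> [1 ..]) . chunksOf batchSize

-- sum of per-cohort response sizes, mirroring
-- `getSum $ foldMap (Sum . BS.length . snd) resp`
batchResponseSize :: [(cohortId, Int)] -> Int
batchResponseSize = getSum . foldMap (Sum . snd)

main :: IO ()
main = do
  print $ mkBatches 2 ["c1", "c2", "c3", "c4", "c5"]
  print $ batchResponseSize [((), 300), ((), 724)]  -- 1024
```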
@@ -460,6 +509,9 @@ pollQuery pollerId lqOpts sourceConfig query cohortMap postPollHook = do
         , _pdBatches = batchesDetails
         , _pdLiveQueryOptions = lqOpts
         , _pdTotalTime = totalTime
+        , _pdSource = sourceName
+        , _pdRole = roleName
+        , _pdParameterizedQueryHash = parameterizedQueryHash
         }
   postPollHook pollDetails
   where
@@ -46,11 +46,14 @@ import Hasura.GraphQL.Execute.Backend
 import Hasura.GraphQL.Execute.LiveQuery.Options
 import Hasura.GraphQL.Execute.LiveQuery.Plan
 import Hasura.GraphQL.Execute.LiveQuery.Poll
+import Hasura.GraphQL.ParameterizedQueryHash (ParameterizedQueryHash)
 import Hasura.GraphQL.Transport.Backend
+import Hasura.GraphQL.Transport.HTTP.Protocol (OperationName)
 import Hasura.GraphQL.Transport.WebSocket.Protocol
 import Hasura.RQL.Types.Action
 import Hasura.RQL.Types.Common (SourceName, unNonNegativeDiffTime)
 import Hasura.Server.Init (ServerMetrics (..))
+import Hasura.Server.Types (RequestId)


 -- | The top-level datatype that holds the state for all active live queries.
@@ -95,18 +98,23 @@ addLiveQuery
   -> SubscriberMetadata
   -> LiveQueriesState
   -> SourceName
+  -> ParameterizedQueryHash
+  -> Maybe OperationName
+  -- ^ operation name of the query
+  -> RequestId
   -> LiveQueryPlan b (MultiplexedQuery b)
   -> OnChange
   -- ^ the action to be executed when result changes
   -> IO LiveQueryId
-addLiveQuery logger serverMetrics subscriberMetadata lqState source plan onResultAction = do
+addLiveQuery logger serverMetrics subscriberMetadata lqState
+  source parameterizedQueryHash operationName requestId plan onResultAction = do
   -- CAREFUL!: It's absolutely crucial that we can't throw any exceptions here!

   -- disposable UUIDs:
   cohortId <- newCohortId
   subscriberId <- newSubscriberId

-  let !subscriber = Subscriber subscriberId subscriberMetadata onResultAction
+  let !subscriber = Subscriber subscriberId subscriberMetadata requestId operationName onResultAction

 #ifndef PROFILING
   $assertNFHere subscriber  -- so we don't write thunks to mutable vars
|
|||||||
onJust handlerM $ \handler -> do
|
onJust handlerM $ \handler -> do
|
||||||
pollerId <- PollerId <$> UUID.nextRandom
|
pollerId <- PollerId <$> UUID.nextRandom
|
||||||
threadRef <- forkImmortal ("pollQuery." <> show pollerId) logger $ forever $ do
|
threadRef <- forkImmortal ("pollQuery." <> show pollerId) logger $ forever $ do
|
||||||
pollQuery @b pollerId lqOpts sourceConfig query (_pCohorts handler) postPollHook
|
pollQuery @b pollerId lqOpts (source, sourceConfig) role parameterizedQueryHash query (_pCohorts handler) postPollHook
|
||||||
sleep $ unNonNegativeDiffTime $ unRefetchInterval refetchInterval
|
sleep $ unNonNegativeDiffTime $ unRefetchInterval refetchInterval
|
||||||
let !pState = PollerIOState threadRef pollerId
|
let !pState = PollerIOState threadRef pollerId
|
||||||
#ifndef PROFILING
|
#ifndef PROFILING
|
||||||
|
server/src-lib/Hasura/GraphQL/ParameterizedQueryHash.hs (new file, 135 lines)
@@ -0,0 +1,135 @@
+{-|
+This module calculates the parameterized query hash, which is a way to
+hash an incoming query (after resolving variables) with all leaf nodes
+(i.e. scalar values) discarded. In other words, two queries having the same
+parameterized query hash are essentially the same query but may differ in
+leaf values.
+
+For example:
+
+1. query {
+     authors (where: {id: {_eq: 2}}) {
+       id
+       name
+     }
+   }
+
+2. query {
+     authors (where: {id: {_eq: 203943}}) {
+       id
+       name
+     }
+   }
+
+3. query {
+     authors (where: {id: {_eq: $id}}) {
+       id
+       name
+     }
+   }
+
+   for any value of `id`
+
+4. query {
+     authors (where: $whereBoolExp) {
+       id
+       name
+     }
+   }
+
+   only when `whereBoolExp` is of the form
+
+   {
+     "id": {
+       "_eq": <id>
+     }
+   }
+
+All the above queries should result in the same parameterized query hash.
+
+The following steps are done to calculate the parameterized query hash:
+
+1. Normalize the GraphQL query by substituting the variables (if any) in appropriate places.
+2. Substitute any scalar GraphQL values (Int, Float, Enum, String and Boolean) with null.
+3. For input objects and lists, traverse through them and apply step 2.
+4. Calculate the hash of the query obtained from step 3.
+
+Note: the parameterized query hash is a PRO-only feature.
+-}
+
+module Hasura.GraphQL.ParameterizedQueryHash
+  ( calculateParameterizedQueryHash
+  , ParameterizedQueryHash
+  )
+where
+
+import Hasura.Prelude
+
+import qualified Data.Aeson as J
+import qualified Data.ByteString as B
+import qualified Data.HashMap.Strict as Map
+import qualified Language.GraphQL.Draft.Printer as G
+import qualified Language.GraphQL.Draft.Syntax as G
+import qualified Text.Builder as Text
+
+import Hasura.GraphQL.Parser (InputValue (..), Variable (..))
+
+import Hasura.Server.Utils (cryptoHash)
+
+newtype ParameterizedQueryHash
+  = ParameterizedQueryHash { unParamQueryHash :: B.ByteString }
+  deriving (Show, Eq)
+
+instance J.ToJSON ParameterizedQueryHash where
+  toJSON = J.String . bsToTxt . unParamQueryHash
+
+normalizeSelectionSet :: G.SelectionSet G.NoFragments Variable -> G.SelectionSet G.NoFragments Void
+normalizeSelectionSet = (normalizeSelection =<<)
+  where
+    normalizeSelection :: G.Selection G.NoFragments Variable -> G.SelectionSet G.NoFragments Void
+    normalizeSelection (G.SelectionField fld) = pure $ G.SelectionField (normalizeField fld)
+    normalizeSelection (G.SelectionInlineFragment (G.InlineFragment _ _ selSet)) =
+      normalizeSelectionSet selSet
+
+    normalizeField (G.Field _alias name args _directives selSet) =
+      G.Field Nothing name (Map.map normalizeValue args) mempty $ normalizeSelectionSet selSet
+
+    normalizeConstValue :: G.Value Void -> G.Value Void
+    normalizeConstValue = \case
+      G.VNull       -> G.VNull
+      G.VInt _      -> G.VNull
+      G.VFloat _    -> G.VNull
+      G.VString _   -> G.VNull
+      G.VBoolean _  -> G.VNull
+      G.VEnum _     -> G.VNull
+      G.VList l     -> G.VList $ map normalizeConstValue l
+      G.VObject obj -> G.VObject $ Map.map normalizeConstValue obj
+
+    jsonToNormalizedGQLVal :: J.Value -> G.Value Void
+    jsonToNormalizedGQLVal = \case
+      J.Null        -> G.VNull
+      J.Bool _      -> G.VNull
+      J.String _    -> G.VNull
+      J.Number _    -> G.VNull
+      J.Array l     -> G.VList $ jsonToNormalizedGQLVal <$> toList l
+      J.Object vals -> G.VObject $ Map.fromList $
+        flip map (Map.toList vals) $ \(key, val) ->
+          (G.unsafeMkName key, jsonToNormalizedGQLVal val)
+
+    normalizeValue :: G.Value Variable -> G.Value Void
+    normalizeValue = \case
+      G.VNull       -> G.VNull
+      G.VInt _      -> G.VNull
+      G.VFloat _    -> G.VNull
+      G.VString _   -> G.VNull
+      G.VBoolean _  -> G.VNull
+      G.VEnum _     -> G.VNull
+      G.VList l     -> G.VList $ map normalizeValue l
+      G.VObject obj -> G.VObject $ Map.map normalizeValue obj
+      G.VVariable (Variable _info _type value) ->
+        case value of
+          GraphQLValue val -> normalizeConstValue val
+          JSONValue v      -> jsonToNormalizedGQLVal v
+
+calculateParameterizedQueryHash :: G.SelectionSet G.NoFragments Variable -> ParameterizedQueryHash
+calculateParameterizedQueryHash = ParameterizedQueryHash . cryptoHash . Text.run . G.selectionSet . normalizeSelectionSet
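To illustrate the normalization idea, a self-contained sketch: replace every scalar leaf with null, then hash the result, so queries differing only in literal values collide by design. The `Value` type below and the use of `hash` from the hashable package are stand-ins for graphql-parser's `G.Value` and `cryptoHash`:

```haskell
import Data.Hashable (hash)  -- package "hashable", a stand-in for cryptoHash

-- simplified stand-in for graphql-parser's G.Value
data Value
  = VNull
  | VInt Integer
  | VString String
  | VBool Bool
  | VList [Value]
  | VObject [(String, Value)]
  deriving (Show, Eq)

-- mirrors normalizeConstValue: every scalar leaf becomes VNull,
-- lists and objects are traversed
normalize :: Value -> Value
normalize VNull         = VNull
normalize (VInt _)      = VNull
normalize (VString _)   = VNull
normalize (VBool _)     = VNull
normalize (VList xs)    = VList (map normalize xs)
normalize (VObject kvs) = VObject [(k, normalize v) | (k, v) <- kvs]

paramHash :: Value -> Int
paramHash = hash . show . normalize

main :: IO ()
main = do
  let q1 = VObject [("where", VObject [("id", VObject [("_eq", VInt 2)])])]
      q2 = VObject [("where", VObject [("id", VObject [("_eq", VInt 203943)])])]
  print (paramHash q1 == paramHash q2)  -- True: the literals do not affect the hash
```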
@@ -52,8 +52,8 @@ import Hasura.Base.Error
 import Hasura.EncJSON
 import Hasura.GraphQL.Logging (MonadQueryLog (logQueryLog),
                                QueryLog (..), QueryLogKind (..))
+import Hasura.GraphQL.ParameterizedQueryHash
 import Hasura.GraphQL.Parser.Column (UnpreparedValue (..))
-import Hasura.GraphQL.Parser.Schema (Variable)
 import Hasura.GraphQL.Transport.Backend
 import Hasura.GraphQL.Transport.HTTP.Protocol
 import Hasura.GraphQL.Transport.Instances ()
@@ -203,16 +203,16 @@ runGQ
   -> [HTTP.Header]
   -> E.GraphQLQueryType
   -> GQLReqUnparsed
-  -> m (G.SelectionSet G.NoFragments Variable, HttpResponse (Maybe GQResponse, EncJSON))
+  -> m (ParameterizedQueryHash, HttpResponse (Maybe GQResponse, EncJSON))
 runGQ env logger reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
-  (telemTimeTot_DT, (telemCacheHit, (telemQueryType, telemTimeIO_DT, telemLocality, resp, normalizedSelectionSet))) <- withElapsedTime $ do
+  (telemTimeTot_DT, (telemCacheHit, (telemQueryType, telemTimeIO_DT, telemLocality, resp, parameterizedQueryHash))) <- withElapsedTime $ do
    E.ExecutionCtx _ sqlGenCtx {- planCache -} sc scVer httpManager enableAL <- ask

    -- run system authorization on the GraphQL API
    reqParsed <- E.checkGQLExecution userInfo (reqHeaders, ipAddress) enableAL sc reqUnparsed
      >>= flip onLeft throwError

-   (telemCacheHit, (normalizedSelectionSet, execPlan)) <-
+   (telemCacheHit, (parameterizedQueryHash, execPlan)) <-
      E.getResolvedExecPlan
        env logger {- planCache -}
        userInfo sqlGenCtx sc scVer queryType
@@ -236,7 +236,7 @@ runGQ env logger reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
         case fmap decodeGQResp cachedValue of
           Just cachedResponseData -> do
             logQueryLog logger $ QueryLog reqUnparsed Nothing reqId QueryLogKindCached
-            pure (Telem.Query, 0, Telem.Local, HttpResponse cachedResponseData responseHeaders, normalizedSelectionSet)
+            pure (Telem.Query, 0, Telem.Local, HttpResponse cachedResponseData responseHeaders, parameterizedQueryHash)

           Nothing -> do
             conclusion <- runExceptT $ forWithKey queryPlans $ \fieldName -> \case
@@ -278,7 +278,7 @@ runGQ env logger reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
               logQueryLog logger $ QueryLog reqUnparsed Nothing reqId QueryLogKindIntrospection
               buildRaw json
             out@(_, _, _, HttpResponse responseData _, _) <-
-              buildResultFromFragments Telem.Query conclusion responseHeaders normalizedSelectionSet
+              buildResultFromFragments Telem.Query conclusion responseHeaders parameterizedQueryHash
             Tracing.interpTraceT (liftEitherM . runExceptT) $ cacheStore cacheKey $ snd responseData
             pure out
@@ -296,7 +296,7 @@ runGQ env logger reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
             resp <- runExceptT $ doQErr $
               runPGMutationTransaction reqId reqUnparsed userInfo logger sourceConfig pgMutations
             -- we do not construct result fragments since we have only one result
-            buildResult Telem.Mutation normalizedSelectionSet resp \(telemTimeIO_DT, results) ->
+            buildResult Telem.Mutation parameterizedQueryHash resp \(telemTimeIO_DT, results) ->
               let responseData = Right $ encJToLBS $ encJFromInsOrdHashMap $ OMap.mapKeys G.unName results
               in ( Telem.Mutation
                  , telemTimeIO_DT
@@ -304,7 +304,7 @@ runGQ env logger reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
                  , HttpResponse
                    (Just responseData, encodeGQResp responseData)
                    []
-                 , normalizedSelectionSet
+                 , parameterizedQueryHash
                  )

         -- we are not in the transaction case; proceeding normally
@@ -347,7 +347,7 @@ runGQ env logger reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
             E.ExecStepRaw json -> do
               logQueryLog logger $ QueryLog reqUnparsed Nothing reqId QueryLogKindIntrospection
               buildRaw json
-          buildResultFromFragments Telem.Mutation conclusion [] normalizedSelectionSet
+          buildResultFromFragments Telem.Mutation conclusion [] parameterizedQueryHash

       E.SubscriptionExecutionPlan _sub ->
         throw400 UnexpectedPayload "subscriptions are not supported over HTTP, use websockets instead"
@@ -356,7 +356,7 @@ runGQ env logger reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
       telemTimeTot = convertDuration telemTimeTot_DT
       telemTransport = Telem.HTTP
   Telem.recordTimingMetric Telem.RequestDimensions{..} Telem.RequestTimings{..}
-  return (normalizedSelectionSet, resp)
+  return (parameterizedQueryHash, resp)
   where
     getExecStepActionWithActionInfo acc execStep = case execStep of
       EB.ExecStepAction _ actionInfo _remoteJoins -> (actionInfo:acc)
@@ -377,15 +377,15 @@ runGQ env logger reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
      :: Telem.QueryType
      -> Either (Either GQExecError QErr) (InsOrdHashMap G.Name ResultsFragment)
      -> HTTP.ResponseHeaders
-     -> G.SelectionSet G.NoFragments Variable
+     -> ParameterizedQueryHash
      -> m ( Telem.QueryType
           , DiffTime
           , Telem.Locality
           , HttpResponse (Maybe GQResponse, EncJSON)
-          , G.SelectionSet G.NoFragments Variable
+          , ParameterizedQueryHash
           )
-   buildResultFromFragments telemType fragments cacheHeaders normalizedSelSet =
-     buildResult telemType normalizedSelSet fragments \results ->
+   buildResultFromFragments telemType fragments cacheHeaders parameterizedQueryHash =
+     buildResult telemType parameterizedQueryHash fragments \results ->
       let responseData = Right $ encJToLBS $ encJFromInsOrdHashMap $ rfResponse <$> OMap.mapKeys G.unName results
       in ( telemType
          , sum (fmap rfTimeIO results)
@@ -393,28 +393,28 @@ runGQ env logger reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
          , HttpResponse
            (Just responseData, encodeGQResp responseData)
            (cacheHeaders <> foldMap rfHeaders results)
-         , normalizedSelSet
+         , parameterizedQueryHash
          )

    buildResult
      :: Telem.QueryType
-     -> G.SelectionSet G.NoFragments Variable
+     -> ParameterizedQueryHash
      -> Either (Either GQExecError QErr) a
      -> (a ->
           ( Telem.QueryType
           , DiffTime
           , Telem.Locality
           , HttpResponse (Maybe GQResponse, EncJSON)
-          , G.SelectionSet G.NoFragments Variable
+          , ParameterizedQueryHash
           )
         )
      -> m ( Telem.QueryType
           , DiffTime
           , Telem.Locality
           , HttpResponse (Maybe GQResponse, EncJSON)
-          , G.SelectionSet G.NoFragments Variable
+          , ParameterizedQueryHash
           )
-   buildResult telemType normalizedSelSet result f = case result of
+   buildResult telemType parameterizedQueryHash result f = case result of
      Right a -> pure $ f a
      Left (Right err) -> throwError err
      Left (Left err) -> pure ( telemType
@@ -423,7 +423,7 @@ runGQ env logger reqId userInfo ipAddress reqHeaders queryType reqUnparsed = do
                              , HttpResponse
                                (Just (Left err), encodeGQResp $ Left err)
                                []
-                             , normalizedSelSet
+                             , parameterizedQueryHash
                              )

 coalescePostgresMutations
@@ -502,8 +502,8 @@ runGQBatched
 runGQBatched env logger reqId responseErrorsConfig userInfo ipAddress reqHdrs queryType query =
   case query of
     GQLSingleRequest req -> do
-      (normalizedSelectionSet, httpResp) <- runGQ env logger reqId userInfo ipAddress reqHdrs queryType req
-      let httpLoggingMetadata = buildHTTPLoggingMetadata @m [normalizedSelectionSet]
+      (parameterizedQueryHash, httpResp) <- runGQ env logger reqId userInfo ipAddress reqHdrs queryType req
+      let httpLoggingMetadata = buildHTTPLoggingMetadata @m [parameterizedQueryHash]
       pure (httpLoggingMetadata, snd <$> httpResp)
     GQLBatchedReqs reqs -> do
       -- It's unclear what we should do if we receive multiple
@@ -378,7 +378,7 @@ onStart env serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
                   userInfo sqlGenCtx sc scVer queryType
                   httpMgr reqHdrs (q, reqParsed)

-  (telemCacheHit, (_normalizeSelSet, execPlan)) <- onLeft execPlanE (withComplete . preExecErr requestId)
+  (telemCacheHit, (parameterizedQueryHash, execPlan)) <- onLeft execPlanE (withComplete . preExecErr requestId)

   case execPlan of
     E.QueryExecutionPlan queryPlan asts -> Tracing.trace "Query" $ do
@@ -539,7 +539,7 @@ onStart env serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
     E.SEOnSourceDB actionIds liveQueryBuilder -> do
       actionLogMapE <- fmap fst <$> runExceptT (EA.fetchActionLogResponses actionIds)
       actionLogMap <- onLeft actionLogMapE (withComplete . preExecErr requestId)
-      lqIdE <- liftIO $ startLiveQuery liveQueryBuilder actionLogMap
+      lqIdE <- liftIO $ startLiveQuery liveQueryBuilder parameterizedQueryHash requestId actionLogMap
       lqId <- onLeft lqIdE (withComplete . preExecErr requestId)

       -- Update async action query subscription state
@@ -552,7 +552,8 @@ onStart env serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
       logQueryLog logger $ QueryLog q Nothing requestId QueryLogKindAction
       liftIO $ do
         let asyncActionQueryLive = LQ.LAAQOnSourceDB $
-              LQ.LiveAsyncActionQueryOnSource lqId actionLogMap $ restartLiveQuery liveQueryBuilder
+              LQ.LiveAsyncActionQueryOnSource lqId actionLogMap $
+                restartLiveQuery parameterizedQueryHash requestId liveQueryBuilder

    onUnexpectedException err = do
      sendError requestId err
@@ -659,24 +660,20 @@ onStart env serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
      liftIO $ sendCompleted Nothing
      throwError ()

-    restartLiveQuery liveQueryBuilder lqId actionLogMap = do
+    restartLiveQuery parameterizedQueryHash requestId liveQueryBuilder lqId actionLogMap = do
      LQ.removeLiveQuery logger (_wseServerMetrics serverEnv) lqMap lqId
-     either (const Nothing) Just <$> startLiveQuery liveQueryBuilder actionLogMap
+     either (const Nothing) Just <$> startLiveQuery liveQueryBuilder parameterizedQueryHash requestId actionLogMap

-    startLiveQuery liveQueryBuilder actionLogMap = do
+    startLiveQuery liveQueryBuilder parameterizedQueryHash requestId actionLogMap = do
      liveQueryE <- runExceptT $ liveQueryBuilder actionLogMap
      for liveQueryE $ \(sourceName, E.LQP exists) -> do
-       let subscriberMetadata = LQ.mkSubscriberMetadata $ J.object
-             [ "websocket_id" J..= WS.getWSId wsConn
-             , "operation_id" J..= opId
-             ]
+       let !opName = _grOperationName q
+           subscriberMetadata = LQ.mkSubscriberMetadata (WS.getWSId wsConn) opId opName requestId

        -- NOTE!: we mask async exceptions higher in the call stack, but it's
        -- crucial we don't lose lqId after addLiveQuery returns successfully.
        !lqId <- liftIO $ AB.dispatchAnyBackend @BackendTransport exists
          \(E.MultiplexedLiveQueryPlan liveQueryPlan) ->
-           LQ.addLiveQuery logger (_wseServerMetrics serverEnv) subscriberMetadata lqMap sourceName liveQueryPlan liveQOnChange
+           LQ.addLiveQuery logger (_wseServerMetrics serverEnv) subscriberMetadata lqMap sourceName parameterizedQueryHash opName requestId liveQueryPlan liveQOnChange
-       let !opName = _grOperationName q
 #ifndef PROFILING
        liftIO $ $assertNFHere (lqId, opName)  -- so we don't write thunks to mutable vars
 #endif
@@ -20,32 +20,32 @@ module Hasura.Server.Logging

 import Hasura.Prelude

 import qualified Data.ByteString.Lazy as BL
 import qualified Data.Environment as Env
 import qualified Data.HashMap.Strict as HM
 import qualified Data.TByteString as TBS
 import qualified Data.Text as T
-import qualified Language.GraphQL.Draft.Syntax as G
 import qualified Network.HTTP.Types as HTTP
 import qualified Network.Wai.Extended as Wai

 import Data.Aeson
 import Data.Aeson.TH
 import Data.Int (Int64)
 import Data.Text.Extended

 import Hasura.Base.Error
-import Hasura.GraphQL.Parser.Schema (Variable)
+import Hasura.GraphQL.ParameterizedQueryHash
 import Hasura.HTTP
 import Hasura.Logging
 import Hasura.Metadata.Class
 import Hasura.RQL.Types
 import Hasura.Server.Compression
 import Hasura.Server.Types
-import Hasura.Server.Utils (DeprecatedEnvVars (..), EnvVarsMovedToMetadata (..),
-                            deprecatedEnvVars, envVarsMovedToMetadata)
+import Hasura.Server.Utils (DeprecatedEnvVars (..),
+                            EnvVarsMovedToMetadata (..),
+                            deprecatedEnvVars, envVarsMovedToMetadata)
 import Hasura.Session
 import Hasura.Tracing (TraceT)


 data StartupLog
@@ -130,7 +130,7 @@ class (Monad m, Monoid (HTTPLoggingMetadata m)) => HttpLog m where

   type HTTPLoggingMetadata m

-  buildHTTPLoggingMetadata :: [(G.SelectionSet G.NoFragments Variable)] -> HTTPLoggingMetadata m
+  buildHTTPLoggingMetadata :: [ParameterizedQueryHash] -> HTTPLoggingMetadata m

   logHttpError
     :: Logger Hasura
@@ -156,8 +156,8 @@ runCustomEndpoint env execCtx requestId userInfo reqHeaders ipAddress RestReques
       -- with the query string from the schema cache, and pass it
       -- through to the /v1/graphql endpoint.
       (httpLoggingMetadata, handlerResp) <- flip runReaderT execCtx $ do
-        (normalizedSelectionSet, resp) <- GH.runGQ env (E._ecxLogger execCtx) requestId userInfo ipAddress reqHeaders E.QueryHasura (mkPassthroughRequest queryx resolvedVariables)
-        let httpLoggingMetadata = buildHTTPLoggingMetadata @m [normalizedSelectionSet]
+        (parameterizedQueryHash, resp) <- GH.runGQ env (E._ecxLogger execCtx) requestId userInfo ipAddress reqHeaders E.QueryHasura (mkPassthroughRequest queryx resolvedVariables)
+        let httpLoggingMetadata = buildHTTPLoggingMetadata @m [parameterizedQueryHash]
        return (httpLoggingMetadata, fst <$> resp)
      case sequence handlerResp of
        Just resp -> pure $ (httpLoggingMetadata, fmap encodeHTTPResp resp)
@@ -15,7 +15,7 @@ import Hasura.Server.Utils

 newtype RequestId
   = RequestId { unRequestId :: Text }
-  deriving (Show, Eq, ToJSON, FromJSON)
+  deriving (Show, Eq, ToJSON, FromJSON, Hashable)

 getRequestId :: (MonadIO m) => [HTTP.Header] -> m (RequestId, [HTTP.Header])
 getRequestId headers = do
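The `Hashable` instance is picked up by newtype deriving from the underlying `Text`, presumably so that a `RequestId` can key hash-based containers. A minimal sketch of the same derivation:

```haskell
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
import Data.Hashable (Hashable, hashWithSalt)
import Data.Text (Text, pack)

-- the newtype simply reuses the Hashable instance of the wrapped Text,
-- so equal request ids hash equally (e.g. as HashMap keys)
newtype RequestId = RequestId { unRequestId :: Text }
  deriving (Show, Eq, Hashable)

main :: IO ()
main = print $ hashWithSalt 0 (RequestId (pack "req-42"))
```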