INFRA-832: logs OTLP export

https://hasurahq.atlassian.net/browse/INFRA-832

Foundational work already merged:
- #10171
- 0184ba8bfcae9b
- 699317ffd061d3

PR-URL: https://github.com/hasura/graphql-engine-mono/pull/10238
Co-authored-by: Varun Choudhary <68095256+Varun-Choudhary@users.noreply.github.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Toan Nguyen  <1615675+hgiasac@users.noreply.github.com>
GitOrigin-RevId: d88c6a1aafe74e7393873aacc61e6fce3bc7c068
This commit is contained in:
kodiakhq[bot] 2023-09-13 16:48:28 +00:00 committed by hasura-bot
parent f915c7d1a2
commit 07bad7c498
33 changed files with 280 additions and 117 deletions

View File

@ -107,6 +107,7 @@ X-Hasura-Role: admin
"exporter_otlp": {
"otlp_traces_endpoint": "http://localhost:4318/v1/traces",
"otlp_metrics_endpoint": "http://localhost:4318/v1/metrics",
"otlp_logs_endpoint": "http://localhost:4318/v1/logs",
"protocol": "http/protobuf",
"traces_propagators": ["tracecontext"],
"headers": [

View File

@ -116,7 +116,6 @@ keywords:
| datasets | true | `[String]` \| [FromEnv](#fromenv) | List of BigQuery datasets |
| global_select_limit | false | `Integer` | The maximum number of rows that can be returned, defaults to `1000` |
## PGSourceConnectionInfo {#pgsourceconnectioninfo}
| Key | Required | Schema | Description |
@ -1802,6 +1801,7 @@ Table columns can be referred by prefixing `$` e.g `$id`.
| --------------------- | -------- | --------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- |
| otlp_traces_endpoint | true | `String` | OpenTelemetry compliant receiver endpoint URL for traces (usually having path "/v1/traces") |
| otlp_metrics_endpoint | true | `String` | OpenTelemetry compliant receiver endpoint URL for metrics (usually having path "/v1/metrics") |
| otlp_logs_endpoint | true | `String` | OpenTelemetry compliant receiver endpoint URL for logs (usually having path "/v1/logs") |
| protocol | false | `String` | Protocol to be used for the communication with the receiver. Currently only supports `http/protobuf` |
| headers | false | \[[HeaderFromValue](#headerfromvalue) \| [HeaderFromEnv](#headerfromenv) \] | List of defined headers to be sent to the receiver |
| resource_attributes | false | \[[Attribute](#attribute)\] | List of resource attributes to be sent to the receiver |
@ -1809,6 +1809,6 @@ Table columns can be referred by prefixing `$` e.g `$id`.
## OpenTelemetryBatchSpanProcessor {#opentelemetrybatchspanprocessor}
| Key | required | Schema | Description |
| --------------------- | -------- | --------- | ------------------------------------------------------------------------ |
| max_export_batch_size | false | `Integer` | Maximum number of spans allowed per export request. Default value is 512 |
| Key | required | Schema | Description |
| --------------------- | -------- | --------- | -------------------------------------------------------------------------------- |
| max_export_batch_size | false | `Integer` | Maximum number of spans or logs allowed per export request. Default value is 512 |

View File

@ -18,7 +18,7 @@ import TabItem from '@theme/TabItem';
import Thumbnail from '@site/src/components/Thumbnail';
import ProductBadge from '@site/src/components/ProductBadge';
# Export Traces and Metrics to OpenTelemetry Compliant Receiver from Hasura
# Export Traces, Metrics and Logs to OpenTelemetry Compliant Receiver from Hasura
<div className="badge-container">
<ProductBadge free pro ee self />
@ -38,18 +38,13 @@ subscriptions with the [OpenTelemetry](https://opentelemetry.io/docs/concepts/si
be exported directly from your Hasura instances to your observability tool that supports OpenTelemetry traces. This can
be configured in the `Settings` section of the Hasura Console.
:::info Support for metrics and logs
Currently, the OpenTelemetry Integration exports traces and metrics. The support for logs will be added in the future.
:::
## Configure the OpenTelemetry receiver
:::info Supported from
OpenTelemetry traces are supported for Hasura GraphQL Engine versions `v2.18.0` and above on Self-Hosted Enterprise,
with support for metrics export added in `v2.31.0` and made available to all cloud tiers.
with support for metrics export added in `v2.31.0` and for logs export added in `v2.35.0`. Also available to all cloud
tiers.
We have deprecated the previous Open Telemetry exporter integration on Hasura Cloud in favor of this native feature, and
you can now configure this directly via the Hasura Console's `Settings` page under the `Monitoring & Observability`
@ -94,6 +89,7 @@ exporter_otlp:
value: us-east
otlp_traces_endpoint: http://host.docker.internal:4318/v1/traces
otlp_metrics_endpoint: http://host.docker.internal:4318/v1/metrics
otlp_logs_endpoint: http://host.docker.internal:4318/v1/logs
protocol: http/protobuf
batch_span_processor:
max_export_batch_size: 512
@ -149,8 +145,8 @@ OpenTelemetry Protocol over HTTP (OTLP/HTTP with binary-encoded Protobuf payload
### Data Type
Selects the type of observability data points to be exported. `Traces` and `Metrics` are the only data types that are
currently supported.
Selects the type of observability data points to be exported. `Traces`, `Logs` and `Metrics` are the only data types
that are currently supported.
### Batch Size

Binary file not shown.

Before

Width:  |  Height:  |  Size: 173 KiB

After

Width:  |  Height:  |  Size: 206 KiB

View File

@ -54,6 +54,7 @@ export const DisabledWithoutLicense: StoryObj<typeof OpenTelemetry> = {
headers: [],
tracesEndpoint: '',
metricsEndpoint: '',
logsEndpoint: '',
batchSize: 512,
attributes: [],
dataType: ['traces'],
@ -88,6 +89,7 @@ export const Enabled: StoryObj<typeof OpenTelemetry> = {
headers: [],
tracesEndpoint: '',
metricsEndpoint: '',
logsEndpoint: '',
batchSize: 512,
attributes: [],
dataType: ['traces'],
@ -154,8 +156,9 @@ const metadataLoadedProps: ComponentPropsWithoutRef<typeof OpenTelemetry> = {
// Using non-default values
enabled: true,
batchSize: 99,
dataType: ['traces', 'metrics'],
dataType: ['traces', 'metrics', 'logs'],
connectionType: 'http/protobuf',
logsEndpoint: 'http://localhost:1234',
tracesEndpoint: 'http://localhost:1234',
metricsEndpoint: 'http://localhost:1234',
tracesPropagators: ['tracecontext'],
@ -219,9 +222,15 @@ export const DefaultValues: StoryObj = {
selector: 'input',
});
// STEP: check the value of the Logs Endpoint
const logsEndpoint = await canvas.findByLabelText('Logs Endpoint', {
selector: 'input',
});
expect(batchSizeInputField).toHaveValue(99);
expect(tracesEndpoint).toHaveValue('http://localhost:1234');
expect(metricsEndpoint).toHaveValue('http://localhost:1234');
expect(logsEndpoint).toHaveValue('http://localhost:1234');
// All the other input fields are not tested since if one input field has the correct default value
// all of the other input fields have the correct default value.

View File

@ -46,6 +46,7 @@ export function Form(props: FormProps) {
const traceType = dataType.includes('traces');
const metricsType = dataType.includes('metrics');
const logsType = dataType.includes('logs');
const buttonTexts = firstTimeSetup
? { text: 'Connect', loadingText: 'Connecting...' }
@ -64,7 +65,7 @@ export function Form(props: FormProps) {
loading={skeletonMode}
/>
{/* No need to redact the input fields since Heap avoid recording the input field values by default */}
<div className="flex">
<div>
<InputField
name="tracesEndpoint"
label="Traces Endpoint"
@ -73,7 +74,6 @@ export function Form(props: FormProps) {
learnMoreLink="https://hasura.io/docs/latest/observability/opentelemetry/#endpoint"
clearButton
loading={skeletonMode}
className="pr-4"
disabled={!traceType}
prependLabel={
<Switch
@ -112,6 +112,29 @@ export function Form(props: FormProps) {
/>
}
/>
<InputField
name="logsEndpoint"
label="Logs Endpoint"
placeholder="Your OpenTelemetry logs endpoint"
tooltip="OpenTelemetry-compliant logs receiver endpoint URL(At the moment, only HTTP is supported). This usually ends in /v1/logs. Environment variable templating is available using the {{VARIABLE}} tag"
learnMoreLink="https://hasura.io/docs/latest/observability/opentelemetry/#endpoint"
clearButton
loading={skeletonMode}
disabled={!logsType}
prependLabel={
<Switch
checked={logsType}
onCheckedChange={checked => {
setValue(
'dataType',
checked
? dataType.concat('logs')
: dataType.filter(type => type !== 'logs')
);
}}
/>
}
/>
</div>
<InputField
name="batchSize"

View File

@ -15,10 +15,11 @@ export const formSchema = z
connectionType: z.enum(['http/protobuf']),
enabled: z.boolean(),
dataType: z.enum(['traces', 'metrics']).array(),
dataType: z.enum(['traces', 'metrics', 'logs']).array(),
// NOTE: We enforce more specific invariants below:
tracesEndpoint: z.union([endPointSchema, z.literal('')]),
metricsEndpoint: z.union([endPointSchema, z.literal('')]),
logsEndpoint: z.union([endPointSchema, z.literal('')]),
// HEADERS
// Names should be validated against /^[a-zA-Z0-9]*$/ but we must be sure the server performs the
@ -68,6 +69,15 @@ export const formSchema = z
'A valid metrics endpoint must be supplied when metrics export is enabled',
path: ['metricsEndpoint'],
}
)
.refine(
obj =>
obj.enabled && obj.dataType.includes('logs') ? obj.logsEndpoint : true,
{
message:
'A valid logs endpoint must be supplied when logs export is enabled',
path: ['logsEndpoint'],
}
);
// --------------------------------------------------
@ -82,13 +92,14 @@ export const defaultValues: FormValues = {
// localhost would not work because HGE is running inside Docker, and the OpenTelemetry host is not.
tracesEndpoint: '',
metricsEndpoint: '',
logsEndpoint: '',
// At the time of writing, the server sets 512 as the default value.
batchSize: 512,
connectionType: 'http/protobuf',
dataType: ['traces', 'metrics'],
dataType: ['traces', 'metrics', 'logs'],
headers: [],
attributes: [],

View File

@ -227,7 +227,7 @@ export function HeroDisabled() {
/>
</g>
<text x="385" y="111" font-size="12" fill="#475569">
Export traces/metrics
Export traces/metrics/logs
</text>
</g>
</g>
@ -321,7 +321,7 @@ export function HeroDisabled() {
/>
</g>
<text x="342" y="229" font-size="11" fill="#297393">
Available traces/metrics
Available traces/metrics/logs
</text>
</g>
<g id="Icon_11">

View File

@ -227,7 +227,7 @@ export function HeroEnabled() {
/>
</g>
<text x="385" y="108" font-size="12" fill="#475569">
Export traces/metrics
Export traces/metrics/logs
</text>
</g>
</g>
@ -322,7 +322,7 @@ export function HeroEnabled() {
/>
</g>
<text x="342" y="229" font-size="11" fill="#297393">
Available traces/metrics
Available traces/metrics/logs
</text>
</g>
<g id="Icon_11" opacity="0.9">

View File

@ -16,9 +16,10 @@ describe('openTelemetryToFormValues', () => {
headers: [{ name: 'baz', value: 'qux' }],
otlp_traces_endpoint: 'https://hasura.io/v1/traces',
otlp_metrics_endpoint: 'https://hasura.io/v1/metrics',
otlp_logs_endpoint: 'https://hasura.io/v1/logs',
},
data_types: ['traces', 'metrics'],
data_types: ['traces', 'metrics', 'logs'],
batch_span_processor: {
max_export_batch_size: 100,
},
@ -29,11 +30,12 @@ describe('openTelemetryToFormValues', () => {
batchSize: 100,
attributes: [],
logsEndpoint: 'https://hasura.io/v1/logs',
tracesEndpoint: 'https://hasura.io/v1/traces',
metricsEndpoint: 'https://hasura.io/v1/metrics',
headers: [{ name: 'baz', value: 'qux', type: 'from_value' }],
dataType: ['traces', 'metrics'],
dataType: ['traces', 'metrics', 'logs'],
// At the beginning, only one Connection Type is available
connectionType: 'http/protobuf',
};
@ -55,6 +57,7 @@ describe('openTelemetryToFormValues', () => {
attributes: [],
tracesEndpoint: '',
metricsEndpoint: '',
logsEndpoint: '',
headers: [{ name: 'baz', value: 'qux', type: 'from_value' }],
// At the beginning, only one Data Type is available

View File

@ -27,6 +27,7 @@ export function openTelemetryToFormValues(
enabled: openTelemetry.status === 'enabled',
tracesEndpoint: openTelemetry.exporter_otlp.otlp_traces_endpoint ?? '',
metricsEndpoint: openTelemetry.exporter_otlp.otlp_metrics_endpoint ?? '',
logsEndpoint: openTelemetry.exporter_otlp.otlp_logs_endpoint ?? '',
headers: metadataHeadersToFormHeaders(openTelemetry.exporter_otlp.headers),
batchSize: openTelemetry.batch_span_processor.max_export_batch_size,
tracesPropagators: openTelemetry.exporter_otlp.traces_propagators,
@ -48,6 +49,7 @@ export function formValuesToOpenTelemetry(
): OpenTelemetry {
const otlp_traces_endpoint = formValues.tracesEndpoint;
const otlp_metrics_endpoint = formValues.metricsEndpoint;
const otlp_logs_endpoint = formValues.logsEndpoint;
const max_export_batch_size = formValues.batchSize;
const traces_propagators = formValues.tracesPropagators;
// At the beginning, only one Connection Type is available
@ -82,5 +84,8 @@ export function formValuesToOpenTelemetry(
if (otlp_metrics_endpoint) {
ot.exporter_otlp.otlp_metrics_endpoint = otlp_metrics_endpoint;
}
if (otlp_logs_endpoint) {
ot.exporter_otlp.otlp_logs_endpoint = otlp_logs_endpoint;
}
return ot;
}

View File

@ -73,6 +73,7 @@ const exporterSchema = z.object({
*/
otlp_traces_endpoint: validUrlSchema,
otlp_metrics_endpoint: validUrlSchema,
otlp_logs_endpoint: validUrlSchema,
});
// --------------------------------------------------
@ -91,7 +92,7 @@ export const openTelemetrySchema = z
/**
* The individually-enabled telemetry export types
*/
data_types: z.array(z.enum(['traces', 'metrics'])),
data_types: z.array(z.enum(['traces', 'metrics', 'logs'])),
batch_span_processor: z.object({
// a value between 1 and 512
@ -106,6 +107,9 @@ export const openTelemetrySchema = z
otlp_metrics_endpoint: validUrlSchema
.or(z.literal(''))
.or(z.literal(undefined)),
otlp_logs_endpoint: validUrlSchema
.or(z.literal(''))
.or(z.literal(undefined)),
}),
})
// enforce invariant that: when export is enabled globally AND when the
@ -131,6 +135,17 @@ export const openTelemetrySchema = z
'A valid metrics endpoint must be supplied when metrics export is enabled',
path: ['exporter_otlp', 'otlp_metrics_endpoint'],
}
)
.refine(
obj =>
obj.status === 'enabled' && obj.data_types.includes('logs')
? obj.exporter_otlp.otlp_logs_endpoint
: true,
{
message:
'A valid logs endpoint must be supplied when logs export is enabled',
path: ['exporter_otlp', 'otlp_logs_endpoint'],
}
);
export type OpenTelemetry = z.infer<typeof openTelemetrySchema>;

View File

@ -173,7 +173,8 @@ export const Input = ({
},
type === 'file' && 'h-auto',
inputClassName,
rightButton && 'rounded-r-none border-r-0'
rightButton && 'rounded-r-none border-r-0',
showInputEndContainer && 'pr-8'
)}
placeholder={placeholder}
{...inputProps}

View File

@ -6405,7 +6405,8 @@
"items": {
"enum": [
"traces",
"metrics"
"metrics",
"logs"
],
"type": "string"
},
@ -6454,6 +6455,10 @@
},
"type": "array"
},
"otlp_logs_endpoint": {
"description": "Target URL to which the exporter is going to send logs. No default.",
"type": "string"
},
"otlp_metrics_endpoint": {
"description": "Target URL to which the exporter is going to send metrics. No default.",
"type": "string"

View File

@ -550,7 +550,7 @@ if [ "$MODE" = "graphql-engine" ] || [ "$MODE" = "graphql-engine-pro" ]; then
export HASURA_GRAPHQL_DATABASE_URL=${HASURA_GRAPHQL_DATABASE_URL-$PG_DB_URL}
export HASURA_GRAPHQL_SERVER_PORT=${HASURA_GRAPHQL_SERVER_PORT-8181}
# Add 'developer' to the default list, for more visibility:
export HASURA_GRAPHQL_ENABLED_APIS=metadata,graphql,pgdump,config,developer
export HASURA_GRAPHQL_ENABLED_APIS=metadata,graphql,pgdump,config,developer,metrics
echo_pretty "We will connect to postgres at '$HASURA_GRAPHQL_DATABASE_URL'"
echo_pretty "If you haven't overridden HASURA_GRAPHQL_DATABASE_URL, you can"

View File

@ -371,6 +371,8 @@ data AppInit = AppInit
-- that are not required to create the 'AppEnv', such as starting background
-- processes and logging startup information. All of those are flagged with a
-- comment marking them as a side-effect.
--
-- NOTE: this is invoked in pro, but only for OSS mode (no license key)
initialiseAppEnv ::
(C.ForkableMonadIO m) =>
Env.Environment ->
@ -698,12 +700,12 @@ instance HttpLog AppM where
buildExtraHttpLogMetadata _ _ = ()
logHttpError logger loggingSettings userInfoM reqId waiReq req qErr headers _ =
unLogger logger
unLoggerTracing logger
$ mkHttpLog
$ mkHttpErrorLogContext userInfoM loggingSettings reqId waiReq req qErr Nothing Nothing headers
logHttpSuccess logger loggingSettings userInfoM reqId waiReq reqBody response compressedResponse qTime cType headers (CommonHttpLogMetadata rb batchQueryOpLogs, ()) =
unLogger logger
unLoggerTracing logger
$ mkHttpLog
$ mkHttpAccessLogContext userInfoM loggingSettings reqId waiReq reqBody (BL.length response) compressedResponse qTime cType headers rb batchQueryOpLogs
@ -759,13 +761,13 @@ instance MonadConfigApiHandler AppM where
runConfigApiHandler = configApiGetHandler
instance MonadQueryLog AppM where
logQueryLog logger = unLogger logger
logQueryLog logger = unLoggerTracing logger
instance MonadExecutionLog AppM where
logExecutionLog logger = unLogger logger
logExecutionLog logger = unLoggerTracing logger
instance WS.MonadWSLog AppM where
logWSLog logger = unLogger logger
logWSLog logger = unLoggerTracing logger
instance MonadResolveSource AppM where
getPGSourceResolver = asks (mkPgSourceResolver . _lsPgLogger . appEnvLoggers)
@ -973,7 +975,8 @@ runHGEServer setupHook appStateRef initTime startupStatusHook consoleType ekgSto
finishTime <- liftIO Clock.getCurrentTime
let apiInitTime = realToFrac $ Clock.diffUTCTime finishTime initTime
unLogger logger
lift
$ unLoggerTracing logger
$ mkGenericLog LevelInfo "server"
$ StartupTimeInfo "starting API server" apiInitTime
@ -1078,7 +1081,7 @@ mkHGEServer setupHook appStateRef consoleType ekgStore = do
startScheduledEventsPollerThread logger appEnvLockedEventsCtx
EventingDisabled ->
unLogger logger $ mkGenericLog @Text LevelInfo "server" "starting in eventing disabled mode"
lift $ unLoggerTracing logger $ mkGenericLog @Text LevelInfo "server" "starting in eventing disabled mode"
-- start a background thread to check for updates
_updateThread <-
@ -1110,7 +1113,7 @@ mkHGEServer setupHook appStateRef consoleType ekgStore = do
liftIO (runExceptT $ PG.runTx appEnvMetadataDbPool (PG.ReadCommitted, Nothing) $ getPgVersion)
`onLeftM` throwErrJExit DatabaseMigrationError
lift . unLogger logger $ mkGenericLog @Text LevelInfo "telemetry" telemetryNotice
lift . unLoggerTracing logger $ mkGenericLog @Text LevelInfo "telemetry" telemetryNotice
computeResources <- getServerResources
@ -1143,8 +1146,8 @@ mkHGEServer setupHook appStateRef consoleType ekgStore = do
(getAppContext appStateRef)
getSchemaCache' = getSchemaCache appStateRef
prepareScheduledEvents (Logger logger) = do
liftIO $ logger $ mkGenericLog @Text LevelInfo "scheduled_triggers" "preparing data"
prepareScheduledEvents (LoggerTracing logger) = do
logger $ mkGenericLog @Text LevelInfo "scheduled_triggers" "preparing data"
res <- Retry.retrying Retry.retryPolicyDefault isRetryRequired (return unlockAllLockedScheduledEvents)
onLeft res (\err -> logger $ mkGenericLog @String LevelError "scheduled_triggers" (show $ qeError err))
@ -1263,7 +1266,7 @@ mkHGEServer setupHook appStateRef consoleType ekgStore = do
(createFetchedEventsStatsLogger logger)
(closeFetchedEventsStatsLogger logger)
unLogger logger $ mkGenericLog @Text LevelInfo "event_triggers" "starting workers"
lift $ unLoggerTracing logger $ mkGenericLog @Text LevelInfo "event_triggers" "starting workers"
void
$ C.forkManagedTWithGracefulShutdown
"processEventQueue"

View File

@ -60,7 +60,7 @@ instance ToEngineLog AgentCommunicationLog Hasura where
]
logAgentRequest :: (MonadIO m, MonadTrace m) => Logger Hasura -> HTTP.Request -> Either HTTP.HttpException (HTTP.Response BSL.ByteString) -> m ()
logAgentRequest (Logger writeLog) req responseOrError = do
logAgentRequest (LoggerTracing writeLog) req responseOrError = do
traceCtx <- Tracing.currentContext
let _aclRequest = Just $ extractRequestLogInfoFromClientRequest req
_aclResponseStatusCode = case responseOrError of
@ -82,7 +82,7 @@ extractRequestLogInfoFromClientRequest req =
in RequestLogInfo {..}
logClientError :: (MonadIO m, MonadTrace m) => Logger Hasura -> ClientError -> m ()
logClientError (Logger writeLog) clientError = do
logClientError (LoggerTracing writeLog) clientError = do
traceCtx <- Tracing.currentContext
let _aclResponseStatusCode = case clientError of
FailureResponse _ response -> Just . HTTP.statusCode $ responseStatusCode response

View File

@ -526,7 +526,7 @@ validateMutation env manager logger userInfo (ResolvedWebhook urlText) confHeade
responseBody = response Lens.^. Wreq.responseBody
responseBodyForLogging = fromMaybe (J.String $ lbsToTxt responseBody) $ J.decode' responseBody
-- Log the details of the HTTP webhook call
L.unLogger logger $ VIILHttpHandler $ HttpHandlerLog urlText requestBody confHeaders responseBodyForLogging (HTTP.statusCode responseStatus)
L.unLoggerTracing logger $ VIILHttpHandler $ HttpHandlerLog urlText requestBody confHeaders responseBodyForLogging (HTTP.statusCode responseStatus)
if
| HTTP.statusIsSuccessful responseStatus -> pure ()
| responseStatus == HTTP.status400 -> do

View File

@ -360,7 +360,7 @@ processEventQueue logger statsLogger httpMgr getSchemaCache getEventEngineCtx ac
saveLockedEventTriggerEvents sourceName (eId <$> events) leEvents
return $ map (\event -> AB.mkAnyBackend @b $ EventWithSource event _siConfiguration sourceName eventsFetchedTime) events
Left err -> do
liftIO $ L.unLogger logger $ EventInternalErr err
L.unLogger logger $ EventInternalErr err
pure []
else pure []
@ -422,7 +422,7 @@ processEventQueue logger statsLogger httpMgr getSchemaCache getEventEngineCtx ac
let clearlyBehind = fullFetchCount >= 3
unless alreadyWarned
$ when clearlyBehind
$ L.unLogger logger
$ L.unLoggerTracing logger
$ L.UnstructuredLog L.LevelWarn
$ fromString
$ "Events processor may not be keeping up with events generated in postgres, "
@ -433,7 +433,7 @@ processEventQueue logger statsLogger httpMgr getSchemaCache getEventEngineCtx ac
when (lenEvents /= fetchBatchSize && alreadyWarned)
$
-- emit as warning in case users are only logging warning severity and saw above
L.unLogger logger
L.unLoggerTracing logger
$ L.UnstructuredLog L.LevelWarn
$ fromString
$ "It looks like the events processor is keeping up again."
@ -626,7 +626,7 @@ processEventQueue logger statsLogger httpMgr getSchemaCache getEventEngineCtx ac
(HTTPError reqBody err) ->
processError @b sourceConfig e retryConf logHeaders reqBody maintenanceModeVersion eventTriggerMetrics err >>= flip onLeft logQErr
(TransformationError _ err) -> do
L.unLogger logger $ L.UnstructuredLog L.LevelError (SB.fromLBS $ J.encode err)
L.unLoggerTracing logger $ L.UnstructuredLog L.LevelError (SB.fromLBS $ J.encode err)
-- Record an Event Error
recordError' @b sourceConfig e Nothing PESetError maintenanceModeVersion >>= flip onLeft logQErr
@ -638,7 +638,7 @@ processEventQueue logger statsLogger httpMgr getSchemaCache getEventEngineCtx ac
`onNothingM` do
let eventTriggerTimeoutMessage = "Event Trigger " <> etiName eti <<> " timed out while processing."
eventTriggerTimeoutError = err500 TimeoutErrorCode eventTriggerTimeoutMessage
L.unLogger logger $ EventInternalErr eventTriggerTimeoutError
L.unLoggerTracing logger $ EventInternalErr eventTriggerTimeoutError
processError @b sourceConfig e retryConf logHeaders J.Null maintenanceModeVersion eventTriggerMetrics (HOther $ T.unpack eventTriggerTimeoutMessage)
>>= flip onLeft logQErr
@ -769,10 +769,10 @@ mkInvocation eid ep statusMaybe reqHeaders respBody respHeaders =
(mkWebhookReq ep reqHeaders invocationVersionET)
resp
logQErr :: (MonadReader r m, Has (L.Logger L.Hasura) r, MonadIO m) => QErr -> m ()
logQErr :: (Tracing.MonadTraceContext m, MonadReader r m, Has (L.Logger L.Hasura) r, MonadIO m) => QErr -> m ()
logQErr err = do
logger :: L.Logger L.Hasura <- asks getter
L.unLogger logger $ EventInternalErr err
L.unLoggerTracing logger $ EventInternalErr err
getEventTriggerInfoFromEvent ::
forall b. (Backend b) => SchemaCache -> Event b -> Either Text (EventTriggerInfo b)

View File

@ -258,6 +258,7 @@ logHTTPForTriggers ::
( MonadReader r m,
Has (Logger Hasura) r,
MonadIO m,
MonadTraceContext m,
ToEngineLog (HTTPRespExtraLog a) Hasura
) =>
Either (HTTPErr a) (HTTPResp a) ->
@ -270,11 +271,12 @@ logHTTPForTriggers ::
logHTTPForTriggers eitherResp extraLogCtx reqDetails webhookVarName logHeaders triggersErrorLogLevelStatus = do
logger :: Logger Hasura <- asks getter
case (eitherResp, isTriggersErrorLogLevelEnabled triggersErrorLogLevelStatus) of
(Left _, True) -> unLogger logger $ HTTPRespExtraLog LevelError $ HTTPRespExtra eitherResp extraLogCtx reqDetails webhookVarName logHeaders
(_, _) -> unLogger logger $ HTTPRespExtraLog LevelInfo $ HTTPRespExtra eitherResp extraLogCtx reqDetails webhookVarName logHeaders
(Left _, True) -> unLoggerTracing logger $ HTTPRespExtraLog LevelError $ HTTPRespExtra eitherResp extraLogCtx reqDetails webhookVarName logHeaders
(_, _) -> unLoggerTracing logger $ HTTPRespExtraLog LevelInfo $ HTTPRespExtra eitherResp extraLogCtx reqDetails webhookVarName logHeaders
logHTTPForET ::
( MonadReader r m,
MonadTraceContext m,
Has (Logger Hasura) r,
MonadIO m
) =>
@ -289,6 +291,7 @@ logHTTPForET = logHTTPForTriggers
logHTTPForST ::
( MonadReader r m,
MonadTraceContext m,
Has (Logger Hasura) r,
MonadIO m
) =>
@ -384,7 +387,7 @@ invokeRequest reqDetails@RequestDetails {..} respTransform' sessionVars logger t
Left err -> do
-- Log The Response Transformation Error
logger' :: Logger Hasura <- asks getter
unLogger logger' $ UnstructuredLog LevelError (SB.fromLBS $ J.encode err)
unLoggerTracing logger' $ UnstructuredLog LevelError (SB.fromLBS $ J.encode err)
-- Throw an exception with the Transformation Error
throwError $ HTTPError reqBody $ HOther $ T.unpack $ TE.decodeUtf8 $ LBS.toStrict $ J.encode $ J.toJSON err
Right transformedBody -> pure $ resp {hrsBody = SB.fromLBS transformedBody}

View File

@ -173,6 +173,7 @@ import Text.Builder qualified as TB
-- have an adequate buffer of cron events.
runCronEventsGenerator ::
( MonadIO m,
Tracing.MonadTraceContext m,
MonadMetadataStorage m
) =>
L.Logger L.Hasura ->
@ -200,7 +201,7 @@ runCronEventsGenerator logger cronTriggerStatsLogger getSC = do
<$> mapM (withCronTrigger cronTriggersCache) deprivedCronTriggerStats
insertCronEventsFor cronTriggersForHydrationWithStats
onLeft eitherRes $ L.unLogger logger . ScheduledTriggerInternalErr
onLeft eitherRes $ L.unLoggerTracing logger . ScheduledTriggerInternalErr
-- See discussion: https://github.com/hasura/graphql-engine-mono/issues/1001
liftIO $ sleep (minutes 1)
@ -208,7 +209,7 @@ runCronEventsGenerator logger cronTriggerStatsLogger getSC = do
withCronTrigger cronTriggerCache cronTriggerStat = do
case HashMap.lookup (_ctsName cronTriggerStat) cronTriggerCache of
Nothing -> do
L.unLogger logger
L.unLoggerTracing logger
$ ScheduledTriggerInternalErr
$ err500 Unexpected "could not find scheduled trigger in the schema cache"
pure Nothing
@ -307,7 +308,7 @@ processCronEvents logger httpMgr sc scheduledTriggerMetrics cronEvents cronTrigg
Just finally -> onLeft finally logInternalError
removeEventFromLockedEvents id' lockedCronEvents
where
logInternalError err = liftIO . L.unLogger logger $ ScheduledTriggerInternalErr err
logInternalError err = L.unLoggerTracing logger $ ScheduledTriggerInternalErr err
mkErrorObject :: Text -> J.Value
mkErrorObject errorMessage =
@ -394,7 +395,7 @@ processOneOffScheduledEvents
(HOther $ T.unpack $ qeError (err400 NotFound (mkInvalidEnvVarErrMsg envVarError)))
scheduledTriggerMetrics
where
logInternalError err = liftIO . L.unLogger logger $ ScheduledTriggerInternalErr err
logInternalError err = L.unLoggerTracing logger $ ScheduledTriggerInternalErr err
getTemplateFromUrl url = printTemplate $ unInputWebhook url
mkInvalidEnvVarErrMsg envVarErrorValues = "The value for environment variables not found: " <> (getInvalidEnvVarText envVarErrorValues)
mkErrorObject :: Text -> J.Value
@ -439,7 +440,7 @@ processScheduledTriggers getEnvHook logger statsLogger httpMgr scheduledTriggerM
-- might be before we begin processing:
liftIO $ sleep (seconds 10)
where
logInternalError err = liftIO . L.unLogger logger $ ScheduledTriggerInternalErr err
logInternalError err = L.unLoggerTracing logger $ ScheduledTriggerInternalErr err
processScheduledEvent ::
( MonadReader r m,
@ -513,7 +514,7 @@ processScheduledEvent schemaCache scheduledTriggerMetrics eventId eventHeaders r
Left (TransformationError _ e) -> do
-- Log The Transformation Error
logger :: L.Logger L.Hasura <- asks getter
L.unLogger logger $ L.UnstructuredLog L.LevelError (SB.fromLBS $ J.encode e)
L.unLoggerTracing logger $ L.UnstructuredLog L.LevelError (SB.fromLBS $ J.encode e)
-- Set event state to Error
liftEitherM $ setScheduledEventOp eventId (SEOpStatus SESError) type'

View File

@ -613,7 +613,7 @@ callWebhook
Left err -> do
-- Log The Transformation Error
logger :: L.Logger L.Hasura <- asks getter
L.unLogger logger $ L.UnstructuredLog L.LevelError (SB.fromLBS $ J.encode err)
L.unLoggerTracing logger $ L.UnstructuredLog L.LevelError (SB.fromLBS $ J.encode err)
-- Throw an exception with the Transformation Error
throw500WithDetail "Request Transformation Failed" $ J.toJSON err
@ -656,7 +656,7 @@ callWebhook
in applyResponseTransform responseTransform responseTransformCtx `onLeft` \err -> do
-- Log The Response Transformation Error
logger :: L.Logger L.Hasura <- asks getter
L.unLogger logger $ L.UnstructuredLog L.LevelError (SB.fromLBS $ J.encode err)
L.unLoggerTracing logger $ L.UnstructuredLog L.LevelError (SB.fromLBS $ J.encode err)
-- Throw an exception with the Transformation Error
throw500WithDetail "Response Transformation Failed" $ J.toJSON err
@ -670,7 +670,7 @@ callWebhook
(pmActionBytesReceived prometheusMetrics)
responseBodySize
logger :: (L.Logger L.Hasura) <- asks getter
L.unLogger logger $ ActionHandlerLog req transformedReq requestBodySize transformedReqSize responseBodySize actionName
L.unLoggerTracing logger $ ActionHandlerLog req transformedReq requestBodySize transformedReqSize responseBodySize actionName
case J.eitherDecode transformedResponseBody of
Left e -> do

View File

@ -1106,8 +1106,8 @@ onPing :: (MonadIO m) => WSConn -> Maybe PingPongPayload -> m ()
onPing wsConn mPayload =
liftIO $ sendMsg wsConn (SMPong mPayload)
onStop :: (MonadIO m) => WSServerEnv impl -> WSConn -> StopMsg -> IO GranularPrometheusMetricsState -> m ()
onStop serverEnv wsConn (StopMsg opId) granularPrometheusMetricsState = liftIO $ do
onStop :: (Tracing.MonadTraceContext m, MonadIO m) => WSServerEnv impl -> WSConn -> StopMsg -> IO GranularPrometheusMetricsState -> m ()
onStop serverEnv wsConn (StopMsg opId) granularPrometheusMetricsState = do
-- When a stop message is received for an operation, it may not be present in OpMap
-- in these cases:
-- 1. If the operation is a query/mutation - as we remove the operation from the
@ -1115,7 +1115,7 @@ onStop serverEnv wsConn (StopMsg opId) granularPrometheusMetricsState = liftIO $
-- 2. A misbehaving client
-- 3. A bug on our end
stopOperation serverEnv wsConn opId granularPrometheusMetricsState
$ L.unLogger logger
$ L.unLoggerTracing logger
$ L.UnstructuredLog L.LevelDebug
$ fromString
$ "Received STOP for an operation that we have no record for: "
@ -1124,19 +1124,19 @@ onStop serverEnv wsConn (StopMsg opId) granularPrometheusMetricsState = liftIO $
where
logger = _wseLogger serverEnv
stopOperation :: WSServerEnv impl -> WSConn -> OperationId -> IO GranularPrometheusMetricsState -> IO () -> IO ()
stopOperation :: (MonadIO m) => WSServerEnv impl -> WSConn -> OperationId -> IO GranularPrometheusMetricsState -> m () -> m ()
stopOperation serverEnv wsConn opId granularPrometheusMetricsState logWhenOpNotExist = do
opM <- liftIO $ STM.atomically $ STMMap.lookup opId opMap
case opM of
Just (subscriberDetails, operationName) -> do
logWSEvent logger wsConn $ EOperation $ opDet operationName
liftIO $ logWSEvent logger wsConn $ EOperation $ opDet operationName
case subscriberDetails of
LiveQuerySubscriber lqId ->
ES.removeLiveQuery logger (_wseServerMetrics serverEnv) (_wsePrometheusMetrics serverEnv) subscriptionState lqId granularPrometheusMetricsState operationName
liftIO $ ES.removeLiveQuery logger (_wseServerMetrics serverEnv) (_wsePrometheusMetrics serverEnv) subscriptionState lqId granularPrometheusMetricsState operationName
StreamingQuerySubscriber streamSubscriberId ->
ES.removeStreamingQuery logger (_wseServerMetrics serverEnv) (_wsePrometheusMetrics serverEnv) subscriptionState streamSubscriberId granularPrometheusMetricsState operationName
liftIO $ ES.removeStreamingQuery logger (_wseServerMetrics serverEnv) (_wsePrometheusMetrics serverEnv) subscriptionState streamSubscriberId granularPrometheusMetricsState operationName
Nothing -> logWhenOpNotExist
STM.atomically $ STMMap.delete opId opMap
liftIO $ STM.atomically $ STMMap.delete opId opMap
where
logger = _wseLogger serverEnv
subscriptionState = _wseSubscriptionState serverEnv

View File

@ -24,9 +24,19 @@ module Hasura.Logging
UnhandledInternalErrorLog (..),
mkLogger,
nullLogger,
LoggerCtx (..),
-- ** LoggerCtx
LoggerCtx,
getLoggerSet,
getTimeGetter,
getLogLevel,
getEnabledLogTypes,
getLogsExporter,
mkLoggerCtx,
mkLoggerCtxOTLP,
cleanLoggerCtx,
-- ** etc
eventTriggerLogType,
eventTriggerProcessLogType,
scheduledTriggerLogType,
@ -303,13 +313,32 @@ instance ToEngineLog UnstructuredLog Hasura where
toEngineLog (UnstructuredLog level t) =
(level, ELTInternal ILTUnstructured, J.toJSON t)
-- | Abstract. Constructed with 'mkLoggerCtx'.
data LoggerCtx impl = LoggerCtx
{ _lcLoggerSet :: !FL.LoggerSet,
_lcLogLevel :: !LogLevel,
_lcTimeGetter :: !(IO FormattedTime),
_lcEnabledLogTypes :: !(Set.HashSet (EngineLogType impl))
_lcEnabledLogTypes :: !(Set.HashSet (EngineLogType impl)),
-- | @LogsExporter@ or a noop. Wrapped in readIORef to work around cycle at
-- callsite of @runOtlpLogsExporter@
_lcLogsExporter :: !(IO (EngineLog impl -> IO ()))
}
-- | The underlying @fast-logger@ 'FL.LoggerSet' that log lines are pushed to.
getLoggerSet :: LoggerCtx impl -> FL.LoggerSet
getLoggerSet = _lcLoggerSet

-- | Minimum 'LogLevel' at which log lines are emitted.
getLogLevel :: LoggerCtx impl -> LogLevel
getLogLevel = _lcLogLevel

-- | Action producing the (possibly cached) formatted current time used to
-- timestamp each log line.
getTimeGetter :: LoggerCtx impl -> IO FormattedTime
getTimeGetter = _lcTimeGetter

-- | The set of log types that are enabled; other log types are dropped.
getEnabledLogTypes :: LoggerCtx impl -> Set.HashSet (EngineLogType impl)
getEnabledLogTypes = _lcEnabledLogTypes

-- | The OTLP logs exporter callback (or a no-op when log export is
-- disabled). Wrapped in an 'IO' read to break a definition cycle at the
-- @runOtlpLogsExporter@ callsite.
getLogsExporter :: LoggerCtx impl -> IO (EngineLog impl -> IO ())
getLogsExporter = _lcLogsExporter
-- * Unhandled Internal Errors
-- | We expect situations where there are code paths that should not occur and we throw
@ -345,22 +374,26 @@ getFormattedTime tzM = do
t <- Time.getCurrentTime
return $ FormattedTime t tz
-- | Creates a new 'LoggerCtx'.
-- | Creates a new 'LoggerCtx', additionally fanning each log line out to an
-- OTLP endpoint when OTLP log export is enabled.
--
-- The underlying 'LoggerSet' is bound to the 'ManagedT' context: when it exits,
-- the log will be flushed and cleared regardless of whether it was exited
-- properly or not ('ManagedT' uses 'bracket' underneath). This guarantees that
-- the logs will always be flushed, even in case of error, avoiding a repeat of
-- https://github.com/hasura/graphql-engine/issues/4772.
mkLoggerCtx ::
mkLoggerCtxOTLP ::
(MonadIO io, MonadBaseControl IO io) =>
-- | @LogsExporter@ or a noop. Wrapped in readIORef to work around cycle at
-- callsite of @runOtlpLogsExporter@
IO (EngineLog impl -> IO ()) ->
LoggerSettings ->
Set.HashSet (EngineLogType impl) ->
ManagedT io (LoggerCtx impl)
mkLoggerCtx (LoggerSettings cacheTime tzM logLevel) enabledLogs = do
mkLoggerCtxOTLP logsExporter (LoggerSettings cacheTime tzM logLevel) enabledLogs = do
loggerSet <- allocate acquire release
timeGetter <- liftIO $ bool (pure $ getFormattedTime tzM) cachedTimeGetter cacheTime
pure $ LoggerCtx loggerSet logLevel timeGetter enabledLogs
pure $ LoggerCtx loggerSet logLevel timeGetter enabledLogs logsExporter
where
acquire = liftIO do
FL.newStdoutLoggerSet FL.defaultBufSize
@ -373,6 +406,14 @@ mkLoggerCtx (LoggerSettings cacheTime tzM logLevel) enabledLogs = do
{ Auto.updateAction = getFormattedTime tzM
}
-- | 'mkLoggerCtxOTLP' specialized with a no-op logs exporter, i.e. no OTLP
-- log shipping. Kept for compatibility with callers that don't need or
-- support log export.
mkLoggerCtx ::
(MonadIO io, MonadBaseControl IO io) =>
LoggerSettings ->
Set.HashSet (EngineLogType impl) ->
ManagedT io (LoggerCtx impl)
mkLoggerCtx = mkLoggerCtxOTLP (pure (\_ -> pure ()))
-- | Flush and release the underlying 'FL.LoggerSet'. Normally unnecessary:
-- the 'ManagedT' lifecycle set up by 'mkLoggerCtx' / 'mkLoggerCtxOTLP'
-- already flushes and clears the logger set on exit.
cleanLoggerCtx :: LoggerCtx a -> IO ()
cleanLoggerCtx =
FL.rmLoggerSet . _lcLoggerSet
@ -396,7 +437,7 @@ newToOrig :: Logger impl -> (forall a m. (ToEngineLog a impl, MonadIO m) => a ->
newToOrig (LoggerTracing f) = fmap Tracing.runNoMonadTraceContext f
mkLogger :: (J.ToJSON (EngineLogType impl)) => LoggerCtx impl -> Logger impl
mkLogger (LoggerCtx loggerSet serverLogLevel timeGetter enabledLogTypes) = LoggerTracing $ \l -> do
mkLogger (LoggerCtx loggerSet serverLogLevel timeGetter enabledLogTypes logsExporter) = LoggerTracing $ \l -> do
-- NOTE: This has us logging a trace and span id even in the OSS server,
-- where tracing isn't actually supported. We decided this was fine, and
-- actually might end up being useful as a way for OSS users to correlate
@ -406,10 +447,10 @@ mkLogger (LoggerCtx loggerSet serverLogLevel timeGetter enabledLogTypes) = Logge
mbCurrentTrace = tcCurrentTrace <$> cxt
localTime <- liftIO timeGetter
let (logLevel, logTy, logDet) = toEngineLog l
when (logLevel >= serverLogLevel && isLogTypeEnabled enabledLogTypes logTy)
$ liftIO
$ FL.pushLogStrLn loggerSet
$ FL.toLogStr (J.encode $ EngineLog localTime logLevel logTy logDet mbCurrentTrace mbCurrentSpan)
when (logLevel >= serverLogLevel && isLogTypeEnabled enabledLogTypes logTy) $ liftIO do
let logLine = EngineLog localTime logLevel logTy logDet mbCurrentTrace mbCurrentSpan
FL.pushLogStrLn loggerSet $ FL.toLogStr (J.encode logLine)
logsExporter >>= \f -> f logLine
-- | A 'Logger' that discards every log line; useful where a logger is
-- required but no output is wanted (e.g. tests).
nullLogger :: Logger Hasura
nullLogger = Logger \_ -> pure ()

View File

@ -303,6 +303,7 @@ createEventTriggerQueryMetadata ::
CacheRWM m,
MetadataM m,
BackendEventTrigger b,
Tracing.MonadTraceContext m,
MonadIO m,
MonadEventLogCleanup m,
MonadReader r m,
@ -359,6 +360,7 @@ runCreateEventTriggerQuery ::
QErrM m,
UserInfoM m,
CacheRWM m,
Tracing.MonadTraceContext m,
MetadataM m,
MonadIO m,
MonadEventLogCleanup m,

View File

@ -95,10 +95,19 @@ parseOtelExporterConfig env enabledDataTypes OtelExporterConfig {..} = do
| OtelMetrics `Set.member` enabledDataTypes ->
mkExportReq rawMetricsEndpoint
_ -> pure Nothing -- disabled
_oteleiLogsBaseRequest <- case _oecLogsEndpoint of
Nothing
| OtelLogs `Set.member` enabledDataTypes ->
Left (err400 InvalidParams "Logs export is enabled but logs endpoint missing")
Just rawLogsEndpoint
| OtelLogs `Set.member` enabledDataTypes ->
mkExportReq rawLogsEndpoint
_ -> pure Nothing -- disabled
pure
$ OtelExporterInfo
{ _oteleiMetricsBaseRequest,
_oteleiTracesBaseRequest,
_oteleiLogsBaseRequest,
_oteleiResourceAttributes =
Map.fromList
$ map

View File

@ -243,18 +243,18 @@ instance (Monad m) => CacheRM (CacheRWT m) where
-- fetching/storing stored introspection) in the critical code path of building
-- the 'SchemaCache'.
loadStoredIntrospection ::
(MonadMetadataStorage m, MonadIO m) =>
(Tracing.MonadTraceContext m, MonadMetadataStorage m, MonadIO m) =>
Logger Hasura ->
MetadataResourceVersion ->
m (Maybe StoredIntrospection)
loadStoredIntrospection logger metadataVersion = do
fetchSourceIntrospection metadataVersion `onLeftM` \err -> do
unLogger logger
unLoggerTracing logger
$ StoredIntrospectionStorageLog "Could not load stored-introspection. Continuing without it" err
pure Nothing
saveSourcesIntrospection ::
(MonadIO m, MonadMetadataStorage m) =>
(Tracing.MonadTraceContext m, MonadIO m, MonadMetadataStorage m) =>
Logger Hasura ->
SourcesIntrospectionStatus ->
MetadataResourceVersion ->
@ -267,10 +267,11 @@ saveSourcesIntrospection logger sourcesIntrospection metadataVersion = do
SourcesIntrospectionChangedPartial _ -> pure ()
SourcesIntrospectionChangedFull introspection ->
storeSourceIntrospection introspection metadataVersion `onLeftM` \err ->
unLogger logger $ StoredIntrospectionStorageLog "Could not save source introspection" err
unLoggerTracing logger $ StoredIntrospectionStorageLog "Could not save source introspection" err
instance
( MonadIO m,
( Tracing.MonadTraceContext m,
MonadIO m,
MonadError QErr m,
ProvidesNetwork m,
MonadResolveSource m,

View File

@ -148,23 +148,27 @@ instance ToJSON OtelStatus where
data OtelDataType
= OtelTraces
| OtelMetrics
| OtelLogs
deriving stock (Eq, Ord, Show, Bounded, Enum)
instance HasCodec OtelDataType where
codec = boundedEnumCodec \case
OtelTraces -> "traces"
OtelMetrics -> "metrics"
OtelLogs -> "logs"
instance FromJSON OtelDataType where
parseJSON = J.withText "OtelDataType" \case
"traces" -> pure OtelTraces
"metrics" -> pure OtelMetrics
"logs" -> pure OtelLogs
x -> fail $ "unexpected string '" <> show x <> "'."
instance ToJSON OtelDataType where
toJSON = \case
OtelTraces -> J.String "traces"
OtelMetrics -> J.String "metrics"
OtelLogs -> J.String "logs"
defaultOtelEnabledDataTypes :: Set OtelDataType
defaultOtelEnabledDataTypes = Set.empty
@ -177,6 +181,9 @@ data OtelExporterConfig = OtelExporterConfig
-- | Target URL to which the exporter is going to send metrics. No default.
-- Used as-is without modification (e.g. appending /v1/metrics).
_oecMetricsEndpoint :: Maybe Text,
-- | Target URL to which the exporter is going to send logs. No default.
-- Used as-is without modification (e.g. appending /v1/logs).
_oecLogsEndpoint :: Maybe Text,
-- | The transport protocol, for all telemetry types.
_oecProtocol :: OtlpProtocol,
-- | Key-value pairs to be used as headers to send with an export request,
@ -199,6 +206,8 @@ instance HasCodec OtelExporterConfig where
AC..= _oecTracesEndpoint
<*> optionalField "otlp_metrics_endpoint" metricsEndpointDoc
AC..= _oecMetricsEndpoint
<*> optionalField "otlp_logs_endpoint" logsEndpointDoc
AC..= _oecLogsEndpoint
<*> optionalFieldWithDefault "protocol" defaultOtelExporterProtocol protocolDoc
AC..= _oecProtocol
<*> optionalFieldWithDefault "headers" defaultOtelExporterHeaders headersDoc
@ -210,6 +219,7 @@ instance HasCodec OtelExporterConfig where
where
tracesEndpointDoc = "Target URL to which the exporter is going to send traces. No default."
metricsEndpointDoc = "Target URL to which the exporter is going to send metrics. No default."
logsEndpointDoc = "Target URL to which the exporter is going to send logs. No default."
protocolDoc = "The transport protocol"
headersDoc = "Key-value pairs to be used as headers to send with an export request."
attrsDoc = "Attributes to send as the resource attributes of an export request. We currently only support string-valued attributes."
@ -218,9 +228,11 @@ instance HasCodec OtelExporterConfig where
instance FromJSON OtelExporterConfig where
parseJSON = J.withObject "OtelExporterConfig" $ \o -> do
_oecTracesEndpoint <-
o .:? "otlp_traces_endpoint" .!= defaultOtelExporterTracesEndpoint
o .:? "otlp_traces_endpoint" .!= Nothing
_oecMetricsEndpoint <-
o .:? "otlp_metrics_endpoint" .!= defaultOtelExporterMetricsEndpoint
o .:? "otlp_metrics_endpoint" .!= Nothing
_oecLogsEndpoint <-
o .:? "otlp_logs_endpoint" .!= Nothing
_oecProtocol <-
o .:? "protocol" .!= defaultOtelExporterProtocol
_oecHeaders <-
@ -232,11 +244,12 @@ instance FromJSON OtelExporterConfig where
pure OtelExporterConfig {..}
instance ToJSON OtelExporterConfig where
toJSON (OtelExporterConfig otlpTracesEndpoint otlpMetricsEndpoint protocol headers resourceAttributes tracesPropagators) =
toJSON (OtelExporterConfig otlpTracesEndpoint otlpMetricsEndpoint otlpLogsEndpoint protocol headers resourceAttributes tracesPropagators) =
J.object
$ catMaybes
[ ("otlp_traces_endpoint" .=) <$> otlpTracesEndpoint,
("otlp_metrics_endpoint" .=) <$> otlpMetricsEndpoint,
("otlp_logs_endpoint" .=) <$> otlpLogsEndpoint,
Just $ "protocol" .= protocol,
Just $ "headers" .= headers,
Just $ "resource_attributes" .= resourceAttributes,
@ -246,8 +259,9 @@ instance ToJSON OtelExporterConfig where
defaultOtelExporterConfig :: OtelExporterConfig
defaultOtelExporterConfig =
OtelExporterConfig
{ _oecTracesEndpoint = defaultOtelExporterTracesEndpoint,
_oecMetricsEndpoint = defaultOtelExporterMetricsEndpoint,
{ _oecTracesEndpoint = Nothing,
_oecMetricsEndpoint = Nothing,
_oecLogsEndpoint = Nothing,
_oecProtocol = defaultOtelExporterProtocol,
_oecHeaders = defaultOtelExporterHeaders,
_oecResourceAttributes = defaultOtelExporterResourceAttributes,
@ -334,12 +348,6 @@ instance ToJSON TracePropagator where
B3 -> J.String "b3"
TraceContext -> J.String "tracecontext"
defaultOtelExporterTracesEndpoint :: Maybe Text
defaultOtelExporterTracesEndpoint = Nothing
defaultOtelExporterMetricsEndpoint :: Maybe Text
defaultOtelExporterMetricsEndpoint = Nothing
defaultOtelExporterProtocol :: OtlpProtocol
defaultOtelExporterProtocol = OtlpProtocolHttpProtobuf
@ -415,6 +423,11 @@ data OtelExporterInfo = OtelExporterInfo
-- A value of 'Nothing' indicates that the export of metrics data is
-- disabled.
_oteleiMetricsBaseRequest :: Maybe Request,
-- | HTTP 'Request' containing (1) the target URL to which the exporter is
-- going to send logs, and (2) the user-specified request headers.
-- A value of 'Nothing' indicates that the export of log data is
-- disabled.
_oteleiLogsBaseRequest :: Maybe Request,
-- | Attributes to send as the resource attributes of an export request. We
-- currently only support string-valued attributes.
--
@ -428,7 +441,7 @@ data OtelExporterInfo = OtelExporterInfo
}
emptyOtelExporterInfo :: OtelExporterInfo
emptyOtelExporterInfo = OtelExporterInfo Nothing Nothing mempty mempty
emptyOtelExporterInfo = OtelExporterInfo Nothing Nothing Nothing mempty mempty
-- | Batch processor configuration for trace export.
--

View File

@ -154,7 +154,7 @@ runMetadataQuery appContext schemaCache closeWebsocketsOnMetadataChange RQLMetad
then case (appEnvEnableMaintenanceMode, appEnvEnableReadOnlyMode) of
(MaintenanceModeDisabled, ReadOnlyModeDisabled) -> do
-- set modified metadata in storage
L.unLogger logger
L.unLoggerTracing logger
$ SchemaSyncLog L.LevelInfo TTMetadataApi
$ String
$ "Attempting to insert new metadata in storage"
@ -162,7 +162,7 @@ runMetadataQuery appContext schemaCache closeWebsocketsOnMetadataChange RQLMetad
Tracing.newSpan "setMetadata"
$ liftEitherM
$ setMetadata (fromMaybe currentResourceVersion _rqlMetadataResourceVersion) modMetadata
L.unLogger logger
L.unLoggerTracing logger
$ SchemaSyncLog L.LevelInfo TTMetadataApi
$ String
$ "Successfully inserted new metadata in storage with resource version: "
@ -182,7 +182,7 @@ runMetadataQuery appContext schemaCache closeWebsocketsOnMetadataChange RQLMetad
Tracing.newSpan "notifySchemaCacheSync"
$ liftEitherM
$ notifySchemaCacheSync newResourceVersion appEnvInstanceId cacheInvalidations
L.unLogger logger
L.unLoggerTracing logger
$ SchemaSyncLog L.LevelInfo TTMetadataApi
$ String
$ "Inserted schema cache sync notification at resource version:"

View File

@ -167,12 +167,12 @@ instance MonadTrans Handler where
instance (Monad m) => UserInfoM (Handler m) where
askUserInfo = asks hcUser
runHandler :: (HasResourceLimits m, MonadBaseControl IO m) => L.Logger L.Hasura -> HandlerCtx -> Handler m a -> m (Either QErr a)
runHandler :: (MonadIO m, Tracing.MonadTraceContext m, HasResourceLimits m, MonadBaseControl IO m) => L.Logger L.Hasura -> HandlerCtx -> Handler m a -> m (Either QErr a)
runHandler logger ctx (Handler r) = do
handlerLimit <- askHTTPHandlerLimit
runExceptT (runReaderT (runResourceLimits handlerLimit r) ctx)
`catch` \errorCallWithLoc@(ErrorCallWithLocation txt _) -> do
liftBase $ L.unLogger logger $ L.UnhandledInternalErrorLog errorCallWithLoc
L.unLoggerTracing logger $ L.UnhandledInternalErrorLog errorCallWithLoc
pure
$ throw500WithDetail "Internal Server Error"
$ object [("error", fromString txt)]

View File

@ -13,7 +13,7 @@ import Data.Either (fromRight)
import Data.Text qualified as T
import Data.Text.Conversions (toText)
import Hasura.HTTP
import Hasura.Logging (LoggerCtx (..))
import Hasura.Logging (LoggerCtx, getLoggerSet)
import Hasura.Prelude
import Hasura.Server.Version (Version, currentVersion)
import Network.HTTP.Client qualified as HTTP
@ -36,7 +36,7 @@ instance J.ToJSON UpdateInfo where
toEncoding = J.genericToEncoding (J.aesonDrop 2 J.snakeCase)
checkForUpdates :: LoggerCtx a -> HTTP.Manager -> IO void
checkForUpdates (LoggerCtx loggerSet _ _ _) manager = do
checkForUpdates ctx manager = do
let options = wreqOptions manager []
url <- getUrl
forever $ do
@ -46,7 +46,7 @@ checkForUpdates (LoggerCtx loggerSet _ _ _) manager = do
Right bs -> do
UpdateInfo latestVersion <- decodeResp $ bs ^. Wreq.responseBody
when (latestVersion /= currentVersion)
$ FL.pushLogStrLn loggerSet
$ FL.pushLogStrLn (getLoggerSet ctx)
$ FL.toLogStr
$ updateMsg latestVersion

View File

@ -39,6 +39,7 @@ import Hasura.Server.AppStateRef
import Hasura.Server.Logging
import Hasura.Server.Types
import Hasura.Services
import Hasura.Tracing qualified as Tracing
import Refined (NonNegative, Refined, unrefine)
data ThreadError
@ -141,7 +142,8 @@ startSchemaSyncListenerThread logger pool instanceId interval metaVersionRef = d
-- | An async thread which processes the schema sync events
-- See Note [Schema Cache Sync]
startSchemaSyncProcessorThread ::
( C.ForkableMonadIO m,
( Tracing.MonadTraceContext m,
C.ForkableMonadIO m,
HasAppEnv m,
HasCacheStaticConfig m,
MonadMetadataStorage m,
@ -247,7 +249,8 @@ listener logger pool metaVersionRef interval = L.iterateM_ listenerLoop defaultE
-- | An IO action that processes events from Queue, in a loop forever.
processor ::
forall m void impl.
( C.ForkableMonadIO m,
( Tracing.MonadTraceContext m,
C.ForkableMonadIO m,
HasAppEnv m,
HasCacheStaticConfig m,
MonadMetadataStorage m,
@ -266,7 +269,8 @@ processor
refreshSchemaCache metaVersion appStateRef TTProcessor logTVar
refreshSchemaCache ::
( MonadIO m,
( Tracing.MonadTraceContext m,
MonadIO m,
MonadBaseControl IO m,
HasAppEnv m,
HasCacheStaticConfig m,
@ -299,7 +303,7 @@ refreshSchemaCache
schemaCache <- askSchemaCache
let engineResourceVersion = scMetadataResourceVersion schemaCache
unless (engineResourceVersion == resourceVersion) $ do
logInfo logger threadType
logInfoTracing logger threadType
$ String
$ T.unwords
[ "Received metadata resource version:",
@ -311,7 +315,7 @@ refreshSchemaCache
MetadataWithResourceVersion metadata latestResourceVersion <- liftEitherM fetchMetadata
logInfo logger threadType
logInfoTracing logger threadType
$ String
$ T.unwords
[ "Fetched metadata with resource version:",
@ -322,7 +326,7 @@ refreshSchemaCache
case notifications of
[] -> do
logInfo logger threadType
logInfoTracing logger threadType
$ String
$ T.unwords
[ "Fetched metadata notifications and received no notifications. Not updating the schema cache.",
@ -332,7 +336,7 @@ refreshSchemaCache
]
setMetadataResourceVersionInSchemaCache latestResourceVersion
_ -> do
logInfo logger threadType
logInfoTracing logger threadType
$ String "Fetched metadata notifications and received some notifications. Updating the schema cache."
let cacheInvalidations =
if any ((== (engineResourceVersion + 1)) . fst) notifications
@ -353,20 +357,31 @@ refreshSchemaCache
}
buildSchemaCacheWithOptions CatalogSync cacheInvalidations metadata (Just latestResourceVersion)
setMetadataResourceVersionInSchemaCache latestResourceVersion
logInfo logger threadType
logInfoTracing logger threadType
$ String
$ "Schema cache updated with resource version: "
<> showMetadataResourceVersion latestResourceVersion
pure (msg, cache)
onLeft respErr (logError logger threadType . TEQueryError)
onLeft respErr (logErrorTracing logger threadType . TEQueryError)
logInfo :: (MonadIO m) => Logger Hasura -> SchemaSyncThreadType -> Value -> m ()
logInfo logger threadType val =
unLogger logger
$ SchemaSyncLog LevelInfo threadType val
-- | Emit an info-level 'SchemaSyncLog' line via the trace-context-aware
-- logger entry point, tagging it with the schema-sync thread type.
logInfoTracing :: (Tracing.MonadTraceContext m, MonadIO m) => Logger Hasura -> SchemaSyncThreadType -> Value -> m ()
logInfoTracing logger threadType =
  unLoggerTracing logger . SchemaSyncLog LevelInfo threadType
logError :: (MonadIO m, ToJSON a) => Logger Hasura -> SchemaSyncThreadType -> a -> m ()
logError logger threadType err =
unLogger logger
$ SchemaSyncLog LevelError threadType
$ object ["error" .= toJSON err]
-- | Emit an error-level 'SchemaSyncLog' line via the trace-context-aware
-- logger entry point, wrapping the error in an @{"error": ...}@ JSON object.
logErrorTracing :: (Tracing.MonadTraceContext m, MonadIO m, ToJSON a) => Logger Hasura -> SchemaSyncThreadType -> a -> m ()
logErrorTracing logger threadType err =
  unLoggerTracing logger (SchemaSyncLog LevelError threadType payload)
  where
    payload = object ["error" .= toJSON err]

View File

@ -33,6 +33,7 @@ import Hasura.Server.Migrate
import Hasura.Server.Types
import Hasura.Services.Network
import Hasura.Session
import Hasura.Tracing qualified as Tracing
import Test.Hspec.Core.Spec
import Test.Hspec.Expectations.Lifted
@ -61,6 +62,9 @@ newtype CacheRefT m a = CacheRefT {runCacheRefT :: (CacheDynamicConfig, MVar Reb
instance MonadTrans CacheRefT where
lift = CacheRefT . const
instance (Tracing.MonadTraceContext m) => Tracing.MonadTraceContext (CacheRefT m) where
currentContext = lift Tracing.currentContext
instance MFunctor CacheRefT where
hoist f (CacheRefT m) = CacheRefT (f . m)
@ -74,7 +78,8 @@ instance (MonadEventLogCleanup m) => MonadEventLogCleanup (CacheRefT m) where
updateTriggerCleanupSchedules logger oldSources newSources schemaCache = lift $ updateTriggerCleanupSchedules logger oldSources newSources schemaCache
instance
( MonadIO m,
( Tracing.MonadTraceContext m,
MonadIO m,
MonadBaseControl IO m,
MonadError QErr m,
MonadMetadataStorage m,
@ -107,7 +112,8 @@ singleTransaction = id
suite ::
forall m.
( MonadIO m,
( Tracing.MonadTraceContext m,
MonadIO m,
MonadError QErr m,
MonadBaseControl IO m,
MonadResolveSource m,