mirror of https://github.com/hasura/graphql-engine.git (synced 2024-12-15 01:12:56 +03:00)
server: add scheduled triggers

Co-authored-by: Alexis King <lexi.lambda@gmail.com>
Co-authored-by: Marion Schleifer <marion@hasura.io>
Co-authored-by: Karthikeyan Chinnakonda <karthikeyan@hasura.io>
Co-authored-by: Aleksandra Sikora <ola.zxcvbnm@gmail.com>
This commit is contained in:
parent
2735d284c1
commit
cc8e2ccc78
@@ -191,12 +191,14 @@ pip3 install -r requirements.txt
mkdir -p "$OUTPUT_FOLDER/hpc"

export EVENT_WEBHOOK_HEADER="MyEnvValue"

export HGE_URL="http://localhost:8080"
export HGE_URL_2=""
if [ -n "${HASURA_GRAPHQL_DATABASE_URL_2:-}" ] ; then
  HGE_URL_2="http://localhost:8081"
fi
export WEBHOOK_FROM_ENV="http://127.0.0.1:5592"
export SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN="http://127.0.0.1:5594"
export HASURA_GRAPHQL_STRINGIFY_NUMERIC_TYPES=true

HGE_PIDS=""
CHANGELOG.md (13 lines changed)
@@ -2,6 +2,19 @@

## Next release

### Scheduled Triggers

A scheduled trigger can be used to execute custom business logic based on time. There are two types of timing events: cron-based and timestamp-based.

A cron trigger is useful when something needs to be done periodically. For example, you can create a cron trigger to generate an end-of-day sales report every weekday at 9pm.

You can also schedule one-off events based on a timestamp. For example, a new scheduled event can be created for 2 weeks from when a user signs up, to send them an email about their experience.

<Add docs links>

(close #1914)

### Allow access to session variables by computed fields (fix #3846)

Sometimes it is useful for computed fields to have access to the Hasura session variables directly. For example, suppose you want to fetch some articles but also get related user info, say `likedByMe`. Now, you can define a function like:
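The function body itself is not shown in this hunk. A minimal sketch of such a function (the `article`/`liked_article` tables and column names here are illustrative, not from this commit):

```sql
-- A computed-field function takes the table row plus the Hasura session
-- as a JSON argument, so it can read e.g. x-hasura-user-id.
CREATE OR REPLACE FUNCTION article_liked_by_me(article_row article, hasura_session json)
RETURNS boolean AS $$
  SELECT EXISTS (
    SELECT 1
    FROM liked_article l
    WHERE l.user_id = hasura_session ->> 'x-hasura-user-id'
      AND l.article_id = article_row.id
  )
$$ LANGUAGE sql STABLE;
```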
@@ -621,11 +621,10 @@ class Main extends React.Component {
            <img src={read} alt={'read'} />
          </div>
          <div className={styles.featuresList}>
-           <div className={styles.featuresTitle}>
-             Read Replicas
-           </div>
+           <div className={styles.featuresTitle}>Read Replicas</div>
            <div className={styles.featuresDescription}>
-             Native Read Replica support for enhanced performance and scalability
+             Native Read Replica support for enhanced performance and
+             scalability
            </div>
          </div>
        </div>
@@ -190,17 +190,17 @@ const RelationshipEditor = ({
        disabled={!name}
      >
        {// default unselected option
        refSchema === '' && (
          <option value={''} disabled>
            {'-- reference schema --'}
          </option>
        )}
        {// all reference schema options
        orderedSchemaList.map((rs, j) => (
          <option key={j} value={rs}>
            {rs}
          </option>
        ))}
      </select>
    </div>
  );
@@ -727,13 +727,9 @@ const permChangePermissions = changeType => {
      '_table_' +
      table;

-   const requestMsg = capitalize(
-     getIngForm(changeType) + ' permissions...'
-   );
+   const requestMsg = capitalize(getIngForm(changeType) + ' permissions...');
    const successMsg = 'Permissions ' + getEdForm(changeType);
-   const errorMsg = capitalize(
-     getIngForm(changeType) + ' permissions failed'
-   );
+   const errorMsg = capitalize(getIngForm(changeType) + ' permissions failed');

    const customOnSuccess = () => {
      if (changeType === permChangeTypes.save) {
@@ -82,7 +82,7 @@ import {
  QUERY_TYPES,
} from '../../../Common/utils/pgUtils';
import { showErrorNotification } from '../../Common/Notification';
-import KnowMoreLink from "../../../Common/KnowMoreLink/KnowMoreLink";
+import KnowMoreLink from '../../../Common/KnowMoreLink/KnowMoreLink';
import {
  getFilterQueries,
  replaceLegacyOperators,
@@ -597,12 +597,14 @@ class Permissions extends Component {
    }

    let knowMoreHtml;
-   if(knowMoreRef) {
+   if (knowMoreRef) {
      knowMoreHtml = (
-       <span className={`${styles.add_mar_left_small} ${styles.sectionStatus}`}>
-         <KnowMoreLink href={knowMoreRef}/>
+       <span
+         className={`${styles.add_mar_left_small} ${styles.sectionStatus}`}
+       >
+         <KnowMoreLink href={knowMoreRef} />
        </span>
-     )
+     );
    }

    return (
@@ -1842,14 +1844,21 @@ class Permissions extends Component {
      const backendStatus = isBackendOnly ? 'enabled' : 'disabled';
      return (
        <CollapsibleToggle
-         title={getSectionHeader('Backend only', tooltip, backendStatus, 'https://docs.hasura.io/1.0/graphql/manual/auth/authorization/permission-rules.html#backend-only-inserts')}
+         title={getSectionHeader(
+           'Backend only',
+           tooltip,
+           backendStatus,
+           'https://docs.hasura.io/1.0/graphql/manual/auth/authorization/permission-rules.html#backend-only-inserts'
+         )}
          useDefaultTitleStyle
          testId={'toggle-backend-only'}
        >
          <div
            className={`${styles.editPermsSection} ${styles.display_flex}`}
          >
-           <div className={`${styles.display_flex} ${styles.add_mar_right_mid}`}>
+           <div
+             className={`${styles.display_flex} ${styles.add_mar_right_mid}`}
+           >
              <Toggle
                checked={isBackendOnly}
                onChange={() => dispatch(permToggleBackendOnly())}
@@ -4,7 +4,7 @@ import { StyledRadioButton } from './RadioButton';

export type RadioButtonProps = {
  name: string;
-}
+};

export const RadioButton: React.FC<RadioButtonProps> = props => {
  const { children, name } = props;
@@ -24,7 +24,7 @@ export type TextProps = {
  mt: keyof Theme['space'];
  mr: keyof Theme['space'];
  ml: keyof Theme['space'];
-}
+};

export const Text: React.FC<TextProps> = props => {
  const { children, type, fontWeight, fontSize } = props;
@@ -69,7 +69,7 @@ Text.defaultProps = {
type TextLinkProps = {
  underline: boolean;
  color: string;
-}
+};

export const TextLink: React.FC<TextLinkProps> = props => {
  const { children, underline } = props;
@@ -1,6 +1,6 @@
.. meta::
   :description: Creating Hasura actions
   :keywords: hasura, docs, actions, create

.. _create_actions:
@@ -105,6 +105,10 @@ Args syntax
     - false
     - [ :ref:`HeaderFromValue <HeaderFromValue>` | :ref:`HeaderFromEnv <HeaderFromEnv>` ]
     - List of headers to be sent with the webhook
+  * - retry_conf
+    - false
+    - RetryConf_
+    - Retry configuration if event delivery fails
   * - replace
     - false
     - Boolean
@@ -271,3 +275,28 @@ EventTriggerColumns
   :class: haskell-pre

   "*" | [:ref:`PGColumn`]

+.. _RetryConf:
+
+RetryConf
+&&&&&&&&&
+
+.. list-table::
+   :header-rows: 1
+
+   * - Key
+     - Required
+     - Schema
+     - Description
+   * - num_retries
+     - false
+     - Integer
+     - Number of times to retry delivery. Default: 0
+   * - interval_sec
+     - false
+     - Integer
+     - Number of seconds to wait between each retry. Default: 10
+   * - timeout_sec
+     - false
+     - Integer
+     - Number of seconds to wait for a response before timing out. Default: 60
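For illustration, a ``retry_conf`` built from the keys above could be passed inside an event trigger definition's args like this (the values are arbitrary examples, not defaults from this commit):

.. code-block:: json

   {
       "retry_conf": {
           "num_retries": 3,
           "interval_sec": 15,
           "timeout_sec": 60
       }
   }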
@@ -226,6 +226,21 @@ The various types of queries are listed in the following table:
     - 1
     - Invoke a trigger with custom payload

+  * - :ref:`create_cron_trigger`
+    - :ref:`create_cron_trigger_args <create_cron_trigger_syntax>`
+    - 1
+    - Create a cron trigger
+
+  * - :ref:`delete_cron_trigger`
+    - :ref:`delete_cron_trigger_args <delete_cron_trigger_syntax>`
+    - 1
+    - Delete an existing cron trigger
+
+  * - :ref:`create_scheduled_event`
+    - :ref:`create_scheduled_event_args <create_scheduled_event_syntax>`
+    - 1
+    - Create a new scheduled event
+
   * - :ref:`add_remote_schema`
     - :ref:`add_remote_schema_args <add_remote_schema_syntax>`
     - 1

@@ -426,6 +441,7 @@ See :ref:`server_flag_reference` for info on setting the above flag/env var.
   Permissions <permission>
   Computed Fields <computed-field>
   Event Triggers <event-triggers>
+  Scheduled Triggers <scheduled-triggers>
   Remote Schemas <remote-schemas>
   Query Collections <query-collections>
   Custom Types <custom-types>
@@ -0,0 +1,295 @@
.. meta::
   :description: Manage scheduled triggers with the Hasura schema/metadata API
   :keywords: hasura, docs, schema/metadata API, API reference, scheduled trigger

Schema/Metadata API Reference: Scheduled Triggers
=================================================

.. contents:: Table of contents
   :backlinks: none
   :depth: 1
   :local:

Scheduled triggers are used to invoke webhooks based on a timestamp or a cron schedule.

.. _create_cron_trigger:

create_cron_trigger
-------------------

``create_cron_trigger`` is used to create a new cron trigger.

.. code-block:: http

   POST /v1/query HTTP/1.1
   Content-Type: application/json
   X-Hasura-Role: admin

   {
       "type" : "create_cron_trigger",
       "args" : {
           "name": "sample_cron",
           "webhook": "https://httpbin.org/post",
           "schedule": "* * * * *",
           "payload": {
               "key1": "value1",
               "key2": "value2"
           },
           "include_in_metadata": false,
           "comment": "sample_cron comment"
       }
   }

.. _create_cron_trigger_syntax:

Args syntax
^^^^^^^^^^^

.. list-table::
   :header-rows: 1

   * - Key
     - Required
     - Schema
     - Description
   * - name
     - true
     - TriggerName_
     - Name of the cron trigger
   * - webhook
     - true
     - :ref:`WebhookURL <WebhookURL>`
     - URL of the webhook
   * - schedule
     - true
     - Cron Expression
     - Cron expression at which the trigger should be invoked
   * - payload
     - false
     - JSON
     - Any JSON payload which will be sent when the webhook is invoked
   * - headers
     - false
     - [ HeaderFromValue_ | HeaderFromEnv_ ]
     - List of headers to be sent with the webhook
   * - retry_conf
     - false
     - RetryConfST_
     - Retry configuration if scheduled invocation delivery fails
   * - include_in_metadata
     - true
     - Boolean
     - Flag to indicate whether the trigger should be included in the metadata. When a cron
       trigger is included in the metadata, the user will be able to export it when the
       metadata of the graphql-engine is exported.
   * - comment
     - false
     - Text
     - Custom comment
   * - replace
     - false
     - Boolean
     - When ``replace`` is set to ``true``, the cron trigger will be updated (if it exists);
       when it is ``false`` or the field is omitted, a new cron trigger will be created

.. _delete_cron_trigger:

delete_cron_trigger
-------------------

``delete_cron_trigger`` is used to delete an existing cron trigger. The scheduled events associated with the cron trigger will also be deleted.

.. code-block:: http

   POST /v1/query HTTP/1.1
   Content-Type: application/json
   X-Hasura-Role: admin

   {
       "type" : "delete_cron_trigger",
       "args" : {
           "name": "sample_cron"
       }
   }

.. _delete_cron_trigger_syntax:

Args syntax
^^^^^^^^^^^

.. list-table::
   :header-rows: 1

   * - Key
     - Required
     - Schema
     - Description
   * - name
     - true
     - TriggerName_
     - Name of the cron trigger

.. _create_scheduled_event:

create_scheduled_event
----------------------

``create_scheduled_event`` is used to create a scheduled event.

.. code-block:: http

   POST /v1/query HTTP/1.1
   Content-Type: application/json
   X-Hasura-Role: admin

   {
       "type" : "create_scheduled_event",
       "args" : {
           "webhook": "https://httpbin.org/post",
           "schedule_at": "2019-09-09T22:00:00Z",
           "payload": {
               "key1": "value1",
               "key2": "value2"
           },
           "headers" : [{
               "name": "header-key",
               "value": "header-value"
           }],
           "comment": "sample scheduled event comment"
       }
   }

.. _create_scheduled_event_syntax:

Args syntax
^^^^^^^^^^^

.. list-table::
   :header-rows: 1

   * - Key
     - Required
     - Schema
     - Description
   * - webhook
     - true
     - :ref:`WebhookURL <WebhookURL>`
     - URL of the webhook
   * - schedule_at
     - true
     - Timestamp (ISO8601 format)
     - The time at which the webhook should be invoked
   * - payload
     - false
     - JSON
     - Any JSON payload which will be sent when the webhook is invoked
   * - headers
     - false
     - [ HeaderFromValue_ | HeaderFromEnv_ ]
     - List of headers to be sent with the webhook
   * - retry_conf
     - false
     - RetryConfST_
     - Retry configuration if scheduled event delivery fails
   * - comment
     - false
     - Text
     - Custom comment

.. _TriggerName:

TriggerName
&&&&&&&&&&&

.. parsed-literal::

   String

.. _UrlFromEnv:

UrlFromEnv
&&&&&&&&&&

.. list-table::
   :header-rows: 1

   * - Key
     - Required
     - Schema
     - Description
   * - from_env
     - true
     - String
     - Name of the environment variable which has the URL

.. _HeaderFromValue:

HeaderFromValue
&&&&&&&&&&&&&&&

.. list-table::
   :header-rows: 1

   * - Key
     - Required
     - Schema
     - Description
   * - name
     - true
     - String
     - Name of the header
   * - value
     - true
     - String
     - Value of the header

.. _HeaderFromEnv:

HeaderFromEnv
&&&&&&&&&&&&&

.. list-table::
   :header-rows: 1

   * - Key
     - Required
     - Schema
     - Description
   * - name
     - true
     - String
     - Name of the header
   * - value_from_env
     - true
     - String
     - Name of the environment variable which holds the value of the header

.. _RetryConfST:

RetryConfST
&&&&&&&&&&&

.. list-table::
   :header-rows: 1

   * - Key
     - Required
     - Schema
     - Description
   * - num_retries
     - false
     - Integer
     - Number of times to retry delivery. Default: 0
   * - retry_interval_seconds
     - false
     - Integer
     - Number of seconds to wait between each retry. Default: 10
   * - timeout_seconds
     - false
     - Integer
     - Number of seconds to wait for a response before timing out. Default: 60
   * - tolerance_seconds
     - false
     - Integer
     - Number of seconds between the scheduled time and the actual delivery time that is acceptable. If the time difference is more than this, the event is dropped. Default: 21600 (6 hours)
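For illustration, a cron trigger created with an explicit ``retry_conf`` would pass the RetryConfST_ keys above — note the longer key names compared to the event trigger ``RetryConf``. The values here are arbitrary examples, not from this commit:

.. code-block:: json

   {
       "type": "create_cron_trigger",
       "args": {
           "name": "sample_cron",
           "webhook": "https://httpbin.org/post",
           "schedule": "*/10 * * * *",
           "retry_conf": {
               "num_retries": 3,
               "retry_interval_seconds": 20,
               "timeout_seconds": 60,
               "tolerance_seconds": 21600
           },
           "include_in_metadata": false
       }
   }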
@@ -167,18 +167,18 @@ if [ "$MODE" = "graphql-engine" ]; then
  # Attempt to run this after a CTRL-C:
  function cleanup {
    echo
    # Generate coverage, which can be useful for debugging or understanding
    if command -v hpc >/dev/null; then
      # Get the appropriate mix dir (the newest one). This way this hopefully
      # works when cabal.project.dev-sh.local is edited to turn on optimizations.
      hpcdir=$(ls -td dist-newstyle/build/**/hpc/vanilla/mix/graphql-engine-* | head -1)
      echo_pretty "Generating code coverage report..."
      COVERAGE_DIR="dist-newstyle/dev.sh-coverage"
      hpc_invocation=(hpc markup
        --exclude=Main
        --hpcdir "$hpcdir"
        --reset-hpcdirs graphql-engine.tix
        --fun-entry-count
        --destdir="$COVERAGE_DIR")
      ${hpc_invocation[@]} >/dev/null
@@ -190,7 +190,7 @@ if [ "$MODE" = "graphql-engine" ]; then
      echo_pretty ""
      echo_pretty "The tix file we used has been archived to: $tix_archive"
      echo_pretty ""
      echo_pretty "You might want to use 'hpc combine' to create a diff of two different tix"
      echo_pretty "files, and then generate a new report with something like:"
      echo_pretty "  $ ${hpc_invocation[*]}"
    else
@@ -365,6 +365,7 @@ elif [ "$MODE" = "test" ]; then

  export EVENT_WEBHOOK_HEADER="MyEnvValue"
  export WEBHOOK_FROM_ENV="http://127.0.0.1:5592"
+ export SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN="http://127.0.0.1:5594"

  # It's better UX to build first (possibly failing) before trying to launch
  # PG, but make sure that new-run uses the exact same build plan, else we risk
@@ -452,9 +453,9 @@ elif [ "$MODE" = "test" ]; then
  fi  # RUN_INTEGRATION_TESTS

  # TODO generate coverage report when we CTRL-C from 'dev.sh graphql-engine'.
  # If hpc available, combine any tix from haskell/unit tests:
  if command -v hpc >/dev/null; then
    if [ "$RUN_UNIT_TESTS" = true ] && [ "$RUN_INTEGRATION_TESTS" = true ]; then
      # As below, it seems we variously get errors related to having two Main
      # modules, so exclude:
      hpc combine --exclude=Main graphql-engine-tests.tix graphql-engine.tix --union > graphql-engine-combined.tix
@@ -480,7 +481,7 @@ elif [ "$MODE" = "test" ]; then
      --exclude=Main \
      --hpcdir dist-newstyle/build/*/ghc-*/graphql-engine-*/noopt/hpc/vanilla/mix/graphql-engine-* \
      --hpcdir dist-newstyle/build/*/ghc-*/graphql-engine-*/t/graphql-engine-tests/noopt/hpc/vanilla/mix/graphql-engine-tests \
      --reset-hpcdirs graphql-engine-combined.tix
    echo_pretty "To view full coverage report open:"
    echo_pretty "  file://$(pwd)/$COVERAGE_DIR/hpc_index.html"
@@ -14,6 +14,11 @@
-- See: https://www.haskell.org/cabal/users-guide/nix-local-build.html#configuring-builds-with-cabal-project
packages: .

+constraints:
+  -- We build with cabal-install 2.4 in CI, so ensure we don’t end up with a
+  -- freeze file that forces an incompatible version for Setup.hs scripts.
+  setup.Cabal <2.6
+
package *
  optimization: 2
@@ -1,5 +1,7 @@
-- The project configuration used when building in CI.

+reject-unconstrained-dependencies: all
+
package graphql-engine
  ghc-options: -j3 -Werror
  tests: true
@@ -1,97 +1,101 @@
constraints: any.Cabal ==2.4.0.1,
  any.Glob ==0.9.3,
constraints: any.Cabal ==2.4.1.0,
  Cabal -bundled-binary-generic,
  any.Glob ==0.10.0,
  any.HUnit ==1.6.0.0,
  any.Only ==0.1,
  any.QuickCheck ==2.12.6.1,
  any.QuickCheck ==2.14,
  QuickCheck +templatehaskell,
  any.RSA ==2.3.1,
  any.RSA ==2.4.1,
  any.SHA ==1.6.4.4,
  SHA -exe,
  any.Spock-core ==0.13.0.0,
  any.StateVar ==1.1.1.1,
  any.StateVar ==1.2,
  any.abstract-deque ==0.3,
  abstract-deque -usecas,
  any.abstract-par ==0.3.3,
  any.adjunctions ==4.4,
  any.aeson ==1.4.2.0,
  any.aeson ==1.4.7.1,
  aeson -bytestring-builder -cffi -developer -fast,
  any.aeson-casing ==0.1.1.0,
  any.ansi-terminal ==0.8.2,
  any.aeson-casing ==0.2.0.0,
  any.ansi-terminal ==0.10.3,
  ansi-terminal -example,
  any.ansi-wl-pprint ==0.6.8.2,
  any.ansi-wl-pprint ==0.6.9,
  ansi-wl-pprint -example,
  any.appar ==0.1.7,
  any.appar ==0.1.8,
  any.array ==0.5.3.0,
  any.asn1-encoding ==0.9.5,
  any.asn1-parse ==0.9.4,
  any.asn1-types ==0.3.2,
  any.async ==2.2.1,
  any.asn1-encoding ==0.9.6,
  any.asn1-parse ==0.9.5,
  any.asn1-types ==0.3.4,
  any.assoc ==1.0.1,
  any.async ==2.2.2,
  async -bench,
  any.attoparsec ==0.13.2.2,
  any.attoparsec ==0.13.2.4,
  attoparsec -developer,
  any.attoparsec-iso8601 ==1.0.1.0,
  attoparsec-iso8601 -developer -fast,
  any.authenticate-oauth ==1.6,
  any.auto-update ==0.1.4.1,
  any.authenticate-oauth ==1.6.0.1,
  any.auto-update ==0.1.6,
  any.base ==4.12.0.0,
  any.base-compat ==0.10.5,
  any.base-compat-batteries ==0.10.5,
  any.base-orphans ==0.8.1,
  any.base-compat ==0.11.1,
  any.base-compat-batteries ==0.11.1,
  any.base-orphans ==0.8.2,
  any.base-prelude ==1.3,
  any.base16-bytestring ==0.1.1.6,
  any.base64-bytestring ==1.0.0.2,
  any.basement ==0.0.10,
  any.bifunctors ==5.5.4,
  any.base64-bytestring ==1.0.0.3,
  any.basement ==0.0.11,
  any.bifunctors ==5.5.7,
  bifunctors +semigroups +tagged,
  any.binary ==0.8.6.0,
  any.binary-orphans ==1.0.1,
  any.binary-parser ==0.5.5,
  any.blaze-builder ==0.4.1.0,
  any.blaze-html ==0.9.1.1,
  any.blaze-markup ==0.8.2.2,
  any.blaze-html ==0.9.1.2,
  any.blaze-markup ==0.8.2.4,
  any.bsb-http-chunked ==0.0.0.4,
  any.byteable ==0.1.1,
  any.byteorder ==1.0.4,
  any.bytestring ==0.10.8.2,
  any.bytestring-builder ==0.10.8.2.0,
  bytestring-builder +bytestring_has_builder,
  any.bytestring-strict-builder ==0.4.5.1,
  any.bytestring-tree-builder ==0.2.7.2,
  any.bytestring-strict-builder ==0.4.5.3,
  any.bytestring-tree-builder ==0.2.7.3,
  any.cabal-doctest ==1.0.8,
  any.call-stack ==0.1.0,
  any.case-insensitive ==1.2.0.11,
  any.cassava ==0.5.1.0,
  any.call-stack ==0.2.0,
  any.case-insensitive ==1.2.1.0,
  any.cassava ==0.5.2.0,
  cassava -bytestring--lt-0_10_4,
  any.cereal ==0.5.8.0,
  any.cereal ==0.5.8.1,
  cereal -bytestring-builder,
  any.charset ==0.3.7.1,
  any.clock ==0.7.2,
  any.clock ==0.8,
  clock -llvm,
  any.cmdargs ==0.10.20,
  cmdargs +quotation -testprog,
  any.code-page ==0.2,
  any.colour ==2.3.5,
  any.comonad ==5.0.5,
  any.comonad ==5.0.6,
  comonad +containers +distributive +test-doctests,
  any.concise ==0.1.0.1,
  any.concurrent-output ==1.10.9,
  any.conduit ==1.3.1.1,
  any.connection ==0.2.8,
  any.constraints ==0.10.1,
  any.concurrent-output ==1.10.11,
  any.conduit ==1.3.2,
  any.connection ==0.3.1,
  any.constraints ==0.12,
  any.containers ==0.6.0.1,
  any.contravariant ==1.5.1,
  any.contravariant ==1.5.2,
  contravariant +semigroups +statevar +tagged,
  any.contravariant-extras ==0.3.4,
  any.cookie ==0.4.4,
  any.criterion ==1.5.5.0,
  any.contravariant-extras ==0.3.5.1,
  any.cookie ==0.4.5,
  any.criterion ==1.5.6.2,
  criterion -embed-data-files -fast,
  any.criterion-measurement ==0.1.1.0,
  any.criterion-measurement ==0.1.2.0,
  criterion-measurement -fast,
  any.cron ==0.7.0,
  cron -lib-werror,
  any.crypto-api ==0.13.3,
  crypto-api -all_cpolys,
  any.crypto-pubkey-types ==0.4.3,
  any.cryptohash-md5 ==0.11.100.1,
  any.cryptohash-sha1 ==0.11.100.1,
  any.cryptonite ==0.25,
  any.cryptonite ==0.26,
  cryptonite -check_alignment +integer-gmp -old_toolchain_inliner +support_aesni +support_deepseq -support_pclmuldq +support_rdrand -support_sse,
  any.data-bword ==0.1.0.1,
  any.data-checked ==0.3,

@@ -100,35 +104,37 @@ constraints: any.Cabal ==2.4.0.1,
  any.data-default-instances-containers ==0.0.1,
  any.data-default-instances-dlist ==0.0.1,
  any.data-default-instances-old-locale ==0.0.1,
  any.data-dword ==0.3.1.2,
  any.data-dword ==0.3.2,
  any.data-endian ==0.1.1,
  any.data-has ==0.3.0.0,
  any.data-serializer ==0.3.4,
  any.data-textual ==0.3.0.2,
  any.data-serializer ==0.3.4.1,
  any.data-textual ==0.3.0.3,
  any.deepseq ==1.4.4.0,
  any.deferred-folds ==0.9.10,
  any.deferred-folds ==0.9.10.1,
  any.dense-linear-algebra ==0.1.0.0,
  any.dependent-map ==0.2.4.0,
  any.dependent-sum ==0.4,
  any.directory ==1.3.3.0,
  any.distributive ==0.6,
  any.directory ==1.3.6.1,
  any.distributive ==0.6.2,
  distributive +semigroups +tagged,
  any.dlist ==0.8.0.6,
  any.dlist ==0.8.0.8,
  any.easy-file ==0.2.2,
  any.either ==5.0.1.1,
  any.ekg-core ==0.1.1.6,
  any.ekg-core ==0.1.1.7,
  any.ekg-json ==0.1.0.6,
  any.entropy ==0.4.1.4,
  any.entropy ==0.4.1.6,
  entropy -halvm,
  any.erf ==2.0.0.0,
  any.errors ==2.3.0,
  any.exceptions ==0.10.2,
  any.fast-logger ==2.4.15,
  any.file-embed ==0.0.11,
  any.exceptions ==0.10.4,
  exceptions +transformers-0-4,
  any.fail ==4.9.0.0,
  any.fast-logger ==3.0.1,
  any.file-embed ==0.0.11.2,
  any.filepath ==1.4.2.1,
  any.focus ==1.0.1.3,
  any.foldl ==1.4.5,
  any.free ==5.1.1,
  any.foldl ==1.4.6,
  any.free ==5.1.3,
  any.generic-arbitrary ==0.1.0,
  any.ghc-boot-th ==8.6.5,
  any.ghc-heap ==8.6.5,

@@ -137,110 +143,112 @@ constraints: any.Cabal ==2.4.0.1,
  any.ghc-prim ==0.5.3,
  any.happy ==1.19.12,
  happy +small_base,
  any.hashable ==1.2.7.0,
  any.hashable ==1.3.0.0,
  hashable -examples +integer-gmp +sse2 -sse41,
  any.hashtables ==1.2.3.1,
  hashtables -bounds-checking -debug -portable -sse42 +unsafe-tricks,
  any.haskell-lexer ==1.0.2,
  any.hasql ==1.3.0.5,
  any.hasql-pool ==0.5.0.2,
  any.hasql-transaction ==0.7.1,
  any.hedgehog ==0.6.1,
  any.hashtables ==1.2.3.4,
  hashtables -bounds-checking -debug -detailed-profiling -portable -sse42 +unsafe-tricks,
  any.haskell-lexer ==1.1,
  any.hasql ==1.4.2,
  any.hasql-pool ==0.5.1,
  any.hasql-transaction ==1.0.0.1,
  any.hedgehog ==1.0.2,
  any.hourglass ==0.2.12,
  any.hsc2hs ==0.68.6,
  any.hsc2hs ==0.68.7,
  hsc2hs -in-ghc-tree,
  any.hspec ==2.6.1,
  any.hspec-core ==2.6.1,
  any.hspec-discover ==2.6.1,
  any.hspec ==2.7.1,
  any.hspec-core ==2.7.1,
  any.hspec-discover ==2.7.1,
  any.hspec-expectations ==0.8.2,
  any.hspec-expectations-lifted ==0.10.0,
  any.http-api-data ==0.4,
  any.http-api-data ==0.4.1.1,
  http-api-data -use-text-show,
  any.http-client ==0.5.14,
  any.http-client ==0.6.4.1,
  http-client +network-uri,
  any.http-client-tls ==0.3.5.3,
  any.http-date ==0.0.8,
  any.http-types ==0.12.3,
  any.http2 ==1.6.5,
  any.http2 ==2.0.4,
  http2 -devel,
  any.hvect ==0.4.0.0,
  any.immortal ==0.2.2.1,
  any.insert-ordered-containers ==0.2.1.0,
  any.indexed-profunctors ==0.1,
  any.insert-ordered-containers ==0.2.3.1,
  any.integer-gmp ==1.0.2.0,
  any.integer-logarithms ==1.0.3,
  integer-logarithms -check-bounds +integer-gmp,
  any.invariant ==0.5.3,
  any.iproute ==1.7.7,
  any.jose ==0.8.0.0,
  any.iproute ==1.7.9,
  any.jose ==0.8.2.0,
  jose -demos,
  any.js-flot ==0.8.3,
  any.js-jquery ==3.3.1,
  any.kan-extensions ==5.2,
  any.keys ==3.12.2,
  any.lens ==4.17.1,
  any.lens ==4.19.2,
  lens -benchmark-uniplate -dump-splices +inlining -j -old-inline-pragmas -safe +test-doctests +test-hunit +test-properties +test-templates +trustworthy,
  any.lens-aeson ==1.0.2,
  any.lens-aeson ==1.1,
  lens-aeson +test-doctests,
  any.libyaml ==0.1.1.0,
  any.libyaml ==0.1.2,
  libyaml -no-unicode -system-libyaml,
  any.lifted-async ==0.10.0.4,
  any.lifted-async ==0.10.0.6,
  any.lifted-base ==0.2.3.12,
  any.list-t ==1.0.3.1,
  any.list-t ==1.0.4,
  any.loch-th ==0.2.2,
  any.math-functions ==0.3.1.0,
  math-functions -system-expm1,
  any.memory ==0.14.18,
  any.math-functions ==0.3.3.0,
  math-functions +system-erf +system-expm1,
  any.memory ==0.15.0,
  memory +support_basement +support_bytestring +support_deepseq +support_foundation,
  any.microstache ==1.0.1.1,
  any.mime-types ==0.1.0.9,
  any.mmorph ==1.1.3,
  any.monad-control ==1.0.2.3,
  any.monad-par ==0.3.4.8,
  any.monad-par ==0.3.5,
  monad-par -chaselev -newgeneric,
  any.monad-par-extras ==0.3.3,
  any.monad-time ==0.3.1.0,
  any.monad-validate ==1.2.0.0,
  any.mono-traversable ==1.0.11.0,
  any.mono-traversable ==1.0.15.1,
  any.mtl ==2.2.2,
  any.mtl-compat ==0.2.2,
  mtl-compat -two-point-one -two-point-two,
  any.mustache ==2.3.0,
  any.mwc-probability ==2.0.4,
  any.mustache ==2.3.1,
  any.mwc-probability ==2.2.0,
  any.mwc-random ==0.14.0.0,
  any.natural-transformation ==0.4,
  any.network ==2.8.0.1,
  any.network-byte-order ==0.0.0.0,
  any.network ==3.1.1.1,
  any.network-byte-order ==0.1.4.0,
  any.network-info ==0.2.0.10,
  any.network-ip ==0.3.0.2,
  any.network-uri ==2.6.1.0,
  any.network-ip ==0.3.0.3,
  any.network-uri ==2.6.3.0,
  any.old-locale ==1.0.0.7,
  any.old-time ==1.1.0.3,
  any.optparse-applicative ==0.14.3.0,
  any.optics-core ==0.3,
  any.optics-extra ==0.3,
  any.optparse-applicative ==0.15.1.0,
  any.parallel ==3.2.2.0,
  any.parsec ==3.1.13.0,
  any.parsers ==0.12.10,
  parsers +attoparsec +binary +parsec,
  any.pem ==0.2.4,
  any.placeholders ==0.1,
  any.pointed ==5.0.1,
  pointed +comonad +containers +kan-extensions +semigroupoids +semigroups +stm +tagged +transformers +unordered-containers,
  any.postgresql-binary ==0.12.1.2,
  any.postgresql-binary ==0.12.2,
  any.postgresql-libpq ==0.9.4.2,
  postgresql-libpq -use-pkg-config,
  any.pretty ==1.1.3.6,
  any.pretty-show ==1.9.5,
  any.prettyprinter ==1.2.1,
  any.pretty-show ==1.10,
  any.prettyprinter ==1.6.1,
  prettyprinter -buildreadme,
  any.primitive ==0.6.4.0,
  any.primitive-extras ==0.7.1,
  any.process ==1.6.5.0,
  any.profunctors ==5.3,
  any.protolude ==0.2.3,
  any.psqueues ==0.2.7.1,
  any.quickcheck-instances ==0.3.19,
  any.primitive ==0.7.0.1,
  any.primitive-extras ==0.8,
  any.primitive-unlifted ==0.1.3.0,
  any.process ==1.6.8.2,
  any.profunctors ==5.5.2,
  any.protolude ==0.2.4,
  any.psqueues ==0.2.7.2,
  any.quickcheck-instances ==0.3.22,
  quickcheck-instances -bytestring-builder,
  any.quickcheck-io ==0.2.0,
  any.random ==1.1,
  any.reflection ==2.1.4,
  any.reflection ==2.1.5,
  reflection -slow +template-haskell,
  any.regex-base ==0.94.0.0,
  any.regex-tdfa ==1.3.1.0,

@@ -248,107 +256,113 @@ constraints: any.Cabal ==2.4.0.1,
  any.reroute ==0.5.0.0,
  any.resource-pool ==0.2.3.2,
  resource-pool -developer,
  any.resourcet ==1.2.2,
  any.retry ==0.7.7.0,
  any.resourcet ==1.2.3,
  any.retry ==0.8.1.1,
  retry -lib-werror,
  any.rts ==1.0,
  any.safe ==0.3.17,
  any.safe ==0.3.18,
  any.scientific ==0.3.6.2,
  scientific -bytestring-builder -integer-simple,
  any.semigroupoids ==5.3.2,
  any.semialign ==1.1,
  semialign +semigroupoids,
  any.semigroupoids ==5.3.4,
  semigroupoids +comonad +containers +contravariant +distributive +doctests +tagged +unordered-containers,
  any.semigroups ==0.18.5,
  semigroups +binary +bytestring -bytestring-builder +containers +deepseq +hashable +tagged +text +transformers +unordered-containers,
  any.semver ==0.3.3.1,
  any.semigroups ==0.19.1,
  semigroups +binary +bytestring -bytestring-builder +containers +deepseq +hashable +tagged +template-haskell +text +transformers +unordered-containers,
  any.semver ==0.3.4,
  any.setenv ==0.1.1.3,
  any.shakespeare ==2.0.22,
  any.shakespeare ==2.0.24,
  shakespeare -test_coffee -test_export -test_roy,
  any.simple-sendfile ==0.2.28,
  any.simple-sendfile ==0.2.30,
  simple-sendfile +allow-bsd,
  any.socks ==0.5.6,
  any.split ==0.2.3.3,
  any.statistics ==0.15.0.0,
  any.socks ==0.6.1,
  any.split ==0.2.3.4,
  any.splitmix ==0.0.4,
  splitmix -optimised-mixer +random,
  any.statistics ==0.15.2.0,
  any.stm ==2.5.0.0,
  any.stm-containers ==1.1.0.4,
  any.stm-hamt ==1.2.0.2,
  any.streaming-commons ==0.2.1.0,
  any.stm-hamt ==1.2.0.4,
  any.streaming-commons ==0.2.1.2,
  streaming-commons -use-bytestring-builder,
  any.string-conversions ==0.4.0.1,
  any.superbuffer ==0.3.1.1,
  any.tagged ==0.8.6,
  tagged +deepseq +transformers,
  any.template-haskell ==2.14.0.0,
  any.template-haskell-compat-v0208 ==0.1.2.1,
  any.terminal-size ==0.3.2.1,
  any.text ==1.2.3.1,
  any.text-builder ==0.6.5,
  any.text-builder ==0.6.6.1,
  any.text-conversions ==0.3.0,
  any.text-latin1 ==0.3.1,
  any.text-printer ==0.5,
  any.text-short ==0.1.2,
  any.text-printer ==0.5.0.1,
  any.text-short ==0.1.3,
  text-short -asserts,
  any.tf-random ==0.5,
  any.th-abstraction ==0.2.11.0,
  any.th-lift ==0.7.11,
  any.th-lift-instances ==0.1.12,
  any.these ==0.7.6,
  any.time ==1.8.0.2,
  any.th-abstraction ==0.3.2.0,
  any.th-lift ==0.8.1,
  any.th-lift-instances ==0.1.16,
  any.these ==1.0.1,
  these +aeson +assoc +quickcheck +semigroupoids,
  any.time ==1.9.3,
  any.time-compat ==1.9.3,
  time-compat -old-locale,
  any.time-locale-compat ==0.1.1.5,
  time-locale-compat -old-locale,
  any.tls ==1.4.1,
  any.time-manager ==0.0.0,
  any.tls ==1.5.4,
  tls +compat -hans +network,
  any.transformers ==0.5.6.2,
  any.transformers-base ==0.4.5.2,
  transformers-base +orphaninstances,
  any.transformers-compat ==0.6.4,
  any.transformers-compat ==0.6.5,
  transformers-compat -five +five-three -four +generic-deriving +mtl -three -two,
  any.tuple-th ==0.2.5,
  any.type-equality ==1,
  any.type-hint ==0.1,
  any.unix ==2.7.2.2,
  any.unix-compat ==0.5.1,
  any.unix-compat ==0.5.2,
  unix-compat -old-time,
  any.unix-time ==0.4.5,
  any.unliftio-core ==0.1.2.0,
  any.unordered-containers ==0.2.9.0,
  any.unix-time ==0.4.7,
  any.unliftio-core ==0.2.0.1,
  any.unordered-containers ==0.2.10.0,
  unordered-containers -debug,
  any.uri-encode ==1.5.0.5,
  uri-encode +network-uri -tools,
  any.utf8-string ==1.0.1.1,
  any.uuid ==1.3.13,
  any.uuid-types ==1.0.3,
  any.vault ==0.3.1.2,
  any.vault ==0.3.1.4,
  vault +useghc,
  any.vector ==0.12.0.3,
  any.vector ==0.12.1.2,
  vector +boundschecks -internalchecks -unsafechecks -wall,
  any.vector-algorithms ==0.8.0.1,
  any.vector-algorithms ==0.8.0.3,
  vector-algorithms +bench +boundschecks -internalchecks -llvm +properties -unsafechecks,
  any.vector-binary-instances ==0.2.5.1,
  any.vector-builder ==0.3.7.2,
  any.vector-instances ==3.4,
  vector-instances +hashable,
  any.vector-th-unbox ==0.2.1.6,
  any.void ==0.7.2,
  any.vector-builder ==0.3.8,
  any.vector-th-unbox ==0.2.1.7,
  any.void ==0.7.3,
  void -safe,
  any.wai ==3.2.2,
  any.wai-app-static ==3.1.6.3,
  any.wai ==3.2.2.1,
  any.wai-app-static ==3.1.7.1,
  wai-app-static -print,
  any.wai-extra ==3.0.26,
  any.wai-extra ==3.0.29.1,
  wai-extra -build-example,
  any.wai-logger ==2.3.4,
  any.wai-logger ==2.3.6,
  any.wai-websockets ==3.0.1.2,
  wai-websockets +example,
  any.warp ==3.2.27,
  any.warp ==3.3.10,
  warp +allow-sendfilefd -network-bytestring -warp-debug,
  any.websockets ==0.12.5.3,
  any.websockets ==0.12.7.0,
  websockets -example,
  any.wl-pprint-annotated ==0.1.0.1,
  any.word8 ==0.1.3,
  any.wreq ==0.5.3.1,
  any.wreq ==0.5.3.2,
  wreq -aws -developer +doctest -httpbin,
  any.x509 ==1.7.5,
  any.x509-store ==1.6.7,
  any.x509-system ==1.6.6,
  any.x509-validation ==1.6.11,
  any.yaml ==0.11.0.0,
  any.yaml ==0.11.3.0,
  yaml +no-examples +no-exe,
  any.zlib ==0.6.2,
  any.zlib ==0.6.2.1,
  zlib -non-blocking-ffi -pkg-config
@@ -30,11 +30,11 @@ common common-all

  default-language: Haskell2010
  default-extensions:
-   ApplicativeDo BangPatterns BlockArguments ConstraintKinds DefaultSignatures DeriveDataTypeable
+   ApplicativeDo BangPatterns BlockArguments ConstraintKinds DataKinds DefaultSignatures DeriveDataTypeable
    DeriveFoldable DeriveFunctor DeriveGeneric DeriveLift DeriveTraversable DerivingVia EmptyCase
    FlexibleContexts FlexibleInstances FunctionalDependencies GeneralizedNewtypeDeriving
    InstanceSigs LambdaCase MultiParamTypeClasses MultiWayIf NamedFieldPuns NoImplicitPrelude
-   OverloadedStrings QuantifiedConstraints QuasiQuotes RankNTypes ScopedTypeVariables
+   OverloadedStrings QuantifiedConstraints QuasiQuotes RankNTypes RecordWildCards ScopedTypeVariables
    StandaloneDeriving TemplateHaskell TupleSections TypeApplications TypeFamilies TypeOperators

common common-exe
@@ -70,7 +70,7 @@ library
  , http-types
  , attoparsec
  , attoparsec-iso8601 >= 1.0
- , time
+ , time >= 1.9
  , scientific
  , Spock-core
  , split
@@ -88,11 +88,13 @@ library
  , deepseq
  , dependent-map >=0.2.4 && <0.4
  , dependent-sum >=0.4 && <0.5
+ , exceptions

  -- `these >=1` is split into several different packages, but our current stack
  -- resolver has `these <1`; when we upgrade we just need to add an extra
  -- dependency on `semialign`
- , these >=0.7.1 && <0.8
+ , these
+ , semialign

  -- Encoder related
  , uuid
@@ -191,12 +193,17 @@ library
  -- testing
  , QuickCheck
  , generic-arbitrary
  , quickcheck-instances

  -- 0.6.1 is supposedly not okay for ghc 8.6:
  -- https://github.com/nomeata/ghc-heap-view/issues/27
  , ghc-heap-view == 0.6.0

+ , directory
+
+ -- scheduled triggers
+ , cron >= 0.6.2
+
exposed-modules: Control.Arrow.Extended
  , Control.Arrow.Trans
  , Control.Monad.Stateless
@@ -281,6 +288,7 @@ library
  , Hasura.RQL.Types.QueryCollection
  , Hasura.RQL.Types.Action
  , Hasura.RQL.Types.RemoteSchema
+ , Hasura.RQL.Types.ScheduledTrigger
  , Hasura.RQL.DDL.ComputedField
  , Hasura.RQL.DDL.Relationship
  , Hasura.RQL.Types.CustomTypes
@@ -302,6 +310,7 @@ library
  , Hasura.RQL.DDL.Schema.Table
  , Hasura.RQL.DDL.Utils
  , Hasura.RQL.DDL.EventTrigger
+ , Hasura.RQL.DDL.ScheduledTrigger
  , Hasura.RQL.DDL.Headers
  , Hasura.RQL.DDL.RemoteSchema
  , Hasura.RQL.DDL.QueryCollection
@@ -368,8 +377,9 @@ library
  , Hasura.GraphQL.Context
  , Hasura.GraphQL.Logging

- , Hasura.Events.Lib
- , Hasura.Events.HTTP
+ , Hasura.Eventing.HTTP
+ , Hasura.Eventing.EventTrigger
+ , Hasura.Eventing.ScheduledTrigger

  , Control.Concurrent.Extended
  , Control.Lens.Extended
@@ -3,9 +3,9 @@
{-# LANGUAGE DerivingVia #-}
{-# LANGUAGE TypeOperators #-}

-{-| Types for time intervals of various units. Each newtype wraps 'DiffTime', but they have
-different 'Num' instances. The intent is to use the record selectors to write literals with
-particular units, like this:
+{-| Types for time intervals of various units. Each newtype wraps 'DiffTime',
+but they have different 'Num' instances. The intent is to use the record
+selectors to write literals with particular units, like this:

@
>>> 'milliseconds' 500
@@ -25,22 +25,23 @@ You can also go the other way using the constructors rather than the selectors:
0.5
@

-NOTE: the 'Real' and 'Fractional' instances just essentially add or strip the unit label (as
-above), so you can't use 'realToFrac' to convert between the units types here. Instead try
-'fromUnits' which is less of a foot-gun.
+NOTE: the 'Real' and 'Fractional' instances just essentially add or strip the
+unit label (as above), so you can't use 'realToFrac' to convert between the
+units types here. Instead try 'convertDuration' which is less of a foot-gun.

-The 'Read' instances for these types mirror the behavior of the 'RealFrac' instance wrt numeric
-literals for convenient serialization (e.g. when working with env vars):
+The 'Read' instances for these types mirror the behavior of the 'RealFrac'
+instance wrt numeric literals for convenient serialization (e.g. when working
+with env vars):

@
>>> read "1.2" :: Milliseconds
Milliseconds {milliseconds = 0.0012s}
@

-Generally, if you need to pass around a duration between functions you should use 'DiffTime'
-directly. However if storing a duration in a type that will be serialized, e.g. one having
-a 'ToJSON' instance, it is better to use one of these explicit wrapper types so that it's
-obvious what units will be used. -}
+Generally, if you need to pass around a duration between functions you should
+use 'DiffTime' directly. However if storing a duration in a type that will be
+serialized, e.g. one having a 'ToJSON' instance, it is better to use one of
+these explicit wrapper types so that it's obvious what units will be used. -}
module Data.Time.Clock.Units
  ( Days(..)
  , Hours(..)
@@ -51,16 +52,16 @@ module Data.Time.Clock.Units
  , Nanoseconds(..)
  -- * Converting between units
  , Duration(..)
- , fromUnits
+ , convertDuration
  -- * Reexports
  -- | We use 'DiffTime' as the standard type for unit-agnostic duration in our
- -- code. You'll need to convert to a 'NominalDiffTime' (with 'fromUnits') in
+ -- code. You'll need to convert to a 'NominalDiffTime' (with 'convertDuration') in
  -- order to do anything useful with 'UTCTime' with these durations.
  --
  -- NOTE: some care must be taken especially when 'NominalDiffTime' interacts
  -- with 'UTCTime':
  --
- -- - a 'DiffTime' or 'NominalDiffTime' my be negative
+ -- - a 'DiffTime' or 'NominalDiffTime' may be negative
  -- - 'addUTCTime' and 'diffUTCTime' do not attempt to handle leap seconds
  , DiffTime
  ) where
@@ -75,7 +76,6 @@ import Data.Time.Clock
import GHC.TypeLits
import Numeric (readFloat)

-
newtype Seconds = Seconds { seconds :: DiffTime }
-- NOTE: we want Show to give a pastable data structure string, even
-- though Read is custom.
@@ -149,11 +149,11 @@ instance (KnownNat picosPerUnit) => RealFrac (TimeUnit picosPerUnit) where

-- we can ignore unit:
instance Hashable (TimeUnit a) where
  hashWithSalt salt (TimeUnit dt) = hashWithSalt salt $
    (realToFrac :: DiffTime -> Double) dt


--- | Duration types isomorphic to 'DiffTime', powering 'fromUnits'.
+-- | Duration types isomorphic to 'DiffTime', powering 'convertDuration'.
class Duration d where
  fromDiffTime :: DiffTime -> d
  toDiffTime :: d -> DiffTime

@@ -167,5 +167,5 @@ instance Duration NominalDiffTime where
  toDiffTime = realToFrac

-- | Safe conversion between duration units.
-fromUnits :: (Duration x, Duration y)=> x -> y
-fromUnits = fromDiffTime . toDiffTime
+convertDuration :: (Duration x, Duration y) => x -> y
+convertDuration = fromDiffTime . toDiffTime
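A quick usage sketch for the API above (illustrative, not part of this commit; it assumes the 'Duration' instances this module provides for 'DiffTime' and the unit wrappers):

    import Data.Time.Clock.Units

    -- A numeric literal is read in the unit named by the selector and
    -- unwrapped to a plain 'DiffTime':
    fetchInterval :: DiffTime
    fetchInterval = seconds 1

    -- Convert between unit wrappers with 'convertDuration', not 'realToFrac'
    -- (which, per the NOTE above, would just relabel the number):
    fetchIntervalMs :: Milliseconds
    fetchIntervalMs = convertDuration fetchInterval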
@@ -33,7 +33,8 @@ import qualified Text.Mustache.Compile as M

import Hasura.Db
import Hasura.EncJSON
-import Hasura.Events.Lib
+import Hasura.Eventing.EventTrigger
+import Hasura.Eventing.ScheduledTrigger
import Hasura.GraphQL.Resolve.Action (asyncActionsProcessor)
import Hasura.Logging
import Hasura.Prelude

@@ -242,7 +243,7 @@ runHGEServer ServeOptions{..} InitCtx{..} initTime = do

  maxEvThrds <- liftIO $ getFromEnv defaultMaxEventThreads "HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE"
  fetchI <- fmap milliseconds $ liftIO $
-   getFromEnv defaultFetchIntervalMilliSec "HASURA_GRAPHQL_EVENTS_FETCH_INTERVAL"
+   getFromEnv (Milliseconds defaultFetchInterval) "HASURA_GRAPHQL_EVENTS_FETCH_INTERVAL"
  logEnvHeaders <- liftIO $ getFromEnv False "LOG_HEADERS_FROM_ENV"

  -- prepare event triggers data

@@ -257,6 +258,13 @@ runHGEServer ServeOptions{..} InitCtx{..} initTime = do
  _asyncActionsThread <- C.forkImmortal "asyncActionsProcessor" logger $ liftIO $
    asyncActionsProcessor (_scrCache cacheRef) _icPgPool _icHttpManager

+ -- start a background thread to create new cron events
+ void $ liftIO $ C.forkImmortal "runCronEventsGenerator" logger $
+   runCronEventsGenerator logger _icPgPool (getSCFromRef cacheRef)
+
+ -- start a background thread to deliver the scheduled events
+ void $ liftIO $ C.forkImmortal "processScheduledTriggers" logger $
+   processScheduledTriggers logger logEnvHeaders _icHttpManager _icPgPool (getSCFromRef cacheRef)
+
  -- start a background thread to check for updates
  _updateThread <- C.forkImmortal "checkForUpdates" logger $ liftIO $
    checkForUpdates loggerCtx _icHttpManager
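For context on the new cron events generator thread: it has to turn a cron expression into concrete future timestamps. A minimal sketch of that computation using the `cron` package added to the cabal file above (hypothetical code, not from this commit; assumes `cron`'s `parseCronSchedule` and `nextMatch`):

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Time.Clock (getCurrentTime)
    import System.Cron.Parser (parseCronSchedule)
    import System.Cron (nextMatch)

    main :: IO ()
    main = do
      now <- getCurrentTime
      -- "*/10 * * * *" fires every ten minutes
      case parseCronSchedule "*/10 * * * *" of
        Left err  -> putStrLn ("invalid schedule: " ++ err)
        Right sch -> print (nextMatch sch now)  -- Maybe UTCTime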
@@ -1,20 +1,50 @@
{-|
= Event Triggers

Event triggers are like ordinary SQL triggers, except instead of calling a SQL
procedure, they call a webhook. The event delivery mechanism involves coordination
between both the database and graphql-engine: only the SQL database knows
when the events should fire, but only graphql-engine knows how to actually
deliver them.

Therefore, event triggers are implemented in two parts:

1. Every event trigger is backed by a bona fide SQL trigger. When the SQL trigger
   fires, it creates a new record in the hdb_catalog.event_log table.

2. Concurrently, a thread in graphql-engine monitors the hdb_catalog.event_log
   table for new events. When new event(s) are found, it uses the information
   (URL, payload and headers) stored in the event to deliver the event
   to the webhook.

The creation and deletion of the SQL trigger itself is managed by the metadata DDL
APIs (see Hasura.RQL.DDL.EventTrigger), so this module focuses on event delivery.

Most of the subtleties involve guaranteeing reliable delivery of events: we
guarantee that every event will be delivered at least once, even if
graphql-engine crashes. This means we have to record the state of each event
in the database, and we have to retry failed requests at a regular
(user-configurable) interval.
-}
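-- An illustrative sketch (not from this commit) of the polling step described
-- above: lock a batch of undelivered events so that concurrent fetchers cannot
-- pick them up twice, then hand them to the delivery threads. The exact
-- hdb_catalog.event_log columns named here are assumptions:
--
--   UPDATE hdb_catalog.event_log
--   SET locked = 't'
--   WHERE id IN ( SELECT id FROM hdb_catalog.event_log
--                 WHERE delivered = 'f' AND error = 'f' AND locked = 'f'
--                 LIMIT 100 FOR UPDATE SKIP LOCKED )
--   RETURNING id, schema_name, table_name, trigger_name, payload, tries, created_at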
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
-module Hasura.Events.Lib
+module Hasura.Eventing.EventTrigger
  ( initEventEngineCtx
  , processEventQueue
  , unlockAllEvents
  , defaultMaxEventThreads
- , defaultFetchIntervalMilliSec
+ , defaultFetchInterval
  , Event(..)
  , unlockEvents
  , EventEngineCtx(..)
  ) where

-import Control.Concurrent.Async (async, link, wait, withAsync)
+import Control.Concurrent.Async (wait, withAsync)
import Control.Concurrent.Extended (sleep)
import Control.Concurrent.STM.TVar
import Control.Exception.Lifted (finally, mask_, try)
import Control.Monad.Catch (MonadMask, bracket_)
import Control.Monad.STM
import Data.Aeson
import Data.Aeson.Casing
@@ -24,7 +54,7 @@ import Data.Int (Int64)
import Data.String
import Data.Time.Clock
import Data.Word
-import Hasura.Events.HTTP
+import Hasura.Eventing.HTTP
import Hasura.HTTP
import Hasura.Prelude
import Hasura.RQL.DDL.Headers
@@ -32,29 +62,22 @@ import Hasura.RQL.Types
import Hasura.Server.Version (HasVersion)
import Hasura.SQL.Types

-- remove these when array encoding is merged
-import qualified Database.PG.Query.PTI as PTI
-import qualified PostgreSQL.Binary.Encoding as PE
-import qualified Data.HashMap.Strict as M
-import qualified Data.TByteString as TBS
-import qualified Data.Text as T
-import qualified Data.Time.Clock as Time
-import qualified Database.PG.Query as Q
-import qualified Hasura.Logging as L
-import qualified Network.HTTP.Client as HTTP
+import qualified Database.PG.Query.PTI as PTI
+import qualified PostgreSQL.Binary.Encoding as PE
+import qualified Data.Set as Set

+import qualified Data.ByteString as BS
+import qualified Data.CaseInsensitive as CI
+import qualified Data.HashMap.Strict as M
+import qualified Data.Set as Set
+import qualified Data.TByteString as TBS
+import qualified Data.Text as T
+import qualified Data.Text.Encoding as T
+import qualified Data.Time.Clock as Time
+import qualified Database.PG.Query as Q
+import qualified Hasura.Logging as L
+import qualified Network.HTTP.Client as HTTP
+import qualified Network.HTTP.Types as HTTP

data TriggerMetadata
  = TriggerMetadata { tmName :: TriggerName }
  deriving (Show, Eq)

type Version = T.Text

invocationVersion :: Version
invocationVersion = "2"

type LogEnvHeaders = Bool

$(deriveJSON (aesonDrop 2 snakeCase){omitNothingFields=True} ''TriggerMetadata)
|
||||
instance L.ToEngineLog EventInternalErr L.Hasura where
|
||||
toEngineLog (EventInternalErr qerr) = (L.LevelError, L.eventTriggerLogType, toJSON qerr)
|
||||
|
||||
data TriggerMeta
|
||||
= TriggerMeta { tmName :: TriggerName }
|
||||
deriving (Show, Eq)
|
||||
-- | Change data for a particular row
|
||||
--
|
||||
-- https://docs.hasura.io/1.0/graphql/manual/event-triggers/payload.html
|
||||
data Event
|
||||
= Event
|
||||
{ eId :: EventId
|
||||
, eTable :: QualifiedTable
|
||||
, eTrigger :: TriggerMetadata
|
||||
, eEvent :: Value
|
||||
, eTries :: Int
|
||||
, eCreatedAt :: Time.UTCTime
|
||||
} deriving (Show, Eq)
|
||||
|
||||
$(deriveJSON (aesonDrop 2 snakeCase){omitNothingFields=True} ''TriggerMeta)
|
||||
$(deriveFromJSON (aesonDrop 1 snakeCase){omitNothingFields=True} ''Event)
|
||||
|
||||
data EventEngineCtx
|
||||
= EventEngineCtx
|
||||
{ _eeCtxEventThreadsCapacity :: TVar Int
|
||||
, _eeCtxFetchInterval :: DiffTime
|
||||
, _eeCtxLockedEvents :: TVar (Set.Set EventId)
|
||||
}
|
||||
|
||||
data DeliveryInfo
|
||||
= DeliveryInfo
|
||||
@@ -77,21 +116,6 @@ data DeliveryInfo

$(deriveJSON (aesonDrop 2 snakeCase){omitNothingFields=True} ''DeliveryInfo)

--- | Change data for a particular row
---
--- https://docs.hasura.io/1.0/graphql/manual/event-triggers/payload.html
-data Event
-  = Event
-  { eId        :: EventId
-  , eTable     :: QualifiedTable
-  , eTrigger   :: TriggerMeta
-  , eEvent     :: Value
-  , eTries     :: Int
-  , eCreatedAt :: Time.UTCTime
-  } deriving (Show, Eq)
-
-$(deriveFromJSON (aesonDrop 1 snakeCase){omitNothingFields=True} ''Event)
-
newtype QualifiedTableStrict = QualifiedTableStrict
  { getQualifiedTable :: QualifiedTable
  } deriving (Show, Eq)
@@ -102,12 +126,11 @@ instance ToJSON QualifiedTableStrict where
      , "name" .= tn
      ]

+-- | See 'Event'.
data EventPayload
  = EventPayload
  { epId           :: EventId
  , epTable        :: QualifiedTableStrict
- , epTrigger      :: TriggerMeta
+ , epTrigger      :: TriggerMetadata
  , epEvent        :: Value
  , epDeliveryInfo :: DeliveryInfo
  , epCreatedAt    :: Time.UTCTime
@@ -115,62 +138,11 @@ data EventPayload

$(deriveToJSON (aesonDrop 2 snakeCase){omitNothingFields=True} ''EventPayload)

-data WebhookRequest
-  = WebhookRequest
-  { _rqPayload :: Value
-  , _rqHeaders :: Maybe [HeaderConf]
-  , _rqVersion :: T.Text
-  }
-$(deriveToJSON (aesonDrop 3 snakeCase){omitNothingFields=True} ''WebhookRequest)
-
-data WebhookResponse
-  = WebhookResponse
-  { _wrsBody    :: TBS.TByteString
-  , _wrsHeaders :: Maybe [HeaderConf]
-  , _wrsStatus  :: Int
-  }
-$(deriveToJSON (aesonDrop 4 snakeCase){omitNothingFields=True} ''WebhookResponse)
-
-data ClientError = ClientError { _ceMessage :: TBS.TByteString }
-$(deriveToJSON (aesonDrop 3 snakeCase){omitNothingFields=True} ''ClientError)
-
-data Response = ResponseType1 WebhookResponse | ResponseType2 ClientError
-
-instance ToJSON Response where
-  toJSON (ResponseType1 resp) = object
-    [ "type" .= String "webhook_response"
-    , "data" .= toJSON resp
-    , "version" .= invocationVersion
-    ]
-  toJSON (ResponseType2 err) = object
-    [ "type" .= String "client_error"
-    , "data" .= toJSON err
-    , "version" .= invocationVersion
-    ]
-
-data Invocation
-  = Invocation
-  { iEventId  :: EventId
-  , iStatus   :: Int
-  , iRequest  :: WebhookRequest
-  , iResponse :: Response
-  }
-
-data EventEngineCtx
-  = EventEngineCtx
-  { _eeCtxEventThreadsCapacity :: TVar Int
-  , _eeCtxFetchInterval :: DiffTime
-  , _eeCtxLockedEvents :: TVar (Set.Set EventId)
-  }
-
defaultMaxEventThreads :: Int
defaultMaxEventThreads = 100

-defaultFetchIntervalMilliSec :: Milliseconds
-defaultFetchIntervalMilliSec = 1000
-
-retryAfterHeader :: CI.CI T.Text
-retryAfterHeader = "Retry-After"
+defaultFetchInterval :: DiffTime
+defaultFetchInterval = seconds 1

initEventEngineCtx :: Int -> DiffTime -> STM EventEngineCtx
initEventEngineCtx maxT _eeCtxFetchInterval = do
@ -191,7 +163,7 @@ processEventQueue
|
||||
:: (HasVersion) => L.Logger L.Hasura -> LogEnvHeaders -> HTTP.Manager -> Q.PGPool
|
||||
-> IO SchemaCache -> EventEngineCtx
|
||||
-> IO void
|
||||
processEventQueue logger logenv httpMgr pool getSchemaCache EventEngineCtx{..} = do
|
||||
processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx{..} = do
|
||||
events0 <- popEventsBatch
|
||||
go events0 0 False
|
||||
where
|
||||
@ -231,24 +203,7 @@ processEventQueue logger logenv httpMgr pool getSchemaCache EventEngineCtx{..} =
|
||||
eventsNext <- withAsync popEventsBatch $ \eventsNextA -> do
|
||||
-- process approximately in order, minding HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE:
|
||||
forM_ events $ \event ->
|
||||
mask_ $ do
|
||||
atomically $ do -- block until < HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE threads:
|
||||
capacity <- readTVar _eeCtxEventThreadsCapacity
|
||||
check $ capacity > 0
|
||||
writeTVar _eeCtxEventThreadsCapacity $! (capacity - 1)
|
||||
-- since there is some capacity in our worker threads, we can launch another:
|
||||
let restoreCapacity evt =
|
||||
liftIO $ atomically $
|
||||
do
|
||||
modifyTVar' _eeCtxEventThreadsCapacity (+ 1)
|
||||
-- After the event has been processed, remove it from the
|
||||
-- locked events cache
|
||||
modifyTVar' _eeCtxLockedEvents (Set.delete (eId evt))
|
||||
t <- async $ flip runReaderT (logger, httpMgr) $
|
||||
processEvent event `finally` (restoreCapacity event)
|
||||
link t
|
||||
|
||||
-- return when next batch ready; some 'processEvent' threads may be running.
|
||||
runReaderT (withEventEngineCtx eeCtx $ (processEvent event)) (logger, httpMgr)
|
||||
wait eventsNextA
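-- Note: the events-thread capacity TVar acts as a counting semaphore;
-- blocking STM ('check' in the inline block above, 'retry' in
-- 'decrementThreadCount' below) bounds the number of concurrent
-- 'processEvent' threads by HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE.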
|
||||
|
||||
let lenEvents = length events
|
||||
@ -302,13 +257,32 @@ processEventQueue logger logenv httpMgr pool getSchemaCache EventEngineCtx{..} =
|
||||
etHeaders = map encodeHeader headerInfos
|
||||
headers = addDefaultHeaders etHeaders
|
||||
ep = createEventPayload retryConf e
|
||||
res <- runExceptT $ tryWebhook headers responseTimeout ep webhook
|
||||
extraLogCtx = ExtraLogContext Nothing (epId ep) -- skip getting the current time here, to avoid an extra IO call per event
|
||||
res <- runExceptT $ tryWebhook headers responseTimeout (toJSON ep) webhook
|
||||
logHTTPForET res extraLogCtx
|
||||
let decodedHeaders = map (decodeHeader logenv headerInfos) headers
|
||||
either
|
||||
(processError pool e retryConf decodedHeaders ep)
|
||||
(processSuccess pool e decodedHeaders ep) res
|
||||
>>= flip onLeft logQErr
|
||||
|
||||
withEventEngineCtx ::
|
||||
( MonadIO m
|
||||
, MonadMask m
|
||||
)
|
||||
=> EventEngineCtx -> m () -> m ()
|
||||
withEventEngineCtx eeCtx = bracket_ (decrementThreadCount eeCtx) (incrementThreadCount eeCtx)
|
||||
|
||||
incrementThreadCount :: MonadIO m => EventEngineCtx -> m ()
|
||||
incrementThreadCount (EventEngineCtx c _ _) = liftIO $ atomically $ modifyTVar' c (+1)
|
||||
|
||||
decrementThreadCount :: MonadIO m => EventEngineCtx -> m ()
|
||||
decrementThreadCount (EventEngineCtx c _ _) = liftIO $ atomically $ do
|
||||
countThreads <- readTVar c
|
||||
if countThreads > 0
|
||||
then modifyTVar' c (\v -> v - 1)
|
||||
else retry
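-- Despite the names, 'decrementThreadCount' is the acquire step (it blocks
-- via 'retry' until a worker slot is free) and 'incrementThreadCount' is the
-- release; 'bracket_' in 'withEventEngineCtx' guarantees the slot is
-- returned even if event processing throws.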
|
||||
|
||||
createEventPayload :: RetryConf -> Event -> EventPayload
|
||||
createEventPayload retryConf e = EventPayload
|
||||
{ epId = eId e
|
||||
@ -324,46 +298,42 @@ createEventPayload retryConf e = EventPayload
|
||||
|
||||
processSuccess
|
||||
:: ( MonadIO m )
|
||||
=> Q.PGPool -> Event -> [HeaderConf] -> EventPayload -> HTTPResp
|
||||
=> Q.PGPool -> Event -> [HeaderConf] -> EventPayload -> HTTPResp a
|
||||
-> m (Either QErr ())
|
||||
processSuccess pool e decodedHeaders ep resp = do
|
||||
let respBody = hrsBody resp
|
||||
respHeaders = hrsHeaders resp
|
||||
respStatus = hrsStatus resp
|
||||
invocation = mkInvo ep respStatus decodedHeaders respBody respHeaders
|
||||
invocation = mkInvocation ep respStatus decodedHeaders respBody respHeaders
|
||||
liftIO $ runExceptT $ Q.runTx pool (Q.RepeatableRead, Just Q.ReadWrite) $ do
|
||||
insertInvocation invocation
|
||||
setSuccess e
|
||||
|
||||
processError
|
||||
:: ( MonadIO m
|
||||
, MonadReader r m
|
||||
, Has (L.Logger L.Hasura) r
|
||||
)
|
||||
=> Q.PGPool -> Event -> RetryConf -> [HeaderConf] -> EventPayload -> HTTPErr
|
||||
:: ( MonadIO m )
|
||||
=> Q.PGPool -> Event -> RetryConf -> [HeaderConf] -> EventPayload -> HTTPErr a
|
||||
-> m (Either QErr ())
|
||||
processError pool e retryConf decodedHeaders ep err = do
|
||||
logHTTPErr err
|
||||
let invocation = case err of
|
||||
HClient excp -> do
|
||||
let errMsg = TBS.fromLBS $ encode $ show excp
|
||||
mkInvo ep 1000 decodedHeaders errMsg []
|
||||
mkInvocation ep 1000 decodedHeaders errMsg []
|
||||
HParse _ detail -> do
|
||||
let errMsg = TBS.fromLBS $ encode detail
|
||||
mkInvo ep 1001 decodedHeaders errMsg []
|
||||
mkInvocation ep 1001 decodedHeaders errMsg []
|
||||
HStatus errResp -> do
|
||||
let respPayload = hrsBody errResp
|
||||
respHeaders = hrsHeaders errResp
|
||||
respStatus = hrsStatus errResp
|
||||
mkInvo ep respStatus decodedHeaders respPayload respHeaders
|
||||
mkInvocation ep respStatus decodedHeaders respPayload respHeaders
|
||||
HOther detail -> do
|
||||
let errMsg = (TBS.fromLBS $ encode detail)
|
||||
mkInvo ep 500 decodedHeaders errMsg []
|
||||
mkInvocation ep 500 decodedHeaders errMsg []
|
||||
liftIO $ runExceptT $ Q.runTx pool (Q.RepeatableRead, Just Q.ReadWrite) $ do
|
||||
insertInvocation invocation
|
||||
retryOrSetError e retryConf err
|
||||
|
||||
retryOrSetError :: Event -> RetryConf -> HTTPErr -> Q.TxE QErr ()
|
||||
retryOrSetError :: Event -> RetryConf -> HTTPErr a -> Q.TxE QErr ()
|
||||
retryOrSetError e retryConf err = do
|
||||
let mretryHeader = getRetryAfterHeaderFromError err
|
||||
tries = eTries e
|
||||
@ -384,40 +354,12 @@ retryOrSetError e retryConf err = do
|
||||
getRetryAfterHeaderFromError (HStatus resp) = getRetryAfterHeaderFromResp resp
|
||||
getRetryAfterHeaderFromError _ = Nothing
|
||||
|
||||
getRetryAfterHeaderFromResp resp
|
||||
= let mHeader = find (\(HeaderConf name _)
|
||||
-> CI.mk name == retryAfterHeader) (hrsHeaders resp)
|
||||
in case mHeader of
|
||||
Just (HeaderConf _ (HVValue value)) -> Just value
|
||||
_ -> Nothing
|
||||
|
||||
parseRetryHeader = mfilter (> 0) . readMaybe . T.unpack
|
||||
|
||||
encodeHeader :: EventHeaderInfo -> HTTP.Header
|
||||
encodeHeader (EventHeaderInfo hconf cache) =
|
||||
let (HeaderConf name _) = hconf
|
||||
ciname = CI.mk $ T.encodeUtf8 name
|
||||
value = T.encodeUtf8 cache
|
||||
in (ciname, value)
|
||||
|
||||
decodeHeader
|
||||
:: LogEnvHeaders -> [EventHeaderInfo] -> (HTTP.HeaderName, BS.ByteString)
|
||||
-> HeaderConf
|
||||
decodeHeader logenv headerInfos (hdrName, hdrVal)
|
||||
= let name = bsToTxt $ CI.original hdrName
|
||||
getName ehi = let (HeaderConf name' _) = ehiHeaderConf ehi
|
||||
in name'
|
||||
mehi = find (\hi -> getName hi == name) headerInfos
|
||||
in case mehi of
|
||||
Nothing -> HeaderConf name (HVValue (bsToTxt hdrVal))
|
||||
Just ehi -> if logenv
|
||||
then HeaderConf name (HVValue (ehiCachedValue ehi))
|
||||
else ehiHeaderConf ehi
|
||||
|
||||
mkInvo
|
||||
mkInvocation
|
||||
:: EventPayload -> Int -> [HeaderConf] -> TBS.TByteString -> [HeaderConf]
|
||||
-> Invocation
|
||||
mkInvo ep status reqHeaders respBody respHeaders
|
||||
-> (Invocation 'EventType)
|
||||
mkInvocation ep status reqHeaders respBody respHeaders
|
||||
= let resp = if isClientError status
|
||||
then mkClientErr respBody
|
||||
else mkResp status respBody respHeaders
|
||||
@ -425,70 +367,14 @@ mkInvo ep status reqHeaders respBody respHeaders
|
||||
Invocation
|
||||
(epId ep)
|
||||
status
|
||||
(mkWebhookReq (toJSON ep) reqHeaders)
|
||||
(mkWebhookReq (toJSON ep) reqHeaders invocationVersionET)
|
||||
resp
|
||||
|
||||
mkResp :: Int -> TBS.TByteString -> [HeaderConf] -> Response
|
||||
mkResp status payload headers =
|
||||
let wr = WebhookResponse payload (mkMaybe headers) status
|
||||
in ResponseType1 wr
|
||||
|
||||
mkClientErr :: TBS.TByteString -> Response
|
||||
mkClientErr message =
|
||||
let cerr = ClientError message
|
||||
in ResponseType2 cerr
|
||||
|
||||
mkWebhookReq :: Value -> [HeaderConf] -> WebhookRequest
|
||||
mkWebhookReq payload headers = WebhookRequest payload (mkMaybe headers) invocationVersion
|
||||
|
||||
isClientError :: Int -> Bool
|
||||
isClientError status = status >= 1000
|
||||
|
||||
mkMaybe :: [a] -> Maybe [a]
|
||||
mkMaybe [] = Nothing
|
||||
mkMaybe x = Just x
|
||||
|
||||
logQErr :: ( MonadReader r m, Has (L.Logger L.Hasura) r, MonadIO m) => QErr -> m ()
|
||||
logQErr err = do
|
||||
logger :: L.Logger L.Hasura <- asks getter
|
||||
L.unLogger logger $ EventInternalErr err
|
||||
|
||||
logHTTPErr
|
||||
:: ( MonadReader r m
|
||||
, Has (L.Logger L.Hasura) r
|
||||
, MonadIO m
|
||||
)
|
||||
=> HTTPErr -> m ()
|
||||
logHTTPErr err = do
|
||||
logger :: L.Logger L.Hasura <- asks getter
|
||||
L.unLogger logger $ err
|
||||
|
||||
-- These run concurrently on their respective EventPayloads
|
||||
tryWebhook
|
||||
:: ( Has (L.Logger L.Hasura) r
|
||||
, Has HTTP.Manager r
|
||||
, MonadReader r m
|
||||
, MonadIO m
|
||||
, MonadError HTTPErr m
|
||||
)
|
||||
=> [HTTP.Header] -> HTTP.ResponseTimeout -> EventPayload -> String
|
||||
-> m HTTPResp
|
||||
tryWebhook headers responseTimeout ep webhook = do
|
||||
let context = ExtraContext (epCreatedAt ep) (epId ep)
|
||||
initReqE <- liftIO $ try $ HTTP.parseRequest webhook
|
||||
case initReqE of
|
||||
Left excp -> throwError $ HClient excp
|
||||
Right initReq -> do
|
||||
let req = initReq
|
||||
{ HTTP.method = "POST"
|
||||
, HTTP.requestHeaders = headers
|
||||
, HTTP.requestBody = HTTP.RequestBodyLBS (encode ep)
|
||||
, HTTP.responseTimeout = responseTimeout
|
||||
}
|
||||
|
||||
eitherResp <- runHTTP req (Just context)
|
||||
onLeft eitherResp throwError
|
||||
|
||||
getEventTriggerInfoFromEvent :: SchemaCache -> Event -> Maybe EventTriggerInfo
|
||||
getEventTriggerInfoFromEvent sc e = let table = eTable e
|
||||
tableInfo = M.lookup table $ scTables sc
|
||||
@ -522,20 +408,20 @@ fetchEvents limitI =
|
||||
Event
|
||||
{ eId = id'
|
||||
, eTable = QualifiedObject sn tn
|
||||
, eTrigger = TriggerMeta trn
|
||||
, eTrigger = TriggerMetadata trn
|
||||
, eEvent = payload
|
||||
, eTries = tries
|
||||
, eCreatedAt = created
|
||||
}
|
||||
limit = fromIntegral limitI :: Word64
|
||||
|
||||
insertInvocation :: Invocation -> Q.TxE QErr ()
|
||||
insertInvocation :: Invocation 'EventType -> Q.TxE QErr ()
|
||||
insertInvocation invo = do
|
||||
Q.unitQE defaultTxErrorHandler [Q.sql|
|
||||
INSERT INTO hdb_catalog.event_invocation_logs (event_id, status, request, response)
|
||||
VALUES ($1, $2, $3, $4)
|
||||
|] ( iEventId invo
|
||||
, toInt64 $ iStatus invo
|
||||
, toInt64 $ iStatus invo :: Int64
|
||||
, Q.AltJ $ toJSON $ iRequest invo
|
||||
, Q.AltJ $ toJSON $ iResponse invo) True
|
||||
Q.unitQE defaultTxErrorHandler [Q.sql|
|
373
server/src-lib/Hasura/Eventing/HTTP.hs
Normal file
@ -0,0 +1,373 @@
|
||||
{-|
= Hasura.Eventing.HTTP

This module is a utility module providing HTTP utilities for
"Hasura.Eventing.EventTriggers" and "Hasura.Eventing.ScheduledTriggers".

The event triggers and scheduled triggers share the event delivery
mechanism via the 'tryWebhook' function defined in this module.
-}
|
||||
module Hasura.Eventing.HTTP
|
||||
( HTTPErr(..)
|
||||
, HTTPResp(..)
|
||||
, tryWebhook
|
||||
, runHTTP
|
||||
, isNetworkError
|
||||
, isNetworkErrorHC
|
||||
, logHTTPForET
|
||||
, logHTTPForST
|
||||
, ExtraLogContext(..)
|
||||
, EventId
|
||||
, Invocation(..)
|
||||
, InvocationVersion
|
||||
, Response(..)
|
||||
, WebhookRequest(..)
|
||||
, WebhookResponse(..)
|
||||
, ClientError(..)
|
||||
, isClientError
|
||||
, mkClientErr
|
||||
, mkWebhookReq
|
||||
, mkResp
|
||||
, LogEnvHeaders
|
||||
, encodeHeader
|
||||
, decodeHeader
|
||||
, getRetryAfterHeaderFromHTTPErr
|
||||
, getRetryAfterHeaderFromResp
|
||||
, parseRetryHeaderValue
|
||||
, TriggerTypes(..)
|
||||
, invocationVersionET
|
||||
, invocationVersionST
|
||||
) where
|
||||
|
||||
import qualified Data.ByteString as BS
|
||||
import qualified Data.ByteString.Lazy as LBS
|
||||
import qualified Data.CaseInsensitive as CI
|
||||
import qualified Data.TByteString as TBS
|
||||
import qualified Data.Text as T
|
||||
import qualified Data.Text.Encoding as TE
|
||||
import qualified Data.Text.Encoding.Error as TE
|
||||
import qualified Hasura.Logging as L
|
||||
import qualified Network.HTTP.Client as HTTP
|
||||
import qualified Network.HTTP.Types as HTTP
|
||||
import qualified Data.Time.Clock as Time
|
||||
|
||||
import Control.Exception (try)
|
||||
import Control.Monad.IO.Class (MonadIO, liftIO)
|
||||
import Control.Monad.Reader (MonadReader)
|
||||
import Data.Aeson
|
||||
import Data.Aeson.Casing
|
||||
import Data.Aeson.TH
|
||||
import Data.Either
|
||||
import Data.Has
|
||||
import Hasura.Logging
|
||||
import Hasura.Prelude
|
||||
import Hasura.RQL.DDL.Headers
|
||||
import Hasura.RQL.Types.EventTrigger
|
||||
|
||||
type LogEnvHeaders = Bool
|
||||
|
||||
retryAfterHeader :: CI.CI T.Text
|
||||
retryAfterHeader = "Retry-After"
|
||||
|
||||
data WebhookRequest
|
||||
= WebhookRequest
|
||||
{ _rqPayload :: Value
|
||||
, _rqHeaders :: [HeaderConf]
|
||||
, _rqVersion :: T.Text
|
||||
}
|
||||
$(deriveToJSON (aesonDrop 3 snakeCase){omitNothingFields=True} ''WebhookRequest)
|
||||
|
||||
data WebhookResponse
|
||||
= WebhookResponse
|
||||
{ _wrsBody :: TBS.TByteString
|
||||
, _wrsHeaders :: [HeaderConf]
|
||||
, _wrsStatus :: Int
|
||||
}
|
||||
$(deriveToJSON (aesonDrop 4 snakeCase){omitNothingFields=True} ''WebhookResponse)
|
||||
|
||||
newtype ClientError = ClientError { _ceMessage :: TBS.TByteString}
|
||||
$(deriveToJSON (aesonDrop 3 snakeCase){omitNothingFields=True} ''ClientError)
|
||||
|
||||
type InvocationVersion = T.Text
|
||||
|
||||
invocationVersionET :: InvocationVersion
|
||||
invocationVersionET = "2"
|
||||
|
||||
invocationVersionST :: InvocationVersion
|
||||
invocationVersionST = "1"
|
||||
|
||||
-- | There are two types of events: EventType (for event triggers) and ScheduledType (for scheduled triggers)
|
||||
data TriggerTypes = EventType | ScheduledType
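-- 'TriggerTypes' is only used promoted to the kind level (via DataKinds):
-- 'Response', 'Invocation', 'HTTPResp' and 'HTTPErr' below carry a phantom
-- index @(a :: TriggerTypes)@ so that event-trigger and scheduled-trigger
-- values get distinct ToJSON/ToEngineLog instances without duplicating the
-- underlying data types.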
|
||||
|
||||
data Response (a :: TriggerTypes) =
|
||||
ResponseHTTP WebhookResponse | ResponseError ClientError
|
||||
|
||||
instance ToJSON (Response 'EventType) where
|
||||
toJSON (ResponseHTTP resp) = object
|
||||
[ "type" .= String "webhook_response"
|
||||
, "data" .= toJSON resp
|
||||
, "version" .= invocationVersionET
|
||||
]
|
||||
toJSON (ResponseError err) = object
|
||||
[ "type" .= String "client_error"
|
||||
, "data" .= toJSON err
|
||||
, "version" .= invocationVersionET
|
||||
]
|
||||
|
||||
instance ToJSON (Response 'ScheduledType) where
|
||||
toJSON (ResponseHTTP resp) = object
|
||||
[ "type" .= String "webhook_response"
|
||||
, "data" .= toJSON resp
|
||||
, "version" .= invocationVersionST
|
||||
]
|
||||
toJSON (ResponseError err) = object
|
||||
[ "type" .= String "client_error"
|
||||
, "data" .= toJSON err
|
||||
, "version" .= invocationVersionST
|
||||
]
|
||||
|
||||
|
||||
data Invocation (a :: TriggerTypes)
|
||||
= Invocation
|
||||
{ iEventId :: EventId
|
||||
, iStatus :: Int
|
||||
, iRequest :: WebhookRequest
|
||||
, iResponse :: (Response a)
|
||||
}
|
||||
|
||||
data ExtraLogContext
|
||||
= ExtraLogContext
|
||||
{ elEventCreatedAt :: Maybe Time.UTCTime
|
||||
, elEventId :: EventId
|
||||
} deriving (Show, Eq)
|
||||
|
||||
$(deriveJSON (aesonDrop 2 snakeCase){omitNothingFields=True} ''ExtraLogContext)
|
||||
|
||||
data HTTPResp (a :: TriggerTypes)
|
||||
= HTTPResp
|
||||
{ hrsStatus :: !Int
|
||||
, hrsHeaders :: ![HeaderConf]
|
||||
, hrsBody :: !TBS.TByteString
|
||||
} deriving (Show, Eq)
|
||||
|
||||
$(deriveToJSON (aesonDrop 3 snakeCase){omitNothingFields=True} ''HTTPResp)
|
||||
|
||||
instance ToEngineLog (HTTPResp 'EventType) Hasura where
|
||||
toEngineLog resp = (LevelInfo, eventTriggerLogType, toJSON resp)
|
||||
|
||||
instance ToEngineLog (HTTPResp 'ScheduledType) Hasura where
|
||||
toEngineLog resp = (LevelInfo, scheduledTriggerLogType, toJSON resp)
|
||||
|
||||
data HTTPErr (a :: TriggerTypes)
|
||||
= HClient !HTTP.HttpException
|
||||
| HParse !HTTP.Status !String
|
||||
| HStatus !(HTTPResp a)
|
||||
| HOther !String
|
||||
deriving (Show)
|
||||
|
||||
instance ToJSON (HTTPErr a) where
|
||||
toJSON err = toObj $ case err of
|
||||
(HClient e) -> ("client", toJSON $ show e)
|
||||
(HParse st e) ->
|
||||
( "parse"
|
||||
, toJSON (HTTP.statusCode st, show e)
|
||||
)
|
||||
(HStatus resp) ->
|
||||
("status", toJSON resp)
|
||||
(HOther e) -> ("internal", toJSON $ show e)
|
||||
where
|
||||
toObj :: (T.Text, Value) -> Value
|
||||
toObj (k, v) = object [ "type" .= k
|
||||
, "detail" .= v]
|
||||
|
||||
instance ToEngineLog (HTTPErr 'EventType) Hasura where
|
||||
toEngineLog err = (LevelError, eventTriggerLogType, toJSON err)
|
||||
|
||||
instance ToEngineLog (HTTPErr 'ScheduledType) Hasura where
|
||||
toEngineLog err = (LevelError, scheduledTriggerLogType, toJSON err)
|
||||
|
||||
mkHTTPResp :: HTTP.Response LBS.ByteString -> HTTPResp a
|
||||
mkHTTPResp resp =
|
||||
HTTPResp
|
||||
{ hrsStatus = HTTP.statusCode $ HTTP.responseStatus resp
|
||||
, hrsHeaders = map decodeHeader $ HTTP.responseHeaders resp
|
||||
, hrsBody = TBS.fromLBS $ HTTP.responseBody resp
|
||||
}
|
||||
where
|
||||
decodeBS = TE.decodeUtf8With TE.lenientDecode
|
||||
decodeHeader (hdrName, hdrVal)
|
||||
= HeaderConf (decodeBS $ CI.original hdrName) (HVValue (decodeBS hdrVal))
|
||||
|
||||
data HTTPRespExtra (a :: TriggerTypes)
|
||||
= HTTPRespExtra
|
||||
{ _hreResponse :: Either (HTTPErr a) (HTTPResp a)
|
||||
, _hreContext :: ExtraLogContext
|
||||
}
|
||||
|
||||
instance ToJSON (HTTPRespExtra a) where
|
||||
toJSON (HTTPRespExtra resp ctxt) = do
|
||||
case resp of
|
||||
Left errResp ->
|
||||
object [ "response" .= toJSON errResp
|
||||
, "context" .= toJSON ctxt
|
||||
]
|
||||
Right rsp ->
|
||||
object [ "response" .= toJSON rsp
|
||||
, "context" .= toJSON ctxt
|
||||
]
|
||||
|
||||
instance ToEngineLog (HTTPRespExtra 'EventType) Hasura where
|
||||
toEngineLog resp = (LevelInfo, eventTriggerLogType, toJSON resp)
|
||||
|
||||
instance ToEngineLog (HTTPRespExtra 'ScheduledType) Hasura where
|
||||
toEngineLog resp = (LevelInfo, scheduledTriggerLogType, toJSON resp)
|
||||
|
||||
isNetworkError :: HTTPErr a -> Bool
|
||||
isNetworkError = \case
|
||||
HClient he -> isNetworkErrorHC he
|
||||
_ -> False
|
||||
|
||||
isNetworkErrorHC :: HTTP.HttpException -> Bool
|
||||
isNetworkErrorHC = \case
|
||||
HTTP.HttpExceptionRequest _ (HTTP.ConnectionFailure _) -> True
|
||||
HTTP.HttpExceptionRequest _ HTTP.ConnectionTimeout -> True
|
||||
HTTP.HttpExceptionRequest _ HTTP.ResponseTimeout -> True
|
||||
_ -> False
|
||||
|
||||
anyBodyParser :: HTTP.Response LBS.ByteString -> Either (HTTPErr a) (HTTPResp a)
|
||||
anyBodyParser resp = do
|
||||
let httpResp = mkHTTPResp resp
|
||||
if respCode >= HTTP.status200 && respCode < HTTP.status300
|
||||
then return httpResp
|
||||
else throwError $ HStatus httpResp
|
||||
where
|
||||
respCode = HTTP.responseStatus resp
|
||||
|
||||
data HTTPReq
|
||||
= HTTPReq
|
||||
{ _hrqMethod :: !String
|
||||
, _hrqUrl :: !String
|
||||
, _hrqPayload :: !(Maybe Value)
|
||||
, _hrqTry :: !Int
|
||||
, _hrqDelay :: !(Maybe Int)
|
||||
} deriving (Show, Eq)
|
||||
|
||||
$(deriveJSON (aesonDrop 4 snakeCase){omitNothingFields=True} ''HTTPReq)
|
||||
|
||||
instance ToEngineLog HTTPReq Hasura where
|
||||
toEngineLog req = (LevelInfo, eventTriggerLogType, toJSON req)
|
||||
|
||||
logHTTPForET
|
||||
:: ( MonadReader r m
|
||||
, Has (Logger Hasura) r
|
||||
, MonadIO m
|
||||
)
|
||||
=> Either (HTTPErr 'EventType) (HTTPResp 'EventType) -> ExtraLogContext -> m ()
|
||||
logHTTPForET eitherResp extraLogCtx = do
|
||||
logger :: Logger Hasura <- asks getter
|
||||
unLogger logger $ HTTPRespExtra eitherResp extraLogCtx
|
||||
|
||||
logHTTPForST
|
||||
:: ( MonadReader r m
|
||||
, Has (Logger Hasura) r
|
||||
, MonadIO m
|
||||
)
|
||||
=> Either (HTTPErr 'ScheduledType) (HTTPResp 'ScheduledType) -> ExtraLogContext -> m ()
|
||||
logHTTPForST eitherResp extraLogCtx = do
|
||||
logger :: Logger Hasura <- asks getter
|
||||
unLogger logger $ HTTPRespExtra eitherResp extraLogCtx
|
||||
|
||||
runHTTP :: (MonadIO m) => HTTP.Manager -> HTTP.Request -> m (Either (HTTPErr a) (HTTPResp a))
|
||||
runHTTP manager req = do
|
||||
res <- liftIO $ try $ HTTP.httpLbs req manager
|
||||
return $ either (Left . HClient) anyBodyParser res
|
||||
|
||||
tryWebhook ::
|
||||
( MonadReader r m
|
||||
, Has HTTP.Manager r
|
||||
, MonadIO m
|
||||
, MonadError (HTTPErr a) m
|
||||
)
|
||||
=> [HTTP.Header]
|
||||
-> HTTP.ResponseTimeout
|
||||
-> Value
|
||||
-> String
|
||||
-> m (HTTPResp a)
|
||||
tryWebhook headers timeout payload webhook = do
|
||||
initReqE <- liftIO $ try $ HTTP.parseRequest webhook
|
||||
manager <- asks getter
|
||||
case initReqE of
|
||||
Left excp -> throwError $ HClient excp
|
||||
Right initReq -> do
|
||||
let req =
|
||||
initReq
|
||||
{ HTTP.method = "POST"
|
||||
, HTTP.requestHeaders = headers
|
||||
, HTTP.requestBody = HTTP.RequestBodyLBS (encode payload)
|
||||
, HTTP.responseTimeout = timeout
|
||||
}
|
||||
eitherResp <- runHTTP manager req
|
||||
onLeft eitherResp throwError
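-- A minimal usage sketch (not part of this module; names and values are
-- illustrative only). It assumes OverloadedStrings and the @Has a a@
-- instance from "Data.Has", running 'tryWebhook' in a ReaderT carrying the
-- HTTP manager, with ExceptT collecting the 'HTTPErr':
--
-- demoInvoke :: HTTP.Manager -> IO (Either (HTTPErr 'EventType) (HTTPResp 'EventType))
-- demoInvoke manager =
--   runExceptT $ flip runReaderT manager $
--     tryWebhook
--       [("Content-Type", "application/json")]
--       (HTTP.responseTimeoutMicro 60000000)  -- 60 second timeout
--       (object ["hello" .= String "world"])  -- payload
--       "http://127.0.0.1:5592"               -- hypothetical webhook URL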
|
||||
|
||||
mkResp :: Int -> TBS.TByteString -> [HeaderConf] -> Response a
|
||||
mkResp status payload headers =
|
||||
let wr = WebhookResponse payload headers status
|
||||
in ResponseHTTP wr
|
||||
|
||||
mkClientErr :: TBS.TByteString -> Response a
|
||||
mkClientErr message =
|
||||
let cerr = ClientError message
|
||||
in ResponseError cerr
|
||||
|
||||
mkWebhookReq :: Value -> [HeaderConf] -> InvocationVersion -> WebhookRequest
|
||||
mkWebhookReq payload headers = WebhookRequest payload headers
|
||||
|
||||
isClientError :: Int -> Bool
|
||||
isClientError status = status >= 1000
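-- Statuses at or above 1000 are synthetic codes produced by graphql-engine
-- itself rather than real HTTP statuses: the trigger processors record 1000
-- for an HTTP client exception and 1001 for a response-parsing failure.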
|
||||
|
||||
encodeHeader :: EventHeaderInfo -> HTTP.Header
|
||||
encodeHeader (EventHeaderInfo hconf cache) =
|
||||
let (HeaderConf name _) = hconf
|
||||
ciname = CI.mk $ TE.encodeUtf8 name
|
||||
value = TE.encodeUtf8 cache
|
||||
in (ciname, value)
|
||||
|
||||
decodeHeader
|
||||
:: LogEnvHeaders -> [EventHeaderInfo] -> (HTTP.HeaderName, BS.ByteString)
|
||||
-> HeaderConf
|
||||
decodeHeader logenv headerInfos (hdrName, hdrVal)
|
||||
= let name = decodeBS $ CI.original hdrName
|
||||
getName ehi = let (HeaderConf name' _) = ehiHeaderConf ehi
|
||||
in name'
|
||||
mehi = find (\hi -> getName hi == name) headerInfos
|
||||
in case mehi of
|
||||
Nothing -> HeaderConf name (HVValue (decodeBS hdrVal))
|
||||
Just ehi -> if logenv
|
||||
then HeaderConf name (HVValue (ehiCachedValue ehi))
|
||||
else ehiHeaderConf ehi
|
||||
where
|
||||
decodeBS = TE.decodeUtf8With TE.lenientDecode
|
||||
|
||||
getRetryAfterHeaderFromHTTPErr :: HTTPErr a -> Maybe Text
|
||||
getRetryAfterHeaderFromHTTPErr (HStatus resp) = getRetryAfterHeaderFromResp resp
|
||||
getRetryAfterHeaderFromHTTPErr _ = Nothing
|
||||
|
||||
getRetryAfterHeaderFromResp :: HTTPResp a -> Maybe Text
|
||||
getRetryAfterHeaderFromResp resp =
|
||||
let mHeader =
|
||||
find
|
||||
(\(HeaderConf name _) -> CI.mk name == retryAfterHeader)
|
||||
(hrsHeaders resp)
|
||||
in case mHeader of
|
||||
Just (HeaderConf _ (HVValue value)) -> Just value
|
||||
_ -> Nothing
|
||||
|
||||
parseRetryHeaderValue :: T.Text -> Maybe Int
|
||||
parseRetryHeaderValue hValue =
|
||||
let seconds = readMaybe $ T.unpack hValue
|
||||
in case seconds of
|
||||
Nothing -> Nothing
|
||||
Just sec ->
|
||||
if sec > 0
|
||||
then Just sec
|
||||
else Nothing
|
686
server/src-lib/Hasura/Eventing/ScheduledTrigger.hs
Normal file
@ -0,0 +1,686 @@
|
||||
{-|
= Scheduled Triggers

This module implements the functionality of invoking webhooks at specified
times, aka scheduled events. Scheduled events are either generated by the
graphql-engine from cron triggers, or created by the user for a specified
time along with a payload, webhook, headers and a retry configuration.
Scheduled events are modeled using rows in Postgres with a @timestamp@ column.

This module implements scheduling and delivery of scheduled
events:

1. Scheduling a cron event involves creating new cron events. New
cron events are created based on the cron schedule and the number of
scheduled events already present in the scheduled events buffer.
The graphql-engine computes the new scheduled events and writes them to
the database. (Generator)

2. Delivering a scheduled event involves reading undelivered scheduled events
from the database and delivering them to the webhook server. (Processor)

The rationale behind separating event scheduling and event delivery
into two different threads is that the two are not directly dependent on
each other. The generator almost always creates scheduled events that are
to be delivered in the future (timestamp > current_timestamp), while the
processor fetches scheduled events from the past (timestamp <
current_timestamp). So the sets of scheduled events handled by the
generator and the processor never overlap; since they are not correlated
to each other, they can be split into different threads for better
performance. A sketch of this wiring follows the doc comment.

== Implementation

During startup, two threads are started:

1. Generator: Fetches the list of scheduled triggers from the cache and
   generates the scheduled events.

    - Additional events will be generated only if there are fewer than 100
      scheduled events.

    - The timestamps of the upcoming events are computed using:

        - the cron schedule of the scheduled trigger

        - the max timestamp of the scheduled events that already exist, or
          current_timestamp (when no scheduled events exist)

    - The timestamps of the scheduled events are stored with timezone because
      `SELECT NOW()` returns a timestamp with timezone, so it's good to
      compare two things of the same type.

    This effectively corresponds to doing an INSERT with values containing
    specific timestamps.

2. Processor: Fetches the undelivered cron events and scheduled events whose
   timestamp is earlier than the current timestamp from the database, and
   processes them.
-}
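
-- A rough sketch (hypothetical wiring; the real startup code lives in the
-- server's main entry point) of how the two loops described above could be
-- forked, one thread each for the Generator and the Processor:
--
--   startScheduledTriggers logger logEnv httpMgr pgpool getSC = do
--     void $ forkIO $ runCronEventsGenerator logger pgpool getSC
--     void $ forkIO $ processScheduledTriggers logger logEnv httpMgr pgpool getSC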
|
||||
module Hasura.Eventing.ScheduledTrigger
|
||||
( runCronEventsGenerator
|
||||
, processScheduledTriggers
|
||||
|
||||
, CronEventSeed(..)
|
||||
, generateScheduleTimes
|
||||
, insertCronEvents
|
||||
, StandAloneScheduledEvent(..)
|
||||
) where
|
||||
|
||||
import Control.Arrow.Extended (dup)
|
||||
import Control.Concurrent.Extended (sleep)
|
||||
import Data.Has
|
||||
import Data.Int (Int64)
|
||||
import Data.List (unfoldr)
|
||||
import Data.Time.Clock
|
||||
import Hasura.Eventing.HTTP
|
||||
import Hasura.HTTP
|
||||
import Hasura.Prelude
|
||||
import Hasura.RQL.DDL.Headers
|
||||
import Hasura.RQL.Types
|
||||
import Hasura.Server.Version (HasVersion)
|
||||
import Hasura.RQL.DDL.EventTrigger (getHeaderInfosFromConf)
|
||||
import Hasura.SQL.DML
|
||||
import Hasura.SQL.Types
|
||||
import System.Cron
|
||||
|
||||
import qualified Data.Aeson as J
|
||||
import qualified Data.Aeson.Casing as J
|
||||
import qualified Data.Aeson.TH as J
|
||||
import qualified Data.HashMap.Strict as Map
|
||||
import qualified Data.TByteString as TBS
|
||||
import qualified Data.Text as T
|
||||
import qualified Database.PG.Query as Q
|
||||
import qualified Hasura.Logging as L
|
||||
import qualified Network.HTTP.Client as HTTP
|
||||
import qualified Text.Builder as TB (run)
|
||||
import qualified PostgreSQL.Binary.Decoding as PD
|
||||
|
||||
newtype ScheduledTriggerInternalErr
|
||||
= ScheduledTriggerInternalErr QErr
|
||||
deriving (Show, Eq)
|
||||
|
||||
instance L.ToEngineLog ScheduledTriggerInternalErr L.Hasura where
|
||||
toEngineLog (ScheduledTriggerInternalErr qerr) =
|
||||
(L.LevelError, L.scheduledTriggerLogType, J.toJSON qerr)
|
||||
|
||||
cronEventsTable :: QualifiedTable
|
||||
cronEventsTable =
|
||||
QualifiedObject
|
||||
hdbCatalogSchema
|
||||
(TableName $ T.pack "hdb_cron_events")
|
||||
|
||||
data ScheduledEventStatus
|
||||
= SESScheduled
|
||||
| SESLocked
|
||||
| SESDelivered
|
||||
| SESError
|
||||
| SESDead
|
||||
deriving (Show, Eq)
|
||||
|
||||
scheduledEventStatusToText :: ScheduledEventStatus -> Text
|
||||
scheduledEventStatusToText SESScheduled = "scheduled"
|
||||
scheduledEventStatusToText SESLocked = "locked"
|
||||
scheduledEventStatusToText SESDelivered = "delivered"
|
||||
scheduledEventStatusToText SESError = "error"
|
||||
scheduledEventStatusToText SESDead = "dead"
|
||||
|
||||
instance Q.ToPrepArg ScheduledEventStatus where
|
||||
toPrepVal = Q.toPrepVal . scheduledEventStatusToText
|
||||
|
||||
instance Q.FromCol ScheduledEventStatus where
|
||||
fromCol bs = flip Q.fromColHelper bs $ PD.enum $ \case
|
||||
"scheduled" -> Just SESScheduled
|
||||
"locked" -> Just SESLocked
|
||||
"delivered" -> Just SESDelivered
|
||||
"error" -> Just SESError
|
||||
"dead" -> Just SESDead
|
||||
_ -> Nothing
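-- Note: this decoder must stay in sync with 'scheduledEventStatusToText'
-- above, since the same text values round-trip through the status column.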
|
||||
|
||||
instance J.ToJSON ScheduledEventStatus where
|
||||
toJSON = J.String . scheduledEventStatusToText
|
||||
|
||||
data CronTriggerStats
|
||||
= CronTriggerStats
|
||||
{ ctsName :: !TriggerName
|
||||
, ctsUpcomingEventsCount :: !Int
|
||||
, ctsMaxScheduledTime :: !UTCTime
|
||||
} deriving (Show, Eq)
|
||||
|
||||
data CronEventSeed
|
||||
= CronEventSeed
|
||||
{ cesName :: !TriggerName
|
||||
, cesScheduledTime :: !UTCTime
|
||||
} deriving (Show, Eq)
|
||||
|
||||
data CronEventPartial
|
||||
= CronEventPartial
|
||||
{ cepId :: !Text
|
||||
, cepName :: !TriggerName
|
||||
, cepScheduledTime :: !UTCTime
|
||||
, cepTries :: !Int
|
||||
} deriving (Show, Eq)
|
||||
|
||||
data ScheduledEventFull
|
||||
= ScheduledEventFull
|
||||
{ sefId :: !Text
|
||||
, sefName :: !(Maybe TriggerName)
|
||||
-- ^ sefName is the name of the cron trigger.
|
||||
-- A standalone scheduled event is not associated with a name, so in that
|
||||
-- case, 'sefName' will be @Nothing@
|
||||
, sefScheduledTime :: !UTCTime
|
||||
, sefTries :: !Int
|
||||
, sefWebhook :: !Text
|
||||
, sefPayload :: !J.Value
|
||||
, sefRetryConf :: !STRetryConf
|
||||
, sefHeaders :: ![EventHeaderInfo]
|
||||
, sefComment :: !(Maybe Text)
|
||||
} deriving (Show, Eq)
|
||||
|
||||
$(J.deriveToJSON (J.aesonDrop 3 J.snakeCase) {J.omitNothingFields = True} ''ScheduledEventFull)
|
||||
|
||||
data StandAloneScheduledEvent
|
||||
= StandAloneScheduledEvent
|
||||
{ saseId :: !Text
|
||||
, saseScheduledTime :: !UTCTime
|
||||
, saseTries :: !Int
|
||||
, saseWebhook :: !InputWebhook
|
||||
, sasePayload :: !(Maybe J.Value)
|
||||
, saseRetryConf :: !STRetryConf
|
||||
, saseHeaderConf :: ![HeaderConf]
|
||||
, saseComment :: !(Maybe Text)
|
||||
} deriving (Show, Eq)
|
||||
|
||||
$(J.deriveToJSON (J.aesonDrop 4 J.snakeCase) {J.omitNothingFields = True} ''StandAloneScheduledEvent)
|
||||
|
||||
-- | The 'ScheduledEventType' data type distinguishes a 'CronScheduledEvent'
-- from a 'StandAloneEvent', because the two have different configurations
-- and live in different tables.
data ScheduledEventType =
  CronScheduledEvent
  -- ^ A cron scheduled event has a template that defines the webhook,
  -- header configuration, retry configuration and payload. Every cron
  -- event created uses the configuration defined in that template.
  -- Since the configuration defined with the cron trigger is cached,
  -- it is not fetched along with the cron scheduled events.
  | StandAloneEvent
  -- ^ A standalone scheduled event has no template defined, so all of its
  -- configuration is fetched along with the scheduled event itself.
  deriving (Eq, Show)
|
||||
|
||||
-- | runCronEventsGenerator makes sure that all the cron triggers
|
||||
-- have an adequate buffer of cron events.
|
||||
runCronEventsGenerator ::
|
||||
L.Logger L.Hasura
|
||||
-> Q.PGPool
|
||||
-> IO SchemaCache
|
||||
-> IO void
|
||||
runCronEventsGenerator logger pgpool getSC = do
|
||||
forever $ do
|
||||
sc <- getSC
|
||||
-- get cron triggers from cache
|
||||
let cronTriggersCache = scCronTriggers sc
|
||||
|
||||
-- get cron trigger stats from db
|
||||
runExceptT
|
||||
(Q.runTx pgpool (Q.ReadCommitted, Just Q.ReadOnly) getDeprivedCronTriggerStats) >>= \case
|
||||
Left err -> L.unLogger logger $
|
||||
ScheduledTriggerInternalErr $ err500 Unexpected (T.pack $ show err)
|
||||
Right deprivedCronTriggerStats -> do
|
||||
-- join stats with cron triggers and produce @[(CronTriggerInfo, CronTriggerStats)]@
|
||||
cronTriggersForHydrationWithStats <-
|
||||
catMaybes <$>
|
||||
mapM (withCronTrigger cronTriggersCache) deprivedCronTriggerStats
|
||||
-- insert cron events for cron triggers that need hydration
|
||||
runExceptT
|
||||
(Q.runTx pgpool (Q.ReadCommitted, Just Q.ReadWrite) $
|
||||
insertCronEventsFor cronTriggersForHydrationWithStats) >>= \case
|
||||
Right _ -> pure ()
|
||||
Left err ->
|
||||
L.unLogger logger $ ScheduledTriggerInternalErr $ err500 Unexpected (T.pack $ show err)
|
||||
sleep (minutes 1)
|
||||
where
|
||||
getDeprivedCronTriggerStats = liftTx $ do
|
||||
map uncurryStats <$>
|
||||
Q.listQE defaultTxErrorHandler
|
||||
[Q.sql|
|
||||
SELECT name, upcoming_events_count, max_scheduled_time
|
||||
FROM hdb_catalog.hdb_cron_events_stats
|
||||
WHERE upcoming_events_count < 100
|
||||
|] () True
|
||||
|
||||
uncurryStats (n, count, maxTs) = CronTriggerStats n count maxTs
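-- "Deprived" cron triggers are those with fewer than 100 upcoming events
-- left in the buffer (per the query above); only these get new events
-- generated in this iteration.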
|
||||
|
||||
withCronTrigger cronTriggerCache cronTriggerStat = do
|
||||
case Map.lookup (ctsName cronTriggerStat) cronTriggerCache of
|
||||
Nothing -> do
|
||||
L.unLogger logger $
|
||||
ScheduledTriggerInternalErr $
|
||||
err500 Unexpected $
|
||||
"could not find scheduled trigger in the schema cache"
|
||||
pure Nothing
|
||||
Just cronTrigger -> pure $
|
||||
Just (cronTrigger, cronTriggerStat)
|
||||
|
||||
insertCronEventsFor :: [(CronTriggerInfo, CronTriggerStats)] -> Q.TxE QErr ()
|
||||
insertCronEventsFor cronTriggersWithStats = do
|
||||
let scheduledEvents = flip concatMap cronTriggersWithStats $ \(cti, stats) ->
|
||||
generateCronEventsFrom (ctsMaxScheduledTime stats) cti
|
||||
case scheduledEvents of
|
||||
[] -> pure ()
|
||||
events -> do
|
||||
let insertCronEventsSql = TB.run $ toSQL
|
||||
SQLInsert
|
||||
{ siTable = cronEventsTable
|
||||
, siCols = map unsafePGCol ["trigger_name", "scheduled_time"]
|
||||
, siValues = ValuesExp $ map (toTupleExp . toArr) events
|
||||
, siConflict = Just $ DoNothing Nothing
|
||||
, siRet = Nothing
|
||||
}
|
||||
Q.unitQE defaultTxErrorHandler (Q.fromText insertCronEventsSql) () False
|
||||
where
|
||||
toArr (CronEventSeed n t) = [(triggerNameToTxt n), (formatTime' t)]
|
||||
toTupleExp = TupleExp . map SELit
|
||||
|
||||
insertCronEvents :: [CronEventSeed] -> Q.TxE QErr ()
|
||||
insertCronEvents events = do
|
||||
let insertCronEventsSql = TB.run $ toSQL
|
||||
SQLInsert
|
||||
{ siTable = cronEventsTable
|
||||
, siCols = map unsafePGCol ["trigger_name", "scheduled_time"]
|
||||
, siValues = ValuesExp $ map (toTupleExp . toArr) events
|
||||
, siConflict = Just $ DoNothing Nothing
|
||||
, siRet = Nothing
|
||||
}
|
||||
Q.unitQE defaultTxErrorHandler (Q.fromText insertCronEventsSql) () False
|
||||
where
|
||||
toArr (CronEventSeed n t) = [(triggerNameToTxt n), (formatTime' t)]
|
||||
toTupleExp = TupleExp . map SELit
|
||||
|
||||
generateCronEventsFrom :: UTCTime -> CronTriggerInfo -> [CronEventSeed]
|
||||
generateCronEventsFrom startTime CronTriggerInfo{..} =
|
||||
map (CronEventSeed ctiName) $
|
||||
generateScheduleTimes startTime 100 ctiSchedule -- generate next 100 events
|
||||
|
||||
-- | Generates the next @n@ events starting from @from@, according to the given 'CronSchedule'
|
||||
generateScheduleTimes :: UTCTime -> Int -> CronSchedule -> [UTCTime]
|
||||
generateScheduleTimes from n cron = take n $ go from
|
||||
where
|
||||
go = unfoldr (fmap dup . nextMatch cron)
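-- A usage sketch (illustrative values; assumes 'parseCronSchedule' from
-- System.Cron.Parser in the cron package):
--
-- >>> let Right everyTenMinutes = parseCronSchedule "*/10 * * * *"
-- >>> generateScheduleTimes someStartTime 3 everyTenMinutes
-- yields the next three matching UTCTimes after someStartTime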
|
||||
|
||||
processCronEvents
|
||||
:: HasVersion
|
||||
=> L.Logger L.Hasura
|
||||
-> LogEnvHeaders
|
||||
-> HTTP.Manager
|
||||
-> Q.PGPool
|
||||
-> IO SchemaCache
|
||||
-> IO ()
|
||||
processCronEvents logger logEnv httpMgr pgpool getSC = do
|
||||
cronTriggersInfo <- scCronTriggers <$> getSC
|
||||
cronScheduledEvents <-
|
||||
runExceptT $
|
||||
Q.runTx pgpool (Q.ReadCommitted, Just Q.ReadWrite) getPartialCronEvents
|
||||
case cronScheduledEvents of
|
||||
Right partialEvents ->
|
||||
for_ partialEvents $ \(CronEventPartial id' name st tries) -> do
|
||||
case Map.lookup name cronTriggersInfo of
|
||||
Nothing -> logInternalError $
|
||||
err500 Unexpected "could not find cron trigger in cache"
|
||||
Just CronTriggerInfo{..} -> do
|
||||
let webhook = unResolvedWebhook ctiWebhookInfo
|
||||
payload' = fromMaybe J.Null ctiPayload
|
||||
scheduledEvent =
|
||||
ScheduledEventFull id'
|
||||
(Just name)
|
||||
st
|
||||
tries
|
||||
webhook
|
||||
payload'
|
||||
ctiRetryConf
|
||||
ctiHeaders
|
||||
ctiComment
|
||||
finally <- runExceptT $
|
||||
runReaderT (processScheduledEvent logEnv pgpool scheduledEvent CronScheduledEvent) (logger, httpMgr)
|
||||
either logInternalError pure finally
|
||||
Left err -> logInternalError err
|
||||
where
|
||||
logInternalError err = L.unLogger logger $ ScheduledTriggerInternalErr err
|
||||
|
||||
processStandAloneEvents
|
||||
:: HasVersion
|
||||
=> L.Logger L.Hasura
|
||||
-> LogEnvHeaders
|
||||
-> HTTP.Manager
|
||||
-> Q.PGPool
|
||||
-> IO ()
|
||||
processStandAloneEvents logger logEnv httpMgr pgpool = do
|
||||
standAloneScheduledEvents <-
|
||||
runExceptT $
|
||||
Q.runTx pgpool (Q.ReadCommitted, Just Q.ReadWrite) getOneOffScheduledEvents
|
||||
case standAloneScheduledEvents of
|
||||
Right standAloneScheduledEvents' ->
|
||||
for_ standAloneScheduledEvents' $
|
||||
\(StandAloneScheduledEvent id'
|
||||
scheduledTime
|
||||
tries
|
||||
webhookConf
|
||||
payload
|
||||
retryConf
|
||||
headerConf
|
||||
comment )
|
||||
-> do
|
||||
webhookInfo <- runExceptT $ resolveWebhook webhookConf
|
||||
headerInfo <- runExceptT $ getHeaderInfosFromConf headerConf
|
||||
|
||||
case webhookInfo of
|
||||
Right webhookInfo' -> do
|
||||
case headerInfo of
|
||||
Right headerInfo' -> do
|
||||
let webhook = unResolvedWebhook webhookInfo'
|
||||
payload' = fromMaybe J.Null payload
|
||||
scheduledEvent = ScheduledEventFull id'
|
||||
Nothing
|
||||
scheduledTime
|
||||
tries
|
||||
webhook
|
||||
payload'
|
||||
retryConf
|
||||
headerInfo'
|
||||
comment
|
||||
finally <- runExceptT $
|
||||
runReaderT (processScheduledEvent logEnv pgpool scheduledEvent StandAloneEvent) $
|
||||
(logger, httpMgr)
|
||||
either logInternalError pure finally
|
||||
|
||||
Left headerInfoErr -> logInternalError headerInfoErr
|
||||
|
||||
Left webhookInfoErr -> logInternalError webhookInfoErr
|
||||
|
||||
Left standAloneScheduledEventsErr -> logInternalError standAloneScheduledEventsErr
|
||||
where
|
||||
logInternalError err = L.unLogger logger $ ScheduledTriggerInternalErr err
|
||||
|
||||
processScheduledTriggers
|
||||
:: HasVersion
|
||||
=> L.Logger L.Hasura
|
||||
-> LogEnvHeaders
|
||||
-> HTTP.Manager
|
||||
-> Q.PGPool
|
||||
-> IO SchemaCache
|
||||
-> IO void
|
||||
processScheduledTriggers logger logEnv httpMgr pgpool getSC =
|
||||
forever $ do
|
||||
processCronEvents logger logEnv httpMgr pgpool getSC
|
||||
processStandAloneEvents logger logEnv httpMgr pgpool
|
||||
sleep (minutes 1)
|
||||
|
||||
processScheduledEvent ::
|
||||
( MonadReader r m
|
||||
, Has HTTP.Manager r
|
||||
, Has (L.Logger L.Hasura) r
|
||||
, HasVersion
|
||||
, MonadIO m
|
||||
, MonadError QErr m
|
||||
)
|
||||
=> LogEnvHeaders
|
||||
-> Q.PGPool
|
||||
-> ScheduledEventFull
|
||||
-> ScheduledEventType
|
||||
-> m ()
|
||||
processScheduledEvent
|
||||
logEnv pgpool se@ScheduledEventFull {..} type' = do
|
||||
currentTime <- liftIO getCurrentTime
|
||||
if convertDuration (diffUTCTime currentTime sefScheduledTime)
|
||||
> unNonNegativeDiffTime (strcToleranceSeconds sefRetryConf)
|
||||
then processDead pgpool se type'
|
||||
else do
|
||||
let timeoutSeconds = round $ unNonNegativeDiffTime
|
||||
$ strcTimeoutSeconds sefRetryConf
|
||||
httpTimeout = HTTP.responseTimeoutMicro (timeoutSeconds * 1000000)
|
||||
headers = addDefaultHeaders $ map encodeHeader sefHeaders
|
||||
extraLogCtx = ExtraLogContext (Just currentTime) sefId
|
||||
res <- runExceptT $ tryWebhook headers httpTimeout sefPayload (T.unpack sefWebhook)
|
||||
logHTTPForST res extraLogCtx
|
||||
let decodedHeaders = map (decodeHeader logEnv sefHeaders) headers
|
||||
either
|
||||
(processError pgpool se decodedHeaders type')
|
||||
(processSuccess pgpool se decodedHeaders type')
|
||||
res
|
||||
|
||||
processError
|
||||
:: (MonadIO m, MonadError QErr m)
|
||||
=> Q.PGPool -> ScheduledEventFull -> [HeaderConf] -> ScheduledEventType -> HTTPErr a -> m ()
|
||||
processError pgpool se decodedHeaders type' err = do
|
||||
let invocation = case err of
|
||||
HClient excp -> do
|
||||
let errMsg = TBS.fromLBS $ J.encode $ show excp
|
||||
mkInvocation se 1000 decodedHeaders errMsg []
|
||||
HParse _ detail -> do
|
||||
let errMsg = TBS.fromLBS $ J.encode detail
|
||||
mkInvocation se 1001 decodedHeaders errMsg []
|
||||
HStatus errResp -> do
|
||||
let respPayload = hrsBody errResp
|
||||
respHeaders = hrsHeaders errResp
|
||||
respStatus = hrsStatus errResp
|
||||
mkInvocation se respStatus decodedHeaders respPayload respHeaders
|
||||
HOther detail -> do
|
||||
let errMsg = (TBS.fromLBS $ J.encode detail)
|
||||
mkInvocation se 500 decodedHeaders errMsg []
|
||||
liftExceptTIO $
|
||||
Q.runTx pgpool (Q.RepeatableRead, Just Q.ReadWrite) $ do
|
||||
insertInvocation invocation type'
|
||||
retryOrMarkError se err type'
|
||||
|
||||
retryOrMarkError :: ScheduledEventFull -> HTTPErr a -> ScheduledEventType -> Q.TxE QErr ()
|
||||
retryOrMarkError se@ScheduledEventFull {..} err type' = do
|
||||
let mRetryHeader = getRetryAfterHeaderFromHTTPErr err
|
||||
mRetryHeaderSeconds = parseRetryHeaderValue =<< mRetryHeader
|
||||
triesExhausted = sefTries >= strcNumRetries sefRetryConf
|
||||
noRetryHeader = isNothing mRetryHeaderSeconds
|
||||
if triesExhausted && noRetryHeader
|
||||
then do
|
||||
setScheduledEventStatus sefId SESError type'
|
||||
else do
|
||||
currentTime <- liftIO getCurrentTime
|
||||
let delay = fromMaybe (round $ unNonNegativeDiffTime
|
||||
$ strcRetryIntervalSeconds sefRetryConf)
|
||||
$ mRetryHeaderSeconds
|
||||
diff = fromIntegral delay
|
||||
retryTime = addUTCTime diff currentTime
|
||||
setRetry se retryTime type'
|
||||
|
||||
{- Note [Scheduled event lifecycle]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Scheduled events move between five different states over the course of their
lifetime, as represented by the following flowchart:

  ┌───────────┐      ┌────────┐      ┌───────────┐
  │ scheduled │─(a)─→│ locked │─(b)─→│ delivered │
  └───────────┘      └────────┘      └───────────┘
        ↑                │           ┌───────┐
        └────(c)─────────┼────(d)───→│ error │
                         │           └───────┘
                         │           ┌──────┐
                         └────(e)───→│ dead │
                                     └──────┘

When a scheduled event is first created, it starts in the 'scheduled' state,
and it can transition to other states in the following ways:

  a. When graphql-engine fetches a scheduled event from the database to
     process it, it sets its state to 'locked'. This prevents multiple
     graphql-engine instances running on the same database from processing
     the same scheduled event concurrently.
  b. When a scheduled event is processed successfully, it is marked
     'delivered'.
  c. If a scheduled event fails to be processed, but it hasn’t yet reached
     its maximum retry limit, its retry counter is incremented and it is
     returned to the 'scheduled' state.
  d. If a scheduled event fails to be processed and *has* reached its retry
     limit, its state is set to 'error'.
  e. If for whatever reason the difference between the current time and the
     scheduled time is greater than the tolerance of the scheduled event, it
     will not be processed and its state will be set to 'dead'.
-}
|
||||
|
||||
processSuccess
|
||||
:: (MonadIO m, MonadError QErr m)
|
||||
=> Q.PGPool -> ScheduledEventFull -> [HeaderConf] -> ScheduledEventType -> HTTPResp a -> m ()
|
||||
processSuccess pgpool se decodedHeaders type' resp = do
|
||||
let respBody = hrsBody resp
|
||||
respHeaders = hrsHeaders resp
|
||||
respStatus = hrsStatus resp
|
||||
invocation = mkInvocation se respStatus decodedHeaders respBody respHeaders
|
||||
liftExceptTIO $
|
||||
Q.runTx pgpool (Q.RepeatableRead, Just Q.ReadWrite) $ do
|
||||
insertInvocation invocation type'
|
||||
setScheduledEventStatus (sefId se) SESDelivered type'
|
||||
|
||||
processDead :: (MonadIO m, MonadError QErr m) => Q.PGPool -> ScheduledEventFull -> ScheduledEventType -> m ()
|
||||
processDead pgpool se type' =
|
||||
liftExceptTIO $
|
||||
Q.runTx pgpool (Q.RepeatableRead, Just Q.ReadWrite) $
|
||||
setScheduledEventStatus (sefId se) SESDead type'
|
||||
|
||||
setRetry :: ScheduledEventFull -> UTCTime -> ScheduledEventType -> Q.TxE QErr ()
|
||||
setRetry se time type' =
|
||||
case type' of
|
||||
CronScheduledEvent ->
|
||||
Q.unitQE defaultTxErrorHandler [Q.sql|
|
||||
UPDATE hdb_catalog.hdb_cron_events
|
||||
SET next_retry_at = $1,
|
||||
STATUS = 'scheduled'
|
||||
WHERE id = $2
|
||||
|] (time, sefId se) True
|
||||
StandAloneEvent ->
|
||||
Q.unitQE defaultTxErrorHandler [Q.sql|
|
||||
UPDATE hdb_catalog.hdb_scheduled_events
|
||||
SET next_retry_at = $1,
|
||||
STATUS = 'scheduled'
|
||||
WHERE id = $2
|
||||
|] (time, sefId se) True
|
||||
|
||||
mkInvocation
|
||||
:: ScheduledEventFull -> Int -> [HeaderConf] -> TBS.TByteString -> [HeaderConf]
|
||||
-> (Invocation 'ScheduledType)
|
||||
mkInvocation se status reqHeaders respBody respHeaders
|
||||
= let resp = if isClientError status
|
||||
then mkClientErr respBody
|
||||
else mkResp status respBody respHeaders
|
||||
in
|
||||
Invocation
|
||||
(sefId se)
|
||||
status
|
||||
(mkWebhookReq (J.toJSON se) reqHeaders invocationVersionST)
|
||||
resp
|
||||
|
||||
insertInvocation :: (Invocation 'ScheduledType) -> ScheduledEventType -> Q.TxE QErr ()
|
||||
insertInvocation invo type' = do
|
||||
case type' of
|
||||
CronScheduledEvent -> do
|
||||
Q.unitQE defaultTxErrorHandler
|
||||
[Q.sql|
|
||||
INSERT INTO hdb_catalog.hdb_cron_event_invocation_logs
|
||||
(event_id, status, request, response)
|
||||
VALUES ($1, $2, $3, $4)
|
||||
|] ( iEventId invo
|
||||
, fromIntegral $ iStatus invo :: Int64
|
||||
, Q.AltJ $ J.toJSON $ iRequest invo
|
||||
, Q.AltJ $ J.toJSON $ iResponse invo) True
|
||||
Q.unitQE defaultTxErrorHandler [Q.sql|
|
||||
UPDATE hdb_catalog.hdb_cron_events
|
||||
SET tries = tries + 1
|
||||
WHERE id = $1
|
||||
|] (Identity $ iEventId invo) True
|
||||
StandAloneEvent -> do
|
||||
Q.unitQE defaultTxErrorHandler
|
||||
[Q.sql|
|
||||
INSERT INTO hdb_catalog.hdb_scheduled_event_invocation_logs
|
||||
(event_id, status, request, response)
|
||||
VALUES ($1, $2, $3, $4)
|
||||
|] ( iEventId invo
|
||||
, fromIntegral $ iStatus invo :: Int64
|
||||
, Q.AltJ $ J.toJSON $ iRequest invo
|
||||
, Q.AltJ $ J.toJSON $ iResponse invo) True
|
||||
Q.unitQE defaultTxErrorHandler [Q.sql|
|
||||
UPDATE hdb_catalog.hdb_scheduled_events
|
||||
SET tries = tries + 1
|
||||
WHERE id = $1
|
||||
|] (Identity $ iEventId invo) True
|
||||
|
||||
setScheduledEventStatus :: Text -> ScheduledEventStatus -> ScheduledEventType -> Q.TxE QErr ()
|
||||
setScheduledEventStatus scheduledEventId status type' =
|
||||
case type' of
|
||||
CronScheduledEvent -> do
|
||||
Q.unitQE defaultTxErrorHandler
|
||||
[Q.sql|
|
||||
UPDATE hdb_catalog.hdb_cron_events
|
||||
SET status = $2
|
||||
WHERE id = $1
|
||||
|] (scheduledEventId, status) True
|
||||
StandAloneEvent -> do
|
||||
Q.unitQE defaultTxErrorHandler
|
||||
[Q.sql|
|
||||
UPDATE hdb_catalog.hdb_scheduled_events
|
||||
SET status = $2
|
||||
WHERE id = $1
|
||||
|] (scheduledEventId, status) True
|
||||
|
||||
getPartialCronEvents :: Q.TxE QErr [CronEventPartial]
|
||||
getPartialCronEvents = do
|
||||
map uncurryEvent <$> Q.listQE defaultTxErrorHandler [Q.sql|
|
||||
UPDATE hdb_catalog.hdb_cron_events
|
||||
SET status = 'locked'
|
||||
WHERE id IN ( SELECT t.id
|
||||
FROM hdb_catalog.hdb_cron_events t
|
||||
WHERE ( t.status = 'scheduled'
|
||||
and (
|
||||
(t.next_retry_at is NULL and t.scheduled_time <= now()) or
|
||||
(t.next_retry_at is not NULL and t.next_retry_at <= now())
|
||||
)
|
||||
)
|
||||
FOR UPDATE SKIP LOCKED
|
||||
)
|
||||
RETURNING id, trigger_name, scheduled_time, tries
|
||||
|] () True
|
||||
where uncurryEvent (i, n, st, tries) = CronEventPartial i n st tries
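-- The UPDATE ... WHERE id IN (SELECT ... FOR UPDATE SKIP LOCKED) shape here
-- (and in 'getOneOffScheduledEvents' below) lets several graphql-engine
-- instances poll the same table safely: rows already locked by another
-- transaction are skipped rather than blocked on, so each event is claimed
-- by exactly one poller before being marked 'locked'.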
|
||||
|
||||
getOneOffScheduledEvents :: Q.TxE QErr [StandAloneScheduledEvent]
|
||||
getOneOffScheduledEvents = do
|
||||
map uncurryOneOffEvent <$> Q.listQE defaultTxErrorHandler [Q.sql|
|
||||
UPDATE hdb_catalog.hdb_scheduled_events
|
||||
SET status = 'locked'
|
||||
WHERE id IN ( SELECT t.id
|
||||
FROM hdb_catalog.hdb_scheduled_events t
|
||||
WHERE ( t.status = 'scheduled'
|
||||
and (
|
||||
(t.next_retry_at is NULL and t.scheduled_time <= now()) or
|
||||
(t.next_retry_at is not NULL and t.next_retry_at <= now())
|
||||
)
|
||||
)
|
||||
FOR UPDATE SKIP LOCKED
|
||||
)
|
||||
RETURNING id, webhook_conf, scheduled_time, retry_conf, payload, header_conf, tries, comment
|
||||
|] () False
|
||||
where
|
||||
uncurryOneOffEvent ( eventId
|
||||
, webhookConf
|
||||
, scheduledTime
|
||||
, retryConf
|
||||
, payload
|
||||
, headerConf
|
||||
, tries
|
||||
, comment ) =
|
||||
StandAloneScheduledEvent eventId
|
||||
scheduledTime
|
||||
tries
|
||||
(Q.getAltJ webhookConf)
|
||||
(Q.getAltJ payload)
|
||||
(Q.getAltJ retryConf)
|
||||
(Q.getAltJ headerConf)
|
||||
comment
|
||||
|
||||
|
||||
liftExceptTIO :: (MonadError e m, MonadIO m) => ExceptT e IO a -> m a
|
||||
liftExceptTIO m = liftEither =<< liftIO (runExceptT m)
|
@ -1,153 +0,0 @@
|
||||
module Hasura.Events.HTTP
|
||||
( HTTPErr(..)
|
||||
, HTTPResp(..)
|
||||
, runHTTP
|
||||
, isNetworkError
|
||||
, isNetworkErrorHC
|
||||
, ExtraContext(..)
|
||||
) where
|
||||
|
||||
import Data.Either
|
||||
|
||||
import qualified Data.Aeson as J
|
||||
import qualified Data.Aeson.Casing as J
|
||||
import qualified Data.Aeson.TH as J
|
||||
import qualified Data.ByteString.Lazy as B
|
||||
import qualified Data.CaseInsensitive as CI
|
||||
import qualified Data.TByteString as TBS
|
||||
import qualified Data.Text as T
|
||||
import qualified Data.Text.Encoding as TE
|
||||
import qualified Data.Text.Encoding.Error as TE
|
||||
import qualified Data.Time.Clock as Time
|
||||
import qualified Network.HTTP.Client as HTTP
|
||||
import qualified Network.HTTP.Types as HTTP
|
||||
|
||||
import Control.Exception (try)
|
||||
import Control.Monad.IO.Class (MonadIO, liftIO)
|
||||
import Control.Monad.Reader (MonadReader)
|
||||
import Data.Has
|
||||
import Hasura.Logging
|
||||
import Hasura.Prelude
|
||||
import Hasura.RQL.DDL.Headers
|
||||
import Hasura.RQL.Types.EventTrigger
|
||||
|
||||
data ExtraContext
|
||||
= ExtraContext
|
||||
{ elEventCreatedAt :: Time.UTCTime
|
||||
, elEventId :: EventId
|
||||
} deriving (Show, Eq)
|
||||
|
||||
$(J.deriveJSON (J.aesonDrop 2 J.snakeCase){J.omitNothingFields=True} ''ExtraContext)
|
||||
|
||||
data HTTPResp
|
||||
= HTTPResp
|
||||
{ hrsStatus :: !Int
|
||||
, hrsHeaders :: ![HeaderConf]
|
||||
, hrsBody :: !TBS.TByteString
|
||||
} deriving (Show, Eq)
|
||||
|
||||
$(J.deriveToJSON (J.aesonDrop 3 J.snakeCase){J.omitNothingFields=True} ''HTTPResp)
|
||||
|
||||
instance ToEngineLog HTTPResp Hasura where
|
||||
toEngineLog resp = (LevelInfo, eventTriggerLogType, J.toJSON resp)
|
||||
|
||||
mkHTTPResp :: HTTP.Response B.ByteString -> HTTPResp
|
||||
mkHTTPResp resp =
|
||||
HTTPResp
|
||||
{ hrsStatus = HTTP.statusCode $ HTTP.responseStatus resp
|
||||
, hrsHeaders = map decodeHeader $ HTTP.responseHeaders resp
|
||||
, hrsBody = TBS.fromLBS $ HTTP.responseBody resp
|
||||
}
|
||||
where
|
||||
decodeBS = TE.decodeUtf8With TE.lenientDecode
|
||||
decodeHeader (hdrName, hdrVal)
|
||||
= HeaderConf (decodeBS $ CI.original hdrName) (HVValue (decodeBS hdrVal))
|
||||
|
||||
data HTTPRespExtra
|
||||
= HTTPRespExtra
|
||||
{ _hreResponse :: HTTPResp
|
||||
, _hreContext :: Maybe ExtraContext
|
||||
}
|
||||
|
||||
$(J.deriveToJSON (J.aesonDrop 4 J.snakeCase){J.omitNothingFields=True} ''HTTPRespExtra)
|
||||
|
||||
instance ToEngineLog HTTPRespExtra Hasura where
|
||||
toEngineLog resp = (LevelInfo, eventTriggerLogType, J.toJSON resp)
|
||||
|
||||
data HTTPErr
|
||||
= HClient !HTTP.HttpException
|
||||
| HParse !HTTP.Status !String
|
||||
| HStatus !HTTPResp
|
||||
| HOther !String
|
||||
deriving (Show)
|
||||
|
||||
instance J.ToJSON HTTPErr where
|
||||
toJSON err = toObj $ case err of
|
||||
(HClient e) -> ("client", J.toJSON $ show e)
|
||||
(HParse st e) ->
|
||||
( "parse"
|
||||
, J.toJSON (HTTP.statusCode st, show e)
|
||||
)
|
||||
(HStatus resp) ->
|
||||
("status", J.toJSON resp)
|
||||
(HOther e) -> ("internal", J.toJSON $ show e)
|
||||
where
|
||||
toObj :: (T.Text, J.Value) -> J.Value
|
||||
toObj (k, v) = J.object [ "type" J..= k
|
||||
, "detail" J..= v]
|
||||
-- encapsulates a http operation
|
||||
instance ToEngineLog HTTPErr Hasura where
|
||||
toEngineLog err = (LevelError, eventTriggerLogType, J.toJSON err)
|
||||
|
||||
isNetworkError :: HTTPErr -> Bool
|
||||
isNetworkError = \case
|
||||
HClient he -> isNetworkErrorHC he
|
||||
_ -> False
|
||||
|
||||
isNetworkErrorHC :: HTTP.HttpException -> Bool
|
||||
isNetworkErrorHC = \case
|
||||
HTTP.HttpExceptionRequest _ (HTTP.ConnectionFailure _) -> True
|
||||
HTTP.HttpExceptionRequest _ HTTP.ConnectionTimeout -> True
|
||||
HTTP.HttpExceptionRequest _ HTTP.ResponseTimeout -> True
|
||||
_ -> False
|
||||
|
||||
anyBodyParser :: HTTP.Response B.ByteString -> Either HTTPErr HTTPResp
|
||||
anyBodyParser resp = do
|
||||
let httpResp = mkHTTPResp resp
|
||||
if respCode >= HTTP.status200 && respCode < HTTP.status300
|
||||
then return httpResp
|
||||
else throwError $ HStatus httpResp
|
||||
where
|
||||
respCode = HTTP.responseStatus resp
|
||||
|
||||
data HTTPReq
|
||||
= HTTPReq
|
||||
{ _hrqMethod :: !String
|
||||
, _hrqUrl :: !String
|
||||
, _hrqPayload :: !(Maybe J.Value)
|
||||
, _hrqTry :: !Int
|
||||
, _hrqDelay :: !(Maybe Int)
|
||||
} deriving (Show, Eq)
|
||||
|
||||
$(J.deriveJSON (J.aesonDrop 4 J.snakeCase){J.omitNothingFields=True} ''HTTPReq)
|
||||
|
||||
instance ToEngineLog HTTPReq Hasura where
|
||||
toEngineLog req = (LevelInfo, eventTriggerLogType, J.toJSON req)
|
||||
|
||||
-- | Like 'HTTP.httpLbs' but we catch 'HTTP.HttpException' and return all known
|
||||
-- error-like conditions as 'HTTPErr'.
|
||||
runHTTP
|
||||
:: ( MonadReader r m
|
||||
, Has (Logger Hasura) r
|
||||
, Has HTTP.Manager r
|
||||
, MonadIO m
|
||||
)
|
||||
=> HTTP.Request -> Maybe ExtraContext -> m (Either HTTPErr HTTPResp)
|
||||
runHTTP req exLog = do
|
||||
logger :: Logger Hasura <- asks getter
|
||||
manager <- asks getter
|
||||
res <- liftIO $ try $ HTTP.httpLbs req manager
|
||||
case res of
|
||||
Left e -> unLogger logger $ HClient e
|
||||
Right resp -> unLogger logger $ HTTPRespExtra (mkHTTPResp resp) exLog
|
||||
return $ either (Left . HClient) anyBodyParser res
|
@ -321,7 +321,7 @@ pollQuery metrics batchSize pgExecCtx pgQuery handler =
    flip A.mapConcurrently_ queryVarsBatches $ \queryVars -> do
      (dt, mxRes) <- timing _rmQuery $
        runExceptT $ runLazyTx' pgExecCtx $ executeMultiplexedQuery pgQuery queryVars
      let lqMeta = LiveQueryMetadata $ fromUnits dt
      let lqMeta = LiveQueryMetadata $ convertDuration dt
          operations = getCohortOperations cohortSnapshotMap lqMeta mxRes

      void $ timing _rmPush $
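The `fromUnits`-to-`convertDuration` rename recurs through the hunks below. A sketch of what such a helper can look like, assuming a `Duration` class that round-trips through `DiffTime` (the real definition lives in `Data.Time.Clock.Units` and may differ):

import Data.Time.Clock (DiffTime, NominalDiffTime)

class Duration d where
  fromDiffTime :: DiffTime -> d
  toDiffTime   :: d -> DiffTime

instance Duration DiffTime where
  fromDiffTime = id
  toDiffTime   = id

instance Duration NominalDiffTime where
  fromDiffTime = realToFrac
  toDiffTime   = realToFrac

-- Convert between any two duration representations via DiffTime.
convertDuration :: (Duration x, Duration y) => x -> y
convertDuration = fromDiffTime . toDiffTime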
@ -51,8 +51,8 @@ runGQ reqId userInfo reqHdrs req = do
        | otherwise = Telem.Query
      (telemTimeIO, resp) <- E.execRemoteGQ reqId userInfo reqHdrs req rsi opDef
      return (telemCacheHit, Telem.Remote, (telemTimeIO, telemQueryType, resp))
  let telemTimeIO = fromUnits telemTimeIO_DT
      telemTimeTot = fromUnits telemTimeTot_DT
  let telemTimeIO = convertDuration telemTimeIO_DT
      telemTimeTot = convertDuration telemTimeTot_DT
  Telem.recordTimingMetric Telem.RequestDimensions{..} Telem.RequestTimings{..}
  return resp

@ -224,7 +224,7 @@ onConn (L.Logger logger) corsPolicy wsId requestHead = do
        CSInitialised _ expTimeM _ ->
          maybe STM.retry return expTimeM
      currTime <- TC.getCurrentTime
      sleep $ fromUnits $ TC.diffUTCTime expTime currTime
      sleep $ convertDuration $ TC.diffUTCTime expTime currTime

    accept hdrs errType = do
      logger $ mkWsInfoLog Nothing (WsConnInfo wsId Nothing Nothing) EAccepted
@ -356,7 +356,7 @@ onStart serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
        -- Telemetry. NOTE: don't time network IO:
        telemTimeTot <- Seconds <$> timerTot
        sendSuccResp encJson $ LQ.LiveQueryMetadata telemTimeIO_DT
        let telemTimeIO = fromUnits telemTimeIO_DT
        let telemTimeIO = convertDuration telemTimeIO_DT
        Telem.recordTimingMetric Telem.RequestDimensions{..} Telem.RequestTimings{..}

      sendCompleted (Just reqId)
@ -382,7 +382,7 @@ onStart serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
        -- Telemetry. NOTE: don't time network IO:
        telemTimeTot <- Seconds <$> timerTot
        sendRemoteResp reqId (_hrBody val) $ LQ.LiveQueryMetadata telemTimeIO_DT
        let telemTimeIO = fromUnits telemTimeIO_DT
        let telemTimeIO = convertDuration telemTimeIO_DT
        Telem.recordTimingMetric Telem.RequestDimensions{..} Telem.RequestTimings{..}

      sendCompleted (Just reqId)

@ -31,7 +31,6 @@ import Hasura.GraphQL.Validate.Field
import Hasura.GraphQL.Validate.InputValue
import Hasura.GraphQL.Validate.Types
import Hasura.RQL.Types
import Hasura.RQL.Types.QueryCollection

data QueryParts
  = QueryParts

@ -18,9 +18,11 @@ import Data.Functor.Classes (Eq1 (..), Eq2 (..))
import Data.GADT.Compare
import Data.Int
import Data.Scientific (Scientific)
import Data.Time.Clock
import Data.Vector (Vector)
import GHC.Generics ((:*:) (..), (:+:) (..), Generic (..), K1 (..),
                     M1 (..), U1 (..), V1)
import System.Cron.Types

import Hasura.Incremental.Select

@ -162,6 +164,22 @@ instance Cacheable Integer where unchanged _ = (==)
instance Cacheable Scientific where unchanged _ = (==)
instance Cacheable Text where unchanged _ = (==)
instance Cacheable N.URIAuth where unchanged _ = (==)
instance Cacheable DiffTime where unchanged _ = (==)
instance Cacheable NominalDiffTime where unchanged _ = (==)
instance Cacheable UTCTime where unchanged _ = (==)

-- instances for CronSchedule from package `cron`
instance Cacheable StepField
instance Cacheable RangeField
instance Cacheable SpecificField
instance Cacheable BaseField
instance Cacheable CronField
instance Cacheable MonthSpec
instance Cacheable DayOfMonthSpec
instance Cacheable DayOfWeekSpec
instance Cacheable HourSpec
instance Cacheable MinuteSpec
instance Cacheable CronSchedule

instance (Cacheable a) => Cacheable (Seq a) where
  unchanged = liftEq . unchanged

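The empty `Cacheable` instances above lean on a `Generic`-derived default, which is why the cron field types only need `deriving Generic` upstream. A simplified sketch of that mechanism (the real class in `Hasura.Incremental` threads extra state through `unchanged`, so this shows only the shape; names are primed to stress that they are not the real ones):

{-# LANGUAGE DefaultSignatures, TypeOperators #-}
import GHC.Generics

class Cacheable' a where
  unchanged' :: a -> a -> Bool
  default unchanged' :: (Generic a, GCacheable (Rep a)) => a -> a -> Bool
  unchanged' x y = gunchanged (from x) (from y)

class GCacheable f where
  gunchanged :: f p -> f p -> Bool

instance GCacheable U1 where
  gunchanged U1 U1 = True

instance Cacheable' c => GCacheable (K1 i c) where
  gunchanged (K1 a) (K1 b) = unchanged' a b

instance GCacheable f => GCacheable (M1 i t f) where
  gunchanged (M1 a) (M1 b) = gunchanged a b

instance (GCacheable f, GCacheable g) => GCacheable (f :*: g) where
  gunchanged (a1 :*: b1) (a2 :*: b2) = gunchanged a1 a2 && gunchanged b1 b2

instance (GCacheable f, GCacheable g) => GCacheable (f :+: g) where
  gunchanged (L1 a) (L1 b) = gunchanged a b
  gunchanged (R1 a) (R1 b) = gunchanged a b
  gunchanged _      _      = False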
@ -20,6 +20,7 @@ module Hasura.Logging
  , mkLoggerCtx
  , cleanLoggerCtx
  , eventTriggerLogType
  , scheduledTriggerLogType
  , EnabledLogTypes (..)
  , defaultEnabledEngineLogTypes
  , isEngineLogTypeEnabled
@ -95,6 +96,7 @@ data InternalLogTypes
  = ILTUnstructured
  -- ^ mostly for debug logs - see @debugT@, @debugBS@ and @debugLBS@ functions
  | ILTEventTrigger
  | ILTScheduledTrigger
  | ILTWsServer
  -- ^ internal logs for the websocket server
  | ILTPgClient
@ -111,6 +113,7 @@ instance J.ToJSON InternalLogTypes where
  toJSON = \case
    ILTUnstructured -> "unstructured"
    ILTEventTrigger -> "event-trigger"
    ILTScheduledTrigger -> "scheduled-trigger"
    ILTWsServer -> "ws-server"
    ILTPgClient -> "pg-client"
    ILTMetadata -> "metadata"
@ -267,3 +270,6 @@ mkLogger (LoggerCtx loggerSet serverLogLevel timeGetter enabledLogTypes) = Logge

eventTriggerLogType :: EngineLogType Hasura
eventTriggerLogType = ELTInternal ILTEventTrigger

scheduledTriggerLogType :: EngineLogType Hasura
scheduledTriggerLogType = ELTInternal ILTScheduledTrigger

@ -31,8 +31,7 @@ import Control.Monad.Reader as M
import Control.Monad.State.Strict as M
import Control.Monad.Writer.Strict as M (MonadWriter (..), WriterT (..),
                                         execWriterT, runWriterT)
import Data.Align as M (Align (align, alignWith))
import Data.Align.Key as M (AlignWithKey (..))
import Data.Align as M (Semialign (align, alignWith))
import Data.Bool as M (bool)
import Data.Data as M (Data (..))
import Data.Either as M (lefts, partitionEithers, rights)

@ -38,11 +38,9 @@ import qualified Data.Aeson.Casing as J
import qualified Data.Aeson.TH as J
import qualified Data.HashMap.Strict as Map
import qualified Data.HashSet as Set
import qualified Data.Text as T
import qualified Database.PG.Query as Q
import qualified Language.GraphQL.Draft.Syntax as G

import Data.URL.Template (renderURLTemplate)
import Language.Haskell.TH.Syntax (Lift)

getActionInfo
@ -141,10 +139,6 @@ resolveAction customTypes allPGScalars actionDefinition = do
        inputTypeInfos = nonObjectTypeMap <> mapFromL VT.getNamedTy defaultTypes
     in Map.lookup typeName inputTypeInfos

    resolveWebhook (InputWebhook urlTemplate) = do
      eitherRenderedTemplate <- renderURLTemplate urlTemplate
      either (throw400 Unexpected . T.pack) (pure . ResolvedWebhook) eitherRenderedTemplate

    getObjectTypeInfo typeName =
      onNothing (Map.lookup (ObjectTypeName typeName) (snd customTypes)) $
        throw400 NotExists $ "the type: "

@ -14,6 +14,8 @@ module Hasura.RQL.DDL.EventTrigger
  , mkAllTriggersQ
  , delTriggerQ
  , getEventTriggerDef
  , getWebhookInfoFromConf
  , getHeaderInfosFromConf
  , updateEventTriggerInCatalog
  ) where

@ -29,6 +29,8 @@ import Hasura.RQL.DDL.Metadata.Types
import Hasura.RQL.DDL.Permission.Internal (dropPermFromCatalog)
import Hasura.RQL.DDL.RemoteSchema (addRemoteSchemaToCatalog, fetchRemoteSchemas,
                                    removeRemoteSchemaFromCatalog)
import Hasura.RQL.DDL.ScheduledTrigger (addCronTriggerToCatalog,
                                        deleteCronTriggerFromCatalog)
import Hasura.RQL.DDL.Schema.Catalog (saveTableToCatalog)
import Hasura.RQL.Types
import Hasura.SQL.Types
@ -57,6 +59,7 @@ clearUserMetadata = liftTx $ Q.catchE defaultTxErrorHandler $ do
  Q.unitQ "DELETE FROM hdb_catalog.hdb_custom_types" () False
  Q.unitQ "DELETE FROM hdb_catalog.hdb_action_permission" () False
  Q.unitQ "DELETE FROM hdb_catalog.hdb_action WHERE is_system_defined <> 'true'" () False
  Q.unitQ "DELETE FROM hdb_catalog.hdb_cron_triggers WHERE include_in_metadata" () False

runClearMetadata
  :: (MonadTx m, CacheRWM m)
@ -70,7 +73,7 @@ applyQP1
  :: (QErrM m)
  => ReplaceMetadata -> m ()
applyQP1 (ReplaceMetadata _ tables functionsMeta schemas collections
          allowlist _ actions) = do
          allowlist _ actions cronTriggers) = do
  withPathK "tables" $ do

    checkMultipleDecls "tables" $ map _tmTable tables
@ -114,6 +117,9 @@ applyQP1 (ReplaceMetadata _ tables functionsMeta schemas collections
  withPathK "actions" $
    checkMultipleDecls "actions" $ map _amName actions

  withPathK "cron_triggers" $
    checkMultipleDecls "cron triggers" $ map ctName cronTriggers

  where
    withTableName qt = withPathK (qualObjectToText qt)

@ -135,7 +141,7 @@ applyQP2 replaceMetadata = do

saveMetadata :: (MonadTx m, HasSystemDefined m) => ReplaceMetadata -> m ()
saveMetadata (ReplaceMetadata _ tables functionsMeta
              schemas collections allowlist customTypes actions) = do
              schemas collections allowlist customTypes actions cronTriggers) = do

  withPathK "tables" $ do
    indexedForM_ tables $ \TableMeta{..} -> do
@ -192,6 +198,11 @@ saveMetadata (ReplaceMetadata _ tables functionsMeta
  withPathK "custom_types" $
    CustomTypes.persistCustomTypes customTypes

  -- cron triggers
  withPathK "cron_triggers" $
    indexedForM_ cronTriggers $ \ct -> liftTx $ do
      addCronTriggerToCatalog ct

  -- actions
  withPathK "actions" $
    indexedForM_ actions $ \action -> do
@ -203,6 +214,7 @@ saveMetadata (ReplaceMetadata _ tables functionsMeta
      let createActionPermission = CreateActionPermission (_amName action)
                                   (_apmRole permission) Nothing (_apmComment permission)
      Action.persistCreateActionPermission createActionPermission

  where
    processPerms tableName perms = indexedForM_ perms $ Permission.addPermP2 tableName

@ -274,10 +286,13 @@ fetchMetadata = do
  -- fetch actions
  actions <- fetchActions

  cronTriggers <- fetchCronTriggers

  return $ ReplaceMetadata currentMetadataVersion (HMIns.elems postRelMap) functions
             remoteSchemas collections allowlist
             customTypes
             actions
             cronTriggers

  where

@ -373,6 +388,29 @@ fetchMetadata = do
      , ComputedFieldMeta name definition comment
      )

    fetchCronTriggers =
      map uncurryCronTrigger
        <$> Q.listQE defaultTxErrorHandler
            [Q.sql|
              SELECT ct.name, ct.webhook_conf, ct.cron_schedule, ct.payload,
                     ct.retry_conf, ct.header_conf, ct.include_in_metadata, ct.comment
              FROM hdb_catalog.hdb_cron_triggers ct
              WHERE include_in_metadata
            |] () False
      where
        uncurryCronTrigger
          (name, webhook, schedule, payload, retryConfig, headerConfig, includeMetadata, comment) =
          CronTriggerMetadata
          { ctName = name,
            ctWebhook = Q.getAltJ webhook,
            ctSchedule = schedule,
            ctPayload = Q.getAltJ <$> payload,
            ctRetryConf = Q.getAltJ retryConfig,
            ctHeaders = Q.getAltJ headerConfig,
            ctIncludeInMetadata = includeMetadata,
            ctComment = comment
          }

    fetchCustomTypes :: Q.TxE QErr CustomTypes
    fetchCustomTypes =
      Q.getAltJ . runIdentity . Q.getRow <$>
@ -473,3 +511,4 @@ purgeMetadataObj = liftTx . \case
  MOCustomTypes -> CustomTypes.clearCustomTypes
  MOAction action -> Action.deleteActionFromCatalog action Nothing
  MOActionPermission action role -> Action.deleteActionPermissionFromCatalog action role
  MOCronTrigger ctName -> deleteCronTriggerFromCatalog ctName

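To make the new `cron_triggers` metadata section concrete, an illustrative exported-metadata fragment, built here with `aesonQQ` (assumed to be the quasiquoter from `Data.Aeson.QQ.Simple`, available in recent aeson releases); the trigger name, URL and schedule are made up:

{-# LANGUAGE QuasiQuotes #-}
import Data.Aeson (Value)
import Data.Aeson.QQ.Simple (aesonQQ)

-- Hypothetical shape of an exported metadata document; the field set
-- mirrors fetchCronTriggers above.
metadataFragment :: Value
metadataFragment = [aesonQQ|
  { "version": 2
  , "tables": []
  , "cron_triggers":
      [ { "name": "eod_sales_report"
        , "webhook": "https://example.com/reports"
        , "schedule": "0 21 * * 1-5"
        , "include_in_metadata": true
        }
      ]
  } |]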
@ -3,32 +3,37 @@ module Hasura.RQL.DDL.Metadata.Generator
  (genReplaceMetadata)
where

import Hasura.GraphQL.Utils (simpleGraphQLQuery)
import Hasura.GraphQL.Utils (simpleGraphQLQuery)
import Hasura.Prelude
import Hasura.RQL.DDL.Headers
import Hasura.RQL.DDL.Metadata.Types
import Hasura.RQL.Types
import Hasura.Server.Utils
import Hasura.SQL.Types
import Hasura.RQL.Types.Common (NonNegativeDiffTime)

import qualified Hasura.RQL.DDL.ComputedField as ComputedField
import qualified Hasura.RQL.DDL.Permission as Permission
import qualified Hasura.RQL.DDL.Permission.Internal as Permission
import qualified Hasura.RQL.DDL.QueryCollection as Collection
import qualified Hasura.RQL.DDL.Relationship as Relationship
import qualified Hasura.RQL.DDL.Schema as Schema
import qualified Hasura.RQL.DDL.ComputedField as ComputedField
import qualified Hasura.RQL.DDL.Permission as Permission
import qualified Hasura.RQL.DDL.Permission.Internal as Permission
import qualified Hasura.RQL.DDL.QueryCollection as Collection
import qualified Hasura.RQL.DDL.Relationship as Relationship
import qualified Hasura.RQL.DDL.Schema as Schema

import qualified Data.Aeson as J
import qualified Data.HashMap.Strict as HM
import qualified Data.List.NonEmpty as NEList
import qualified Data.Text as T
import qualified Data.Vector as V
import qualified Language.GraphQL.Draft.Parser as G
import qualified Language.GraphQL.Draft.Syntax as G
import qualified Language.Haskell.TH.Syntax as TH
import qualified Network.URI as N
import System.Cron.Types

import qualified Data.Aeson as J
import qualified Data.Text as T
import qualified Data.Vector as V
import qualified Language.GraphQL.Draft.Parser as G
import qualified Language.GraphQL.Draft.Syntax as G
import qualified Language.Haskell.TH.Syntax as TH
import qualified Network.URI as N
import qualified System.Cron.Parser as Cr

import Test.QuickCheck
import Test.QuickCheck.Instances.Semigroup ()
import Test.QuickCheck.Instances.Time ()
import Test.QuickCheck.Instances.UnorderedContainers ()

genReplaceMetadata :: Gen ReplaceMetadata
genReplaceMetadata = do
@ -41,15 +46,13 @@ genReplaceMetadata = do
  <*> arbitrary
  <*> arbitrary
  <*> arbitrary
  <*> arbitrary
  where
    genFunctionsMetadata :: MetadataVersion -> Gen FunctionsMetadata
    genFunctionsMetadata = \case
      MVVersion1 -> FMVersion1 <$> arbitrary
      MVVersion2 -> FMVersion2 <$> arbitrary

instance (Eq k, Hashable k, Arbitrary k, Arbitrary v) => Arbitrary (HM.HashMap k v) where
  arbitrary = HM.fromList <$> arbitrary

instance Arbitrary G.Name where
  arbitrary = G.Name . T.pack <$> listOf1 (elements ['a'..'z'])

@ -198,9 +201,6 @@ instance Arbitrary Collection.CreateCollection where
instance Arbitrary Collection.CollectionReq where
  arbitrary = genericArbitrary

instance (Arbitrary a) => Arbitrary (NEList.NonEmpty a) where
  arbitrary = NEList.fromList <$> listOf1 arbitrary

instance Arbitrary G.NamedType where
  arbitrary = G.NamedType <$> arbitrary

@ -296,3 +296,38 @@ instance Arbitrary ActionPermissionMetadata where

instance Arbitrary ActionMetadata where
  arbitrary = genericArbitrary

instance Arbitrary CronTriggerMetadata where
  arbitrary = genericArbitrary

instance Arbitrary WebhookConf where
  arbitrary = genericArbitrary

instance Arbitrary STRetryConf where
  arbitrary = genericArbitrary

instance Arbitrary NonNegativeDiffTime where
  arbitrary = genericArbitrary

instance Arbitrary CronSchedule where
  arbitrary = elements sampleCronSchedules

sampleCronSchedules :: [CronSchedule]
sampleCronSchedules = rights $ map Cr.parseCronSchedule $
  [ "* * * * *"    -- every minute
  , "5 * * * *"    -- every hour at the 5th minute
  , "*/5 * * * *"  -- every 5 minutes
  , "* 5 * * *"    -- every minute of the 5th hour of the day
  , "5 5 * * *"    -- fifth minute of the fifth hour every day
  , "0 0 5 * *"    -- 00:00 of the 5th day of every month
  , "0 0 1 1 *"    -- 00:00 of 1st of January
  , "0 0 * * 0"    -- every Sunday at 00:00
  ]

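A quick way to exercise the generator above from GHCi (illustrative; `generate` is from `Test.QuickCheck`, and `serializeCronSchedule` is assumed to come from the cron package, as used elsewhere in this diff):

demoCronGen :: IO ()
demoCronGen = do
  -- Draw one of the sample schedules and print its canonical text form.
  schedule <- generate (arbitrary :: Gen CronSchedule)
  print $ serializeCronSchedule schedule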
@ -164,15 +164,16 @@ instance FromJSON ClearMetadata where

data ReplaceMetadata
  = ReplaceMetadata
  { aqVersion :: !MetadataVersion
  , aqTables :: ![TableMeta]
  , aqFunctions :: !FunctionsMetadata
  , aqRemoteSchemas :: ![AddRemoteSchemaQuery]
  , aqQueryCollections :: ![Collection.CreateCollection]
  , aqAllowlist :: ![Collection.CollectionReq]
  , aqCustomTypes :: !CustomTypes
  , aqActions :: ![ActionMetadata]
  } deriving (Show, Eq, Lift)
  { aqVersion          :: !MetadataVersion
  , aqTables           :: ![TableMeta]
  , aqFunctions        :: !FunctionsMetadata
  , aqRemoteSchemas    :: ![AddRemoteSchemaQuery]
  , aqQueryCollections :: ![Collection.CreateCollection]
  , aqAllowlist        :: ![Collection.CollectionReq]
  , aqCustomTypes      :: !CustomTypes
  , aqActions          :: ![ActionMetadata]
  , aqCronTriggers     :: ![CronTriggerMetadata]
  } deriving (Show, Eq)

instance FromJSON ReplaceMetadata where
  parseJSON = withObject "Object" $ \o -> do
@ -185,6 +186,7 @@ instance FromJSON ReplaceMetadata where
    <*> o .:? "allow_list" .!= []
    <*> o .:? "custom_types" .!= emptyCustomTypes
    <*> o .:? "actions" .!= []
    <*> o .:? "cron_triggers" .!= []
    where
      parseFunctions version maybeValue =
        case version of
@ -252,6 +254,7 @@ replaceMetadataToOrdJSON ( ReplaceMetadata
                             allowlist
                             customTypes
                             actions
                             cronTriggers
                           ) = AO.object $ [versionPair, tablesPair] <>
                               catMaybes [ functionsPair
                                         , remoteSchemasPair
@ -259,6 +262,7 @@ replaceMetadataToOrdJSON ( ReplaceMetadata
                                         , allowlistPair
                                         , actionsPair
                                         , customTypesPair
                                         , cronTriggersPair
                                         ]
  where
    versionPair = ("version", AO.toOrdered version)
@ -274,6 +278,8 @@ replaceMetadataToOrdJSON ( ReplaceMetadata
      else Just ("custom_types", customTypesToOrdJSON customTypes)
    actionsPair = listToMaybeOrdPair "actions" actionMetadataToOrdJSON actions

    cronTriggersPair = listToMaybeOrdPair "cron_triggers" crontriggerQToOrdJSON cronTriggers

    tableMetaToOrdJSON :: TableMeta -> AO.Value
    tableMetaToOrdJSON ( TableMeta
                         table
@ -422,6 +428,29 @@ replaceMetadataToOrdJSON ( ReplaceMetadata
                 , ("definition", AO.toOrdered definition)
                 ] <> catMaybes [maybeCommentToMaybeOrdPair comment]

    crontriggerQToOrdJSON :: CronTriggerMetadata -> AO.Value
    crontriggerQToOrdJSON
      (CronTriggerMetadata name webhook schedule payload retryConf headers includeInMetadata comment) =
      AO.object $
        [ ("name", AO.toOrdered name)
        , ("webhook", AO.toOrdered webhook)
        , ("schedule", AO.toOrdered schedule)
        , ("include_in_metadata", AO.toOrdered includeInMetadata)
        ]
        <> catMaybes
           [ maybeAnyToMaybeOrdPair "payload" AO.toOrdered payload
           , maybeAnyToMaybeOrdPair "retry_conf" AO.toOrdered (maybeRetryConfiguration retryConf)
           , maybeAnyToMaybeOrdPair "headers" AO.toOrdered (maybeHeader headers)
           , maybeAnyToMaybeOrdPair "comment" AO.toOrdered comment]
      where
        maybeRetryConfiguration retryConfig
          | retryConfig == defaultSTRetryConf = Nothing
          | otherwise = Just retryConfig

        maybeHeader headerConfig
          | headerConfig == [] = Nothing
          | otherwise = Just headerConfig

    customTypesToOrdJSON :: CustomTypes -> AO.Value
    customTypesToOrdJSON (CustomTypes inpObjs objs scalars enums) =
      AO.object . catMaybes $ [ listToMaybeOrdPair "input_objects" inputObjectToOrdJSON =<< inpObjs

147
server/src-lib/Hasura/RQL/DDL/ScheduledTrigger.hs
Normal file
@ -0,0 +1,147 @@
module Hasura.RQL.DDL.ScheduledTrigger
  ( runCreateCronTrigger
  , runDeleteCronTrigger
  , addCronTriggerToCatalog
  , deleteCronTriggerFromCatalog
  , resolveCronTrigger
  , runCreateScheduledEvent
  ) where

import Hasura.Db
import Hasura.EncJSON
import Hasura.Prelude
import Hasura.RQL.DDL.EventTrigger (getHeaderInfosFromConf)
import Hasura.RQL.Types
import Hasura.RQL.Types.Catalog (CatalogCronTrigger(..))
import Hasura.Eventing.ScheduledTrigger

import qualified Database.PG.Query as Q
import qualified Data.Time.Clock as C
import qualified Data.HashMap.Strict as Map

-- | runCreateCronTrigger updates an existing cron trigger when the 'replace'
-- value is set to @true@; when replace is @false@, a new cron trigger is
-- created
runCreateCronTrigger :: (CacheRWM m, MonadTx m) => CreateCronTrigger -> m EncJSON
runCreateCronTrigger CreateCronTrigger {..} = do
  let q = (CronTriggerMetadata cctName
                               cctWebhook
                               cctCronSchedule
                               cctPayload
                               cctRetryConf
                               cctHeaders
                               cctIncludeInMetadata
                               cctComment)
  case cctReplace of
    True -> updateCronTrigger q
    False -> do
      cronTriggersMap <- scCronTriggers <$> askSchemaCache
      case Map.lookup (ctName q) cronTriggersMap of
        Nothing -> pure ()
        Just _ -> throw400 AlreadyExists $
                  "cron trigger with name: "
                  <> (triggerNameToTxt $ ctName q)
                  <> " already exists"

      addCronTriggerToCatalog q
      buildSchemaCacheFor $ MOCronTrigger $ ctName q
      return successMsg

addCronTriggerToCatalog :: (MonadTx m) => CronTriggerMetadata -> m ()
addCronTriggerToCatalog CronTriggerMetadata {..} = liftTx $ do
  Q.unitQE defaultTxErrorHandler
    [Q.sql|
      INSERT into hdb_catalog.hdb_cron_triggers
        (name, webhook_conf, cron_schedule, payload, retry_conf, header_conf, include_in_metadata, comment)
      VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
    |] (ctName, Q.AltJ ctWebhook, ctSchedule, Q.AltJ <$> ctPayload, Q.AltJ ctRetryConf
       ,Q.AltJ ctHeaders, ctIncludeInMetadata, ctComment) False
  currentTime <- liftIO C.getCurrentTime
  let scheduleTimes = generateScheduleTimes currentTime 100 ctSchedule -- generate next 100 events
  insertCronEvents $ map (CronEventSeed ctName) scheduleTimes

resolveCronTrigger
  :: (QErrM m, MonadIO m)
  => CatalogCronTrigger -> m CronTriggerInfo
resolveCronTrigger CatalogCronTrigger {..} = do
  webhookInfo <- resolveWebhook _cctWebhookConf
  headerInfo <- getHeaderInfosFromConf headers
  pure $
    CronTriggerInfo _cctName
                    _cctCronSchedule
                    _cctPayload
                    retryConf
                    webhookInfo
                    headerInfo
                    _cctComment
  where
    retryConf = fromMaybe defaultSTRetryConf _cctRetryConf

    headers = fromMaybe [] _cctHeaderConf

updateCronTrigger :: (CacheRWM m, MonadTx m) => CronTriggerMetadata -> m EncJSON
updateCronTrigger cronTriggerMetadata = do
  checkExists $ ctName cronTriggerMetadata
  updateCronTriggerInCatalog cronTriggerMetadata
  buildSchemaCacheFor $ MOCronTrigger $ ctName cronTriggerMetadata
  return successMsg

updateCronTriggerInCatalog :: (MonadTx m) => CronTriggerMetadata -> m ()
updateCronTriggerInCatalog CronTriggerMetadata {..} = liftTx $ do
  Q.unitQE defaultTxErrorHandler
    [Q.sql|
      UPDATE hdb_catalog.hdb_cron_triggers
      SET webhook_conf = $2,
          cron_schedule = $3,
          payload = $4,
          retry_conf = $5,
          include_in_metadata = $6,
          comment = $7
      WHERE name = $1
    |] (ctName, Q.AltJ ctWebhook, ctSchedule, Q.AltJ <$> ctPayload, Q.AltJ ctRetryConf
       , ctIncludeInMetadata, ctComment) False
  -- since the cron trigger is updated, clear all its future events which are not retries
  Q.unitQE defaultTxErrorHandler
    [Q.sql|
      DELETE FROM hdb_catalog.hdb_cron_events
      WHERE trigger_name = $1 AND scheduled_time > now() AND tries = 0
    |] (Identity ctName) False

runDeleteCronTrigger :: (CacheRWM m, MonadTx m) => ScheduledTriggerName -> m EncJSON
runDeleteCronTrigger (ScheduledTriggerName stName) = do
  checkExists stName
  deleteCronTriggerFromCatalog stName
  withNewInconsistentObjsCheck buildSchemaCache
  return successMsg

deleteCronTriggerFromCatalog :: (MonadTx m) => TriggerName -> m ()
deleteCronTriggerFromCatalog triggerName = liftTx $ do
  Q.unitQE defaultTxErrorHandler
    [Q.sql|
      DELETE FROM hdb_catalog.hdb_cron_triggers
      WHERE name = $1
    |] (Identity triggerName) False

runCreateScheduledEvent :: (MonadTx m) => CreateScheduledEvent -> m EncJSON
runCreateScheduledEvent CreateScheduledEvent {..} = do
  liftTx $ Q.unitQE defaultTxErrorHandler
    [Q.sql|
      INSERT INTO hdb_catalog.hdb_scheduled_events
        (webhook_conf,scheduled_time,payload,retry_conf,header_conf,comment)
      VALUES
        ($1, $2, $3, $4, $5, $6)
    |] ( Q.AltJ cseWebhook
       , cseScheduleAt
       , Q.AltJ csePayload
       , Q.AltJ cseRetryConf
       , Q.AltJ cseHeaders
       , cseComment)
       False
  pure successMsg

checkExists :: (CacheRM m, MonadError QErr m) => TriggerName -> m ()
checkExists name = do
  cronTriggersMap <- scCronTriggers <$> askSchemaCache
  void $ onNothing (Map.lookup name cronTriggersMap) $
    throw400 NotExists $
      "cron trigger with name: " <> (triggerNameToTxt name) <> " does not exist"
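`generateScheduleTimes` and `insertCronEvents` live in `Hasura.Eventing.ScheduledTrigger`, which this diff does not show. A plausible sketch of the former, assuming the cron package's `nextMatch :: CronSchedule -> UTCTime -> Maybe UTCTime`; the real implementation may differ:

import Data.Time.Clock (UTCTime)
import System.Cron (nextMatch)
import System.Cron.Types (CronSchedule)

-- Unfold the next n matching times strictly after `from`; this is what
-- lets addCronTriggerToCatalog pre-seed 100 upcoming events.
generateScheduleTimesSketch :: UTCTime -> Int -> CronSchedule -> [UTCTime]
generateScheduleTimesSketch from n cron = take n (go from)
  where
    go t = case nextMatch cron t of
      Nothing -> []
      Just t' -> t' : go t'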
@ -47,6 +47,7 @@ import Hasura.RQL.DDL.ComputedField
import Hasura.RQL.DDL.CustomTypes
import Hasura.RQL.DDL.Deps
import Hasura.RQL.DDL.EventTrigger
import Hasura.RQL.DDL.ScheduledTrigger
import Hasura.RQL.DDL.RemoteSchema
import Hasura.RQL.DDL.Schema.Cache.Common
import Hasura.RQL.DDL.Schema.Cache.Dependencies
@ -59,7 +60,6 @@ import Hasura.RQL.DDL.Schema.Table
import Hasura.RQL.DDL.Utils (clearHdbViews)
import Hasura.RQL.Types
import Hasura.RQL.Types.Catalog
import Hasura.RQL.Types.QueryCollection
import Hasura.Server.Version (HasVersion)
import Hasura.Session
import Hasura.SQL.Types
@ -189,6 +189,7 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
        , scDepMap = resolvedDependencies
        , scInconsistentObjs =
            inconsistentObjects <> dependencyInconsistentObjects <> toList gqlSchemaInconsistentObjects
        , scCronTriggers = _boCronTriggers resolvedOutputs
        }
  where
    buildAndCollectInfo
@ -199,7 +200,7 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
    buildAndCollectInfo = proc (catalogMetadata, invalidationKeys) -> do
      let CatalogMetadata tables relationships permissions
            eventTriggers remoteSchemas functions allowlistDefs
            computedFields catalogCustomTypes actions = catalogMetadata
            computedFields catalogCustomTypes actions cronTriggers = catalogMetadata

      -- tables
      tableRawInfos <- buildTableCache -< (tables, Inc.selectD #_ikMetadata invalidationKeys)
@ -274,6 +275,8 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
            , "custom types are inconsistent" )
          returnA -< M.empty

      cronTriggersMap <- buildCronTriggers -< ((),cronTriggers)

      -- remote schemas
      let remoteSchemaInvalidationKeys = Inc.selectD #_ikRemoteSchemas invalidationKeys
      remoteSchemaMap <- buildRemoteSchemas -< (remoteSchemaInvalidationKeys, remoteSchemas)
@ -287,6 +290,7 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
          -- If 'maybeResolvedCustomTypes' is 'Nothing', then custom types are inconsinstent.
          -- In such case, use empty resolved value of custom types.
          , _boCustomTypes = fromMaybe (NonObjectTypeMap mempty, mempty) maybeResolvedCustomTypes
          , _boCronTriggers = cronTriggersMap
          }

    mkEventTriggerMetadataObject (CatalogEventTrigger qt trn configuration) =
@ -294,6 +298,11 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
          definition = object ["table" .= qt, "configuration" .= configuration]
      in MetadataObject objectId definition

    mkCronTriggerMetadataObject catalogCronTrigger =
      let definition = toJSON catalogCronTrigger
      in MetadataObject (MOCronTrigger (_cctName catalogCronTrigger))
                        definition

    mkActionMetadataObject (ActionMetadata name comment defn _) =
      MetadataObject (MOAction name) (toJSON $ CreateAction name defn comment)

@ -355,6 +364,25 @@ buildSchemaCacheRule = proc (catalogMetadata, invalidationKeys) -> do
          liftTx $ delTriggerQ triggerName -- executes DROP IF EXISTS.. sql
          mkAllTriggersQ triggerName tableName (M.elems tableColumns) triggerDefinition

    buildCronTriggers
      :: ( ArrowChoice arr
         , Inc.ArrowDistribute arr
         , ArrowWriter (Seq CollectedInfo) arr
         , Inc.ArrowCache m arr
         , MonadIO m
         , MonadTx m)
      => ((),[CatalogCronTrigger])
           `arr` HashMap TriggerName CronTriggerInfo
    buildCronTriggers = buildInfoMap _cctName mkCronTriggerMetadataObject buildCronTrigger
      where
        buildCronTrigger = proc (_,cronTrigger) -> do
          let triggerName = triggerNameToTxt $ _cctName cronTrigger
              addCronTriggerContext e = "in cron trigger " <> triggerName <> ": " <> e
          (| withRecordInconsistency (
            (| modifyErrA (bindErrorA -< resolveCronTrigger cronTrigger)
             |) addCronTriggerContext)
           |) (mkCronTriggerMetadataObject cronTrigger)

    buildActions
      :: ( ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
         , ArrowWriter (Seq CollectedInfo) arr, MonadIO m )

@ -18,7 +18,6 @@ import qualified Hasura.Incremental as Inc

import Hasura.RQL.Types
import Hasura.RQL.Types.Catalog
import Hasura.RQL.Types.QueryCollection
import Hasura.RQL.Types.Run
import Hasura.SQL.Types

@ -53,15 +52,16 @@ data BuildInputs
-- 'MonadWriter' side channel.
data BuildOutputs
  = BuildOutputs
  { _boTables :: !TableCache
  , _boActions :: !ActionCache
  , _boFunctions :: !FunctionCache
  , _boRemoteSchemas :: !(HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject))
  { _boTables        :: !TableCache
  , _boActions       :: !ActionCache
  , _boFunctions     :: !FunctionCache
  , _boRemoteSchemas :: !(HashMap RemoteSchemaName (RemoteSchemaCtx, MetadataObject))
  -- ^ We preserve the 'MetadataObject' from the original catalog metadata in the output so we can
  -- reuse it later if we need to mark the remote schema inconsistent during GraphQL schema
  -- generation (because of field conflicts).
  , _boAllowlist :: !(HS.HashSet GQLQuery)
  , _boCustomTypes :: !(NonObjectTypeMap, AnnotatedObjects)
  , _boAllowlist     :: !(HS.HashSet GQLQuery)
  , _boCustomTypes   :: !(NonObjectTypeMap, AnnotatedObjects)
  , _boCronTriggers  :: !(M.HashMap TriggerName CronTriggerInfo)
  } deriving (Show, Eq)
$(makeLenses ''BuildOutputs)

@ -126,6 +126,7 @@ deleteMetadataObject objectId = case objectId of
  MOTable name -> boTables %~ M.delete name
  MOFunction name -> boFunctions %~ M.delete name
  MORemoteSchema name -> boRemoteSchemas %~ M.delete name
  MOCronTrigger name -> boCronTriggers %~ M.delete name
  MOTableObj tableName tableObjectId -> boTables.ix tableName %~ case tableObjectId of
    MTORel name _ -> tiCoreInfo.tciFieldInfoMap %~ M.delete (fromRel name)
    MTOComputedField name -> tiCoreInfo.tciFieldInfoMap %~ M.delete (fromComputedField name)

@ -4,6 +4,7 @@ module Hasura.RQL.Instances where

import Hasura.Prelude

import qualified Data.Aeson as J
import qualified Data.HashMap.Strict as M
import qualified Data.HashSet as S
import qualified Data.URL.Template as UT
@ -11,10 +12,14 @@ import qualified Language.GraphQL.Draft.Syntax as G
import qualified Language.Haskell.TH.Syntax as TH
import qualified Text.Regex.TDFA as TDFA
import qualified Text.Regex.TDFA.Pattern as TDFA
import qualified Database.PG.Query as Q

import Data.Functor.Product
import Data.GADT.Compare
import Instances.TH.Lift ()
import System.Cron.Parser
import System.Cron.Types
import Data.Text

instance NFData G.Argument
instance NFData G.Directive
@ -49,6 +54,19 @@ deriving instance NFData G.Description
deriving instance (NFData a) => NFData (G.ListValueG a)
deriving instance (NFData a) => NFData (G.ObjectValueG a)

-- instances for CronSchedule from package `cron`
instance NFData StepField
instance NFData RangeField
instance NFData SpecificField
instance NFData BaseField
instance NFData CronField
instance NFData MonthSpec
instance NFData DayOfMonthSpec
instance NFData DayOfWeekSpec
instance NFData HourSpec
instance NFData MinuteSpec
instance NFData CronSchedule

instance (TH.Lift k, TH.Lift v) => TH.Lift (M.HashMap k v) where
  lift m = [| M.fromList $(TH.lift $ M.toList m) |]

@ -79,3 +97,22 @@ instance (GCompare f, GCompare g) => GCompare (Product f g) where
      GEQ -> GEQ
      GGT -> GGT
    GGT -> GGT

instance J.FromJSON CronSchedule where
  parseJSON = J.withText "CronSchedule" $ \t ->
    either fail pure $ parseCronSchedule t

instance J.ToJSON CronSchedule where
  toJSON = J.String . serializeCronSchedule

instance Q.ToPrepArg CronSchedule where
  toPrepVal = Q.toPrepVal . serializeCronSchedule

instance Q.FromCol CronSchedule where
  fromCol bs =
    case Q.fromCol bs of
      Left err -> fail $ unpack err
      Right dbCron ->
        case parseCronSchedule dbCron of
          Left err' -> fail $ "invalid cron schedule " <> err'
          Right cron -> Right cron

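The new aeson instances can be sanity-checked in GHCi; an illustrative round trip (needs OverloadedStrings for the ByteString literal):

-- "0 21 * * 1-5" is 9pm on weekdays; decode it, then re-encode.
cronRoundTrip :: Maybe J.Value
cronRoundTrip =
  J.toJSON <$> (J.decode "\"0 21 * * 1-5\"" :: Maybe CronSchedule)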
@ -2,7 +2,6 @@ module Hasura.RQL.Types
  ( MonadTx(..)

  , UserInfoM(..)
  , successMsg

  , HasHttpManager (..)
  , HasGCtxMap (..)
@ -38,7 +37,6 @@ module Hasura.RQL.Types
  , module R
  ) where

import Hasura.EncJSON
import Hasura.Prelude
import Hasura.Session
import Hasura.SQL.Types
@ -56,7 +54,9 @@ import Hasura.RQL.Types.EventTrigger as R
import Hasura.RQL.Types.Function as R
import Hasura.RQL.Types.Metadata as R
import Hasura.RQL.Types.Permission as R
import Hasura.RQL.Types.QueryCollection as R
import Hasura.RQL.Types.RemoteSchema as R
import Hasura.RQL.Types.ScheduledTrigger as R
import Hasura.RQL.Types.SchemaCache as R
import Hasura.RQL.Types.SchemaCache.Build as R
import Hasura.RQL.Types.Table as R
@ -289,7 +289,4 @@ askFieldInfo m f =
askCurRole :: (UserInfoM m) => m RoleName
askCurRole = _uiRole <$> askUserInfo

successMsg :: EncJSON
successMsg = "{\"message\":\"success\"}"

type HeaderObj = M.HashMap T.Text T.Text

@ -35,11 +35,11 @@ module Hasura.RQL.Types.Action


import Control.Lens (makeLenses)
import Data.URL.Template
import Hasura.Incremental (Cacheable)
import Hasura.Prelude
import Hasura.RQL.DDL.Headers
import Hasura.RQL.Types.CustomTypes
import Hasura.RQL.Types.Common
import Hasura.Session
import Hasura.SQL.Types
import Language.Haskell.TH.Syntax (Lift)
@ -62,10 +62,6 @@ instance Q.FromCol ActionName where
instance Q.ToPrepArg ActionName where
  toPrepVal = Q.toPrepVal . G.unName . unActionName

newtype ResolvedWebhook
  = ResolvedWebhook { unResolvedWebhook :: Text}
  deriving ( Show, Eq, J.FromJSON, J.ToJSON, Hashable, DQuote, Lift)

data ActionMutationKind
  = ActionSynchronous
  | ActionAsynchronous
@ -166,21 +162,6 @@ data ActionInfo
$(J.deriveToJSON (J.aesonDrop 3 J.snakeCase) ''ActionInfo)
$(makeLenses ''ActionInfo)

newtype InputWebhook
  = InputWebhook {unInputWebhook :: URLTemplate}
  deriving (Show, Eq, Lift, Generic)
instance NFData InputWebhook
instance Cacheable InputWebhook

instance J.ToJSON InputWebhook where
  toJSON = J.String . printURLTemplate . unInputWebhook

instance J.FromJSON InputWebhook where
  parseJSON = J.withText "String" $ \t ->
    case parseURLTemplate t of
      Left e -> fail $ "Parsing URL template failed: " ++ e
      Right v -> pure $ InputWebhook v

type ActionDefinitionInput = ActionDefinition InputWebhook

data CreateAction

@ -12,6 +12,7 @@ module Hasura.RQL.Types.Catalog
  , CatalogPermission(..)
  , CatalogEventTrigger(..)
  , CatalogFunction(..)
  , CatalogCronTrigger(..)
  , CatalogCustomTypes(..)
  ) where

@ -35,9 +36,12 @@ import Hasura.RQL.Types.Permission
import Hasura.RQL.Types.QueryCollection
import Hasura.RQL.Types.RemoteSchema
import Hasura.RQL.Types.SchemaCache
import Hasura.RQL.Types.ScheduledTrigger
import Hasura.Session
import Hasura.SQL.Types

import System.Cron.Types (CronSchedule(..))

newtype CatalogForeignKey
  = CatalogForeignKey
  { unCatalogForeignKey :: ForeignKey
@ -162,6 +166,20 @@ $(deriveFromJSON (aesonDrop 4 snakeCase) ''CatalogCustomTypes)

type CatalogAction = ActionMetadata

data CatalogCronTrigger
  = CatalogCronTrigger
  { _cctName :: !TriggerName
  , _cctWebhookConf :: !InputWebhook
  , _cctCronSchedule :: !CronSchedule
  , _cctPayload :: !(Maybe Value)
  , _cctRetryConf :: !(Maybe STRetryConf)
  , _cctHeaderConf :: !(Maybe [HeaderConf])
  , _cctComment :: !(Maybe Text)
  } deriving (Show, Eq, Generic)
instance NFData CatalogCronTrigger
instance Cacheable CatalogCronTrigger
$(deriveJSON (aesonDrop 4 snakeCase) ''CatalogCronTrigger)

data CatalogMetadata
  = CatalogMetadata
  { _cmTables :: ![CatalogTable]
@ -174,6 +192,7 @@ data CatalogMetadata
  , _cmComputedFields :: ![CatalogComputedField]
  , _cmCustomTypes :: !CatalogCustomTypes
  , _cmActions :: ![CatalogAction]
  , _cmCronTriggers :: ![CatalogCronTrigger]
  } deriving (Show, Eq, Generic)
instance NFData CatalogMetadata
instance Cacheable CatalogMetadata

@ -33,16 +33,27 @@ module Hasura.RQL.Types.Common

  , SystemDefined(..)
  , isSystemDefined

  , successMsg
  , NonNegativeDiffTime(..)
  , InputWebhook(..)
  , ResolvedWebhook(..)
  , resolveWebhook
  ) where

import Hasura.EncJSON
import Hasura.Incremental (Cacheable)
import Hasura.Prelude
import Hasura.SQL.Types
import Hasura.RQL.Types.Error
import Hasura.RQL.DDL.Headers ()


import Control.Lens (makeLenses)
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
import Data.URL.Template
import Instances.TH.Lift ()
import Language.Haskell.TH.Syntax (Q, TExp, Lift)

@ -235,3 +246,40 @@ newtype SystemDefined = SystemDefined { unSystemDefined :: Bool }

isSystemDefined :: SystemDefined -> Bool
isSystemDefined = unSystemDefined

successMsg :: EncJSON
successMsg = "{\"message\":\"success\"}"

newtype NonNegativeDiffTime = NonNegativeDiffTime { unNonNegativeDiffTime :: DiffTime }
  deriving (Show, Eq, ToJSON, Generic, NFData, Cacheable)

instance FromJSON NonNegativeDiffTime where
  parseJSON = withScientific "NonNegativeDiffTime" $ \t -> do
    case (t >= 0) of
      True -> return $ NonNegativeDiffTime . realToFrac $ t
      False -> fail "negative value not allowed"

newtype ResolvedWebhook
  = ResolvedWebhook { unResolvedWebhook :: Text}
  deriving ( Show, Eq, FromJSON, ToJSON, Hashable, DQuote, Lift)

newtype InputWebhook
  = InputWebhook {unInputWebhook :: URLTemplate}
  deriving (Show, Eq, Lift, Generic)
instance NFData InputWebhook
instance Cacheable InputWebhook

instance ToJSON InputWebhook where
  toJSON = String . printURLTemplate . unInputWebhook

instance FromJSON InputWebhook where
  parseJSON = withText "String" $ \t ->
    case parseURLTemplate t of
      Left e -> fail $ "Parsing URL template failed: " ++ e
      Right v -> pure $ InputWebhook v

resolveWebhook :: (QErrM m, MonadIO m) => InputWebhook -> m ResolvedWebhook
resolveWebhook (InputWebhook urlTemplate) = do
  eitherRenderedTemplate <- renderURLTemplate urlTemplate
  either (throw400 Unexpected . T.pack)
    (pure . ResolvedWebhook) eitherRenderedTemplate

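How the pieces fit: an `InputWebhook` may carry environment-variable templates that `resolveWebhook` renders when the schema cache is built. An illustrative standalone run, assuming `{{VAR}}` template syntax and a hypothetical `WEBHOOK_DOMAIN` environment variable (needs OverloadedStrings):

demoResolve :: IO ()
demoResolve =
  case parseURLTemplate "{{WEBHOOK_DOMAIN}}/cron" of  -- WEBHOOK_DOMAIN is made up
    Left err   -> putStrLn $ "bad template: " <> err
    Right tmpl -> renderURLTemplate tmpl >>= print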
@ -18,6 +18,7 @@ module Hasura.RQL.Types.EventTrigger
  , EventHeaderInfo(..)
  , WebhookConf(..)
  , WebhookConfInfo(..)
  , HeaderConf(..)

  , defaultRetryConf
  , defaultTimeoutSeconds
@ -106,10 +107,16 @@ $(deriveToJSON (aesonDrop 3 snakeCase){omitNothingFields=True} ''EventHeaderInfo
data WebhookConf = WCValue T.Text | WCEnv T.Text
  deriving (Show, Eq, Generic, Lift)
instance NFData WebhookConf
instance Cacheable WebhookConf

instance ToJSON WebhookConf where
  toJSON (WCValue w) = String w
  toJSON (WCEnv wEnv) = String wEnv
  toJSON (WCEnv wEnv) = object ["from_env" .= wEnv]

instance FromJSON WebhookConf where
  parseJSON (Object o) = WCEnv <$> o .: "from_env"
  parseJSON (String t) = pure $ WCValue t
  parseJSON _ = fail "one of string or object must be provided for webhook"

data WebhookConfInfo
  = WebhookConfInfo

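Per the instances above, a webhook can be given either as a plain URL string or as a `from_env` object; both decodes below succeed (illustrative; `MY_WEBHOOK_URL` is a made-up variable name, and OverloadedStrings is assumed):

webhookDecodes :: (Maybe WebhookConf, Maybe WebhookConf)
webhookDecodes =
  ( decode "\"https://example.com/hook\""        -- ~> Just (WCValue ...)
  , decode "{\"from_env\": \"MY_WEBHOOK_URL\"}"  -- ~> Just (WCEnv "MY_WEBHOOK_URL")
  )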
@ -31,6 +31,7 @@ data MetadataObjId
  | MOCustomTypes
  | MOAction !ActionName
  | MOActionPermission !ActionName !RoleName
  | MOCronTrigger !TriggerName
  deriving (Show, Eq, Generic)
$(makePrisms ''MetadataObjId)
instance Hashable MetadataObjId
@ -40,6 +41,7 @@ moiTypeName = \case
  MOTable _ -> "table"
  MOFunction _ -> "function"
  MORemoteSchema _ -> "remote_schema"
  MOCronTrigger _ -> "cron_trigger"
  MOTableObj _ tableObjectId -> case tableObjectId of
    MTORel _ relType -> relTypeToTxt relType <> "_relation"
    MTOPerm _ permType -> permTypeToCode permType <> "_permission"
@ -54,6 +56,7 @@ moiName objectId = moiTypeName objectId <> " " <> case objectId of
  MOTable name -> dquoteTxt name
  MOFunction name -> dquoteTxt name
  MORemoteSchema name -> dquoteTxt name
  MOCronTrigger name -> dquoteTxt name
  MOTableObj tableName tableObjectId ->
    let tableObjectName = case tableObjectId of
          MTORel name _ -> dquoteTxt name

162
server/src-lib/Hasura/RQL/Types/ScheduledTrigger.hs
Normal file
@ -0,0 +1,162 @@
-- | These are types for Scheduled Trigger definition; see "Hasura.Eventing.ScheduledTrigger"
module Hasura.RQL.Types.ScheduledTrigger
  ( ScheduledTriggerName(..)
  , CronTriggerMetadata(..)
  , CreateCronTrigger(..)
  , STRetryConf(..)
  , CreateScheduledEvent(..)
  , formatTime'
  , defaultSTRetryConf
  ) where

import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
import Data.Time.Clock
import Data.Time.Clock.Units
import Data.Time.Format.ISO8601
import Hasura.Incremental
import Hasura.RQL.Types.Common (NonNegativeDiffTime(..))
import Hasura.RQL.Types.Action (InputWebhook(..))
import Hasura.Prelude
import System.Cron.Types

import qualified Data.Aeson as J
import qualified Data.Text as T
import qualified Hasura.RQL.Types.EventTrigger as ET

data STRetryConf
  = STRetryConf
  { strcNumRetries :: !Int
  , strcRetryIntervalSeconds :: !NonNegativeDiffTime
  , strcTimeoutSeconds :: !NonNegativeDiffTime
  , strcToleranceSeconds :: !NonNegativeDiffTime
  -- ^ The tolerance configuration is used to determine whether a scheduled
  -- event is not too old to process. The age of the scheduled event is the
  -- difference between the current timestamp and the scheduled event's
  -- timestamp; if the age is greater than the tolerance, the scheduled event
  -- is marked as dead.
  } deriving (Show, Eq, Generic)

instance NFData STRetryConf
instance Cacheable STRetryConf

instance FromJSON STRetryConf where
  parseJSON = withObject "STRetryConf" \o -> do
    numRetries' <- o .:? "num_retries" .!= 0
    retryInterval <-
      o .:? "retry_interval_seconds" .!= (NonNegativeDiffTime $ seconds 10)
    timeout <-
      o .:? "timeout_seconds" .!= (NonNegativeDiffTime $ seconds 60)
    tolerance <-
      o .:? "tolerance_seconds" .!= (NonNegativeDiffTime $ hours 6)
    if numRetries' < 0
    then fail "num_retries cannot be a negative value"
    else pure $ STRetryConf numRetries' retryInterval timeout tolerance

$(deriveToJSON (aesonDrop 4 snakeCase){omitNothingFields=True} ''STRetryConf)

defaultSTRetryConf :: STRetryConf
defaultSTRetryConf =
  STRetryConf
  { strcNumRetries = 0
  , strcRetryIntervalSeconds = NonNegativeDiffTime $ seconds 10
  , strcTimeoutSeconds = NonNegativeDiffTime $ seconds 60
  , strcToleranceSeconds = NonNegativeDiffTime $ hours 6
  }

data CronTriggerMetadata
  = CronTriggerMetadata
  { ctName :: !ET.TriggerName
  , ctWebhook :: !InputWebhook
  , ctSchedule :: !CronSchedule
  , ctPayload :: !(Maybe J.Value)
  , ctRetryConf :: !STRetryConf
  , ctHeaders :: ![ET.HeaderConf]
  , ctIncludeInMetadata :: !Bool
  , ctComment :: !(Maybe Text)
  } deriving (Show, Eq, Generic)

instance NFData CronTriggerMetadata
instance Cacheable CronTriggerMetadata

instance FromJSON CronTriggerMetadata where
  parseJSON =
    withObject "CronTriggerMetadata" $ \o -> do
      ctName <- o .: "name"
      ctWebhook <- o .: "webhook"
      ctPayload <- o .:? "payload"
      ctSchedule <- o .: "schedule"
      ctRetryConf <- o .:? "retry_conf" .!= defaultSTRetryConf
      ctHeaders <- o .:? "headers" .!= []
      ctIncludeInMetadata <- o .: "include_in_metadata"
      ctComment <- o .:? "comment"
      pure CronTriggerMetadata {..}

$(deriveToJSON (aesonDrop 2 snakeCase){omitNothingFields=True} ''CronTriggerMetadata)

data CreateCronTrigger
  = CreateCronTrigger
  { cctName :: !ET.TriggerName
  , cctWebhook :: !InputWebhook
  , cctCronSchedule :: !CronSchedule
  , cctPayload :: !(Maybe J.Value)
  , cctRetryConf :: !STRetryConf
  , cctHeaders :: ![ET.HeaderConf]
  , cctIncludeInMetadata :: !Bool
  , cctComment :: !(Maybe Text)
  , cctReplace :: !Bool
  } deriving (Show, Eq, Generic)

instance NFData CreateCronTrigger
instance Cacheable CreateCronTrigger

instance FromJSON CreateCronTrigger where
  parseJSON =
    withObject "CreateCronTrigger" $ \o -> do
      cctName <- o .: "name"
      cctWebhook <- o .: "webhook"
      cctPayload <- o .:? "payload"
      cctCronSchedule <- o .: "schedule"
      cctRetryConf <- o .:? "retry_conf" .!= defaultSTRetryConf
      cctHeaders <- o .:? "headers" .!= []
      cctIncludeInMetadata <- o .: "include_in_metadata"
      cctComment <- o .:? "comment"
      cctReplace <- o .:? "replace" .!= False
      pure CreateCronTrigger {..}

$(deriveToJSON (aesonDrop 3 snakeCase){omitNothingFields=True} ''CreateCronTrigger)

newtype ScheduledTriggerName
  = ScheduledTriggerName { unName :: ET.TriggerName }
  deriving (Show, Eq)

$(deriveJSON (aesonDrop 2 snakeCase) ''ScheduledTriggerName)

formatTime' :: UTCTime -> T.Text
formatTime' = T.pack . iso8601Show

data CreateScheduledEvent
  = CreateScheduledEvent
  { cseWebhook :: !InputWebhook
  , cseScheduleAt :: !UTCTime
  -- ^ The timestamp should be in the
  -- <https://en.wikipedia.org/wiki/ISO_8601 ISO 8601>
  -- format (which is what @aeson@ expects by default for 'UTCTime').
  , csePayload :: !(Maybe J.Value)
  , cseHeaders :: ![ET.HeaderConf]
  , cseRetryConf :: !STRetryConf
  , cseComment :: !(Maybe Text)
  } deriving (Show, Eq, Generic)

instance FromJSON CreateScheduledEvent where
  parseJSON =
    withObject "CreateScheduledEvent" $ \o ->
      CreateScheduledEvent <$> o .: "webhook"
                           <*> o .: "schedule_at"
                           <*> o .:? "payload"
                           <*> o .:? "headers" .!= []
                           <*> o .:? "retry_conf" .!= defaultSTRetryConf
                           <*> o .:? "comment"

$(deriveToJSON (aesonDrop 3 snakeCase) ''CreateScheduledEvent)
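Tying the parsers to the metadata API (dispatched via `RQCreateCronTrigger` / `RQCreateScheduledEvent` below), illustrative request bodies; the names, URLs and values are made up, and `aesonQQ` is the quasiquoter assumed earlier:

createCronTriggerBody :: Value
createCronTriggerBody = [aesonQQ|
  { "type": "create_cron_trigger"
  , "args":
      { "name": "eod_sales_report"
      , "webhook": "https://example.com/reports"
      , "schedule": "0 21 * * 1-5"
      , "include_in_metadata": true
      , "retry_conf": { "num_retries": 3, "retry_interval_seconds": 15 }
      }
  } |]

createScheduledEventBody :: Value
createScheduledEventBody = [aesonQQ|
  { "type": "create_scheduled_event"
  , "args":
      { "webhook": "https://example.com/welcome-email"
      , "schedule_at": "2020-03-15T18:30:00Z"
      , "payload": { "user_id": 42 }
      }
  } |]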
@ -114,6 +114,8 @@ module Hasura.RQL.Types.SchemaCache
  , FunctionCache
  , getFuncsOfTable
  , askFunctionInfo

  , CronTriggerInfo(..)
  ) where

import qualified Hasura.GraphQL.Context as GC
@ -131,13 +133,17 @@ import Hasura.RQL.Types.Function
import Hasura.RQL.Types.Metadata
import Hasura.RQL.Types.QueryCollection
import Hasura.RQL.Types.RemoteSchema
import Hasura.RQL.Types.EventTrigger
import Hasura.RQL.Types.ScheduledTrigger
import Hasura.RQL.Types.SchemaCacheTypes
import Hasura.RQL.Types.Table
import Hasura.SQL.Types


import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
import System.Cron.Types

import qualified Data.HashMap.Strict as M
import qualified Data.HashSet as HS
@ -174,6 +180,19 @@ type RemoteSchemaMap = M.HashMap RemoteSchemaName RemoteSchemaCtx

type DepMap = M.HashMap SchemaObjId (HS.HashSet SchemaDependency)

data CronTriggerInfo
  = CronTriggerInfo
  { ctiName :: !TriggerName
  , ctiSchedule :: !CronSchedule
  , ctiPayload :: !(Maybe Value)
  , ctiRetryConf :: !STRetryConf
  , ctiWebhookInfo :: !ResolvedWebhook
  , ctiHeaders :: ![EventHeaderInfo]
  , ctiComment :: !(Maybe Text)
  } deriving (Show, Eq)

$(deriveToJSON (aesonDrop 3 snakeCase) ''CronTriggerInfo)

newtype SchemaCacheVer
  = SchemaCacheVer { unSchemaCacheVer :: Word64 }
  deriving (Show, Eq, Ord, Hashable, ToJSON, FromJSON)
@ -200,6 +219,7 @@ data SchemaCache
  , scDefaultRemoteGCtx :: !GC.GCtx
  , scDepMap :: !DepMap
  , scInconsistentObjs :: ![InconsistentMetadata]
  , scCronTriggers :: !(M.HashMap TriggerName CronTriggerInfo)
  } deriving (Show, Eq)
$(deriveToJSON (aesonDrop 2 snakeCase) ''SchemaCache)

@ -41,6 +41,7 @@ module Hasura.SQL.Types
  , SchemaName(..)
  , publicSchema
  , hdbCatalogSchema

  , TableName(..)
  , FunctionName(..)
@ -245,6 +246,9 @@ newtype SchemaName
publicSchema :: SchemaName
publicSchema = SchemaName "public"

hdbCatalogSchema :: SchemaName
hdbCatalogSchema = SchemaName "hdb_catalog"

instance IsIden SchemaName where
  toIden (SchemaName t) = Iden t

@ -8,7 +8,6 @@ import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.TH
import Data.Time (UTCTime)
import Language.Haskell.TH.Syntax (Lift)

import qualified Data.HashMap.Strict as HM
import qualified Data.Text as T
@ -27,6 +26,7 @@ import Hasura.RQL.DDL.QueryCollection
import Hasura.RQL.DDL.Relationship
import Hasura.RQL.DDL.Relationship.Rename
import Hasura.RQL.DDL.RemoteSchema
import Hasura.RQL.DDL.ScheduledTrigger
import Hasura.RQL.DDL.Schema
import Hasura.RQL.DML.Count
import Hasura.RQL.DML.Delete
@ -92,6 +92,12 @@ data RQLQueryV1
  | RQRedeliverEvent !RedeliverEventQuery
  | RQInvokeEventTrigger !InvokeEventTriggerQuery

  -- scheduled triggers
  | RQCreateCronTrigger !CreateCronTrigger
  | RQDeleteCronTrigger !ScheduledTriggerName

  | RQCreateScheduledEvent !CreateScheduledEvent

  -- query collections, allow list related
  | RQCreateQueryCollection !CreateCollection
  | RQDropQueryCollection !DropCollection
@ -114,19 +120,20 @@ data RQLQueryV1
  | RQDropActionPermission !DropActionPermission

  | RQDumpInternalState !DumpInternalState

  | RQSetCustomTypes !CustomTypes
  deriving (Show, Eq, Lift)
  deriving (Show, Eq)

data RQLQueryV2
  = RQV2TrackTable !TrackTableV2
  | RQV2SetTableCustomFields !SetTableCustomFields
  | RQV2TrackFunction !TrackFunctionV2
  deriving (Show, Eq, Lift)
  deriving (Show, Eq)

data RQLQuery
  = RQV1 !RQLQueryV1
  | RQV2 !RQLQueryV2
  deriving (Show, Eq, Lift)
  deriving (Show, Eq)

instance FromJSON RQLQuery where
  parseJSON = withObject "Object" $ \o -> do
@ -209,75 +216,80 @@ runQuery pgExecCtx instanceId userInfo sc hMgr sqlGenCtx systemDefined query = d
-- by hand.
queryModifiesSchemaCache :: RQLQuery -> Bool
queryModifiesSchemaCache (RQV1 qi) = case qi of
  RQAddExistingTableOrView _ -> True
  RQTrackTable _ -> True
  RQUntrackTable _ -> True
  RQTrackFunction _ -> True
  RQUntrackFunction _ -> True
  RQSetTableIsEnum _ -> True
  RQAddExistingTableOrView _ -> True
  RQTrackTable _ -> True
  RQUntrackTable _ -> True
  RQTrackFunction _ -> True
  RQUntrackFunction _ -> True
  RQSetTableIsEnum _ -> True

  RQCreateObjectRelationship _ -> True
  RQCreateArrayRelationship _ -> True
  RQDropRelationship _ -> True
  RQSetRelationshipComment _ -> False
  RQRenameRelationship _ -> True
  RQCreateObjectRelationship _ -> True
  RQCreateArrayRelationship _ -> True
  RQDropRelationship _ -> True
  RQSetRelationshipComment _ -> False
  RQRenameRelationship _ -> True

  RQAddComputedField _ -> True
  RQDropComputedField _ -> True
  RQAddComputedField _ -> True
  RQDropComputedField _ -> True

  RQCreateInsertPermission _ -> True
  RQCreateSelectPermission _ -> True
  RQCreateUpdatePermission _ -> True
  RQCreateDeletePermission _ -> True
  RQCreateInsertPermission _ -> True
  RQCreateSelectPermission _ -> True
  RQCreateUpdatePermission _ -> True
  RQCreateDeletePermission _ -> True

  RQDropInsertPermission _ -> True
  RQDropSelectPermission _ -> True
  RQDropUpdatePermission _ -> True
  RQDropDeletePermission _ -> True
  RQSetPermissionComment _ -> False
  RQDropInsertPermission _ -> True
  RQDropSelectPermission _ -> True
  RQDropUpdatePermission _ -> True
  RQDropDeletePermission _ -> True
  RQSetPermissionComment _ -> False

  RQGetInconsistentMetadata _ -> False
  RQDropInconsistentMetadata _ -> True
  RQGetInconsistentMetadata _ -> False
  RQDropInconsistentMetadata _ -> True

  RQInsert _ -> False
  RQSelect _ -> False
  RQUpdate _ -> False
  RQDelete _ -> False
  RQCount _ -> False
  RQInsert _ -> False
  RQSelect _ -> False
  RQUpdate _ -> False
  RQDelete _ -> False
  RQCount _ -> False

  RQAddRemoteSchema _ -> True
  RQRemoveRemoteSchema _ -> True
  RQReloadRemoteSchema _ -> True
  RQAddRemoteSchema _ -> True
  RQRemoveRemoteSchema _ -> True
  RQReloadRemoteSchema _ -> True

  RQCreateEventTrigger _ -> True
  RQDeleteEventTrigger _ -> True
  RQRedeliverEvent _ -> False
  RQInvokeEventTrigger _ -> False
  RQCreateEventTrigger _ -> True
  RQDeleteEventTrigger _ -> True
  RQRedeliverEvent _ -> False
  RQInvokeEventTrigger _ -> False

  RQCreateQueryCollection _ -> True
  RQDropQueryCollection _ -> True
  RQAddQueryToCollection _ -> True
  RQDropQueryFromCollection _ -> True
  RQAddCollectionToAllowlist _ -> True
  RQDropCollectionFromAllowlist _ -> True
  RQCreateCronTrigger _ -> True
  RQDeleteCronTrigger _ -> True

  RQRunSql q -> isSchemaCacheBuildRequiredRunSQL q
  RQCreateScheduledEvent _ -> False

  RQReplaceMetadata _ -> True
  RQExportMetadata _ -> False
  RQClearMetadata _ -> True
  RQReloadMetadata _ -> True
  RQCreateQueryCollection _ -> True
  RQDropQueryCollection _ -> True
  RQAddQueryToCollection _ -> True
  RQDropQueryFromCollection _ -> True
  RQAddCollectionToAllowlist _ -> True
  RQDropCollectionFromAllowlist _ -> True

  RQCreateAction _ -> True
  RQDropAction _ -> True
  RQUpdateAction _ -> True
  RQCreateActionPermission _ -> True
  RQDropActionPermission _ -> True
  RQRunSql q -> isSchemaCacheBuildRequiredRunSQL q

  RQDumpInternalState _ -> False
  RQSetCustomTypes _ -> True
  RQReplaceMetadata _ -> True
  RQExportMetadata _ -> False
  RQClearMetadata _ -> True
  RQReloadMetadata _ -> True

  RQBulk qs -> any queryModifiesSchemaCache qs
  RQCreateAction _ -> True
  RQDropAction _ -> True
  RQUpdateAction _ -> True
  RQCreateActionPermission _ -> True
  RQDropActionPermission _ -> True

  RQDumpInternalState _ -> False
  RQSetCustomTypes _ -> True

  RQBulk qs -> any queryModifiesSchemaCache qs
queryModifiesSchemaCache (RQV2 qi) = case qi of
  RQV2TrackTable _ -> True
  RQV2SetTableCustomFields _ -> True
@ -376,6 +388,11 @@ runQueryM rq = withPathK "args" $ case rq of
  RQRedeliverEvent q -> runRedeliverEvent q
  RQInvokeEventTrigger q -> runInvokeEventTrigger q

  RQCreateCronTrigger q -> runCreateCronTrigger q
  RQDeleteCronTrigger q -> runDeleteCronTrigger q

  RQCreateScheduledEvent q -> runCreateScheduledEvent q

  RQCreateQueryCollection q -> runCreateCollection q
  RQDropQueryCollection q -> runDropCollection q
  RQAddQueryToCollection q -> runAddQueryToCollection q
@ -411,76 +428,81 @@ runQueryM rq = withPathK "args" $ case rq of
requiresAdmin :: RQLQuery -> Bool
requiresAdmin = \case
  RQV1 q -> case q of
    RQAddExistingTableOrView _ -> True
|
||||
RQTrackTable _ -> True
|
||||
RQUntrackTable _ -> True
|
||||
RQSetTableIsEnum _ -> True
|
||||
RQAddExistingTableOrView _ -> True
|
||||
RQTrackTable _ -> True
|
||||
RQUntrackTable _ -> True
|
||||
RQSetTableIsEnum _ -> True
|
||||
|
||||
RQTrackFunction _ -> True
|
||||
RQUntrackFunction _ -> True
|
||||
RQTrackFunction _ -> True
|
||||
RQUntrackFunction _ -> True
|
||||
|
||||
RQCreateObjectRelationship _ -> True
|
||||
RQCreateArrayRelationship _ -> True
|
||||
RQDropRelationship _ -> True
|
||||
RQSetRelationshipComment _ -> True
|
||||
RQRenameRelationship _ -> True
|
||||
RQCreateObjectRelationship _ -> True
|
||||
RQCreateArrayRelationship _ -> True
|
||||
RQDropRelationship _ -> True
|
||||
RQSetRelationshipComment _ -> True
|
||||
RQRenameRelationship _ -> True
|
||||
|
||||
RQAddComputedField _ -> True
|
||||
RQDropComputedField _ -> True
|
||||
RQAddComputedField _ -> True
|
||||
RQDropComputedField _ -> True
|
||||
|
||||
RQCreateInsertPermission _ -> True
|
||||
RQCreateSelectPermission _ -> True
|
||||
RQCreateUpdatePermission _ -> True
|
||||
RQCreateDeletePermission _ -> True
|
||||
RQCreateInsertPermission _ -> True
|
||||
RQCreateSelectPermission _ -> True
|
||||
RQCreateUpdatePermission _ -> True
|
||||
RQCreateDeletePermission _ -> True
|
||||
|
||||
RQDropInsertPermission _ -> True
|
||||
RQDropSelectPermission _ -> True
|
||||
RQDropUpdatePermission _ -> True
|
||||
RQDropDeletePermission _ -> True
|
||||
RQSetPermissionComment _ -> True
|
||||
RQDropInsertPermission _ -> True
|
||||
RQDropSelectPermission _ -> True
|
||||
RQDropUpdatePermission _ -> True
|
||||
RQDropDeletePermission _ -> True
|
||||
RQSetPermissionComment _ -> True
|
||||
|
||||
RQGetInconsistentMetadata _ -> True
|
||||
RQDropInconsistentMetadata _ -> True
|
||||
RQGetInconsistentMetadata _ -> True
|
||||
RQDropInconsistentMetadata _ -> True
|
||||
|
||||
RQInsert _ -> False
|
||||
RQSelect _ -> False
|
||||
RQUpdate _ -> False
|
||||
RQDelete _ -> False
|
||||
RQCount _ -> False
|
||||
RQInsert _ -> False
|
||||
RQSelect _ -> False
|
||||
RQUpdate _ -> False
|
||||
RQDelete _ -> False
|
||||
RQCount _ -> False
|
||||
|
||||
RQAddRemoteSchema _ -> True
|
||||
RQRemoveRemoteSchema _ -> True
|
||||
RQReloadRemoteSchema _ -> True
|
||||
RQAddRemoteSchema _ -> True
|
||||
RQRemoveRemoteSchema _ -> True
|
||||
RQReloadRemoteSchema _ -> True
|
||||
|
||||
RQCreateEventTrigger _ -> True
|
||||
RQDeleteEventTrigger _ -> True
|
||||
RQRedeliverEvent _ -> True
|
||||
RQInvokeEventTrigger _ -> True
|
||||
RQCreateEventTrigger _ -> True
|
||||
RQDeleteEventTrigger _ -> True
|
||||
RQRedeliverEvent _ -> True
|
||||
RQInvokeEventTrigger _ -> True
|
||||
|
||||
RQCreateQueryCollection _ -> True
|
||||
RQDropQueryCollection _ -> True
|
||||
RQAddQueryToCollection _ -> True
|
||||
RQDropQueryFromCollection _ -> True
|
||||
RQAddCollectionToAllowlist _ -> True
|
||||
RQDropCollectionFromAllowlist _ -> True
|
||||
RQCreateCronTrigger _ -> True
|
||||
RQDeleteCronTrigger _ -> True
|
||||
|
||||
RQReplaceMetadata _ -> True
|
||||
RQClearMetadata _ -> True
|
||||
RQExportMetadata _ -> True
|
||||
RQReloadMetadata _ -> True
|
||||
RQCreateScheduledEvent _ -> True
|
||||
|
||||
RQCreateAction _ -> True
|
||||
RQDropAction _ -> True
|
||||
RQUpdateAction _ -> True
|
||||
RQCreateActionPermission _ -> True
|
||||
RQDropActionPermission _ -> True
|
||||
RQCreateQueryCollection _ -> True
|
||||
RQDropQueryCollection _ -> True
|
||||
RQAddQueryToCollection _ -> True
|
||||
RQDropQueryFromCollection _ -> True
|
||||
RQAddCollectionToAllowlist _ -> True
|
||||
RQDropCollectionFromAllowlist _ -> True
|
||||
|
||||
RQDumpInternalState _ -> True
|
||||
RQSetCustomTypes _ -> True
|
||||
RQReplaceMetadata _ -> True
|
||||
RQClearMetadata _ -> True
|
||||
RQExportMetadata _ -> True
|
||||
RQReloadMetadata _ -> True
|
||||
|
||||
RQRunSql _ -> True
|
||||
RQCreateAction _ -> True
|
||||
RQDropAction _ -> True
|
||||
RQUpdateAction _ -> True
|
||||
RQCreateActionPermission _ -> True
|
||||
RQDropActionPermission _ -> True
|
||||
|
||||
RQBulk qs -> any requiresAdmin qs
|
||||
RQDumpInternalState _ -> True
|
||||
RQSetCustomTypes _ -> True
|
||||
|
||||
RQRunSql _ -> True
|
||||
|
||||
RQBulk qs -> any requiresAdmin qs
|
||||
|
||||
RQV2 q -> case q of
|
||||
RQV2TrackTable _ -> True
|
||||
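For orientation, the new query types above surface on the existing `/v1/query` metadata endpoint, which is how the Python test suite later in this diff exercises them. A minimal sketch of invoking `create_cron_trigger` over HTTP; the URL and admin secret below are placeholder values, not part of this change:

```python
# Sketch only: HGE_URL and ADMIN_SECRET are hypothetical local values.
import requests

HGE_URL = "http://localhost:8080"
ADMIN_SECRET = "my-admin-secret"

def create_cron_trigger():
    # Mirrors the RQCreateCronTrigger / runCreateCronTrigger path added above.
    payload = {
        "type": "create_cron_trigger",
        "args": {
            "name": "sample_cron",
            "webhook": "https://example.com/cron",
            "schedule": "0 21 * * 1-5",   # 9pm on weekdays
            "payload": {"report": "end_of_day"},
            "include_in_metadata": False,
        },
    }
    resp = requests.post(f"{HGE_URL}/v1/query", json=payload,
                         headers={"X-Hasura-Admin-Secret": ADMIN_SECRET})
    resp.raise_for_status()
    return resp.json()
```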
@ -125,7 +125,7 @@ mkJwtCtx JWTConfig{..} httpManager logger = do
  Nothing -> return ref
  Just time -> do
    void $ liftIO $ forkImmortal "jwkRefreshCtrl" logger $
      jwkRefreshCtrl logger httpManager url ref (fromUnits time)
      jwkRefreshCtrl logger httpManager url ref (convertDuration time)
  return ref

withJwkError act = do
@ -128,7 +128,7 @@ jwkRefreshCtrl logger manager url ref time = liftIO $ do
  res <- runExceptT $ updateJwkRef logger manager url ref
  mTime <- either (const $ logNotice >> return Nothing) return res
  -- if can't parse time from header, defaults to 1 min
  let delay = maybe (minutes 1) fromUnits mTime
  let delay = maybe (minutes 1) (convertDuration) mTime
  C.sleep delay
  where
    logNotice = do
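The `fromUnits` to `convertDuration` rename above does not change the control flow: the refresher fetches the JWK set, then sleeps for a duration derived from the response's caching headers, falling back to one minute when no duration can be parsed. A rough Python analogue, with `fetch_jwk_set` as a hypothetical stand-in:

```python
import time

def jwk_refresh_loop(fetch_jwk_set):
    """fetch_jwk_set() returns a refresh delay in seconds, or None when the
    expiry time cannot be parsed from the response headers."""
    while True:
        try:
            delay = fetch_jwk_set()
        except Exception:
            delay = None  # mirrors logNotice: log the failure and retry
        # if we can't parse the time from the header, default to 1 minute
        time.sleep(delay if delay is not None else 60.0)
```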
@ -77,7 +77,7 @@ instance ToEngineLog MigrationResult Hasura where
  }

-- A migration and (hopefully) also its inverse if we have it.
-- Polymorphic because `m` can be any `MonadTx`, `MonadIO` when
-- used in the `migrations` function below.
data MigrationPair m = MigrationPair
  { mpMigrate :: m ()
@ -159,12 +159,12 @@ migrateCatalog migrationTime = do
    pure (MRMigrated previousVersion, schemaCache)
  where
    neededMigrations = dropWhile ((/= previousVersion) . fst) (migrations False)

    buildCacheAndRecreateSystemMetadata :: m (RebuildableSchemaCache m)
    buildCacheAndRecreateSystemMetadata = do
      schemaCache <- buildRebuildableSchemaCache
      view _2 <$> runCacheRWT schemaCache recreateSystemMetadata

    updateCatalogVersion = setCatalogVersion latestCatalogVersionString migrationTime

    doesSchemaExist schemaName =
@ -197,29 +197,29 @@ downgradeCatalog opts time = do
    downgradeFrom previousVersion
      | previousVersion == dgoTargetVersion opts = do
          pure MRNothingToDo
      | otherwise =
          case neededDownMigrations (dgoTargetVersion opts) of
            Left reason ->
              throw400 NotSupported $
                "This downgrade path (from "
                <> previousVersion <> " to "
                <> dgoTargetVersion opts <>
                ") is not supported, because "
                <> reason
            Right path -> do
              sequence_ path
              unless (dgoDryRun opts) do
                setCatalogVersion (dgoTargetVersion opts) time
              pure (MRMigrated previousVersion)

      where
        neededDownMigrations newVersion =
          downgrade previousVersion newVersion
            (reverse (migrations (dgoDryRun opts)))

        downgrade
          :: T.Text
          -> T.Text
          -> [(T.Text, MigrationPair m)]
          -> Either T.Text [m ()]
        downgrade lower upper = skipFutureDowngrades where
@ -237,7 +237,7 @@ downgradeCatalog opts time = do
          | otherwise = skipFutureDowngrades xs

        dropOlderDowngrades [] = Left "the target version is unrecognized."
        dropOlderDowngrades ((x, MigrationPair{ mpDown = Nothing }):_) =
          Left $ "there is no available migration back to version " <> x <> "."
        dropOlderDowngrades ((x, MigrationPair{ mpDown = Just y }):xs)
          | x == upper = Right [y]
@ -271,7 +271,7 @@ migrations dryRun =
      if exists
        then [| Just (runTxOrPrint $(Q.sqlFromFile path)) |]
        else [| Nothing |]

    migrationsFromFile = map $ \(to :: Integer) ->
      let from = to - 1
      in [| ( $(TH.lift $ T.pack (show from))
@ -288,7 +288,7 @@ migrations dryRun =
  where
    runTxOrPrint :: Q.Query -> m ()
    runTxOrPrint
      | dryRun =
          liftIO . TIO.putStrLn . Q.getQueryText
      | otherwise = runTx
@ -419,6 +419,25 @@ recreateSystemMetadata = do
      , arrayRel $$(nonEmptyText "permissions") $ manualConfig "hdb_catalog" "hdb_permission_agg"
        [("role_name", "role_name")]
      ]
    , table "hdb_catalog" "hdb_cron_triggers"
      [ arrayRel $$(nonEmptyText "cron_events") $ RUFKeyOn $
          ArrRelUsingFKeyOn (QualifiedObject "hdb_catalog" "hdb_cron_events") "trigger_name"
      ]
    , table "hdb_catalog" "hdb_cron_events"
      [ objectRel $$(nonEmptyText "cron_trigger") $ RUFKeyOn "trigger_name"
      , arrayRel $$(nonEmptyText "cron_event_logs") $ RUFKeyOn $
          ArrRelUsingFKeyOn (QualifiedObject "hdb_catalog" "hdb_cron_event_invocation_logs") "event_id"
      ]
    , table "hdb_catalog" "hdb_cron_event_invocation_logs"
      [ objectRel $$(nonEmptyText "cron_event") $ RUFKeyOn "event_id"
      ]
    , table "hdb_catalog" "hdb_scheduled_events"
      [ arrayRel $$(nonEmptyText "scheduled_event_logs") $ RUFKeyOn $
          ArrRelUsingFKeyOn (QualifiedObject "hdb_catalog" "hdb_scheduled_event_invocation_logs") "event_id"
      ]
    , table "hdb_catalog" "hdb_scheduled_event_invocation_logs"
      [ objectRel $$(nonEmptyText "scheduled_event") $ RUFKeyOn "event_id"
      ]
    ]

  tableNameMapping =
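The downgrade search above reads naturally as two phases: skip entries newer than the version we are currently on, then collect down-migrations until the target version is reached, failing if any step along the way lacks one. A simplified sketch under stated assumptions (`run_35_to_34` and `run_34_to_33` are hypothetical callables; entries are assumed newest-first, keyed by the version each down-migration leaves from):

```python
def downgrade_path(current, target, migrations):
    """migrations: [(version, down_or_None), ...], newest first."""
    path, in_range = [], False
    for version, down in migrations:
        if version == current:
            in_range = True              # stop skipping future downgrades
        if not in_range:
            continue
        if version == target:
            return path                  # reached the target version
        if down is None:
            raise ValueError("there is no available migration back to version " + version)
        path.append(down)
    raise ValueError("the target version is unrecognized.")

# downgrade_path("35", "34", [("35", run_35_to_34), ("34", run_34_to_33)])
# == [run_35_to_34]
```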
@ -16,11 +16,11 @@ import Data.FileEmbed (embedStringFile)

-- | The current catalog schema version. We store this in a file
-- because we want to append the current version to the catalog_versions file
-- when tagging a new release, in @tag-release.sh@.
latestCatalogVersion :: Integer
latestCatalogVersion =
  $(do let s = $(embedStringFile "src-rsr/catalog_version.txt")
       TH.lift (read s :: Integer))

latestCatalogVersionString :: T.Text
latestCatalogVersionString = T.pack $ show latestCatalogVersion
@ -148,36 +148,34 @@ recordTimingMetric reqDimensions RequestTimings{..} = liftIO $ do
-- | The final shape of this part of our metrics data JSON. This should allow
-- reasonably efficient querying using GIN indexes and JSONB containment
-- operations (which treat arrays as sets).
data ServiceTimingMetrics
  = ServiceTimingMetrics
  { collectionTag :: Int
  -- ^ This is set to a new unique value when the counters reset (e.g. because of a restart)
  , serviceTimingMetrics :: [ServiceTimingMetric]
  }
  deriving (Show, Generic, Eq)

data ServiceTimingMetric
  = ServiceTimingMetric
  { dimensions :: RequestDimensions
  , bucket :: RunningTimeBucket
  , metrics :: RequestTimingsCount
  }
  deriving (Show, Generic, Eq)

$(A.deriveJSON (A.aesonDrop 5 A.snakeCase) ''RequestTimingsCount)
$(A.deriveJSON (A.aesonDrop 5 A.snakeCase) ''RequestDimensions)

instance A.ToJSON ServiceTimingMetric
instance A.FromJSON ServiceTimingMetric
instance A.ToJSON ServiceTimingMetrics
instance A.FromJSON ServiceTimingMetrics

dumpServiceTimingMetrics :: MonadIO m=> m ServiceTimingMetrics
dumpServiceTimingMetrics :: MonadIO m => m ServiceTimingMetrics
dumpServiceTimingMetrics = liftIO $ do
  cs <- readIORef requestCounters
  let serviceTimingMetrics = flip map (HM.toList cs) $
        \((dimensions, bucket), metrics)-> ServiceTimingMetric{..}
      collectionTag = round approxStartTime
  return ServiceTimingMetrics{..}
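As a reading aid, the serialized `ServiceTimingMetrics` value should come out roughly like the hand-written sample below: the outer records keep their Haskell field names via the generic instances, while the `aesonDrop`/`snakeCase`-derived inner records drop their prefixes. Sample values are made up:

```python
sample_service_timing_metrics = {
    "collectionTag": 1580000000,  # new unique value after each counter reset
    "serviceTimingMetrics": [
        {
            "dimensions": {},     # RequestDimensions, snake_cased fields elided
            "bucket": 0.05,       # RunningTimeBucket
            "metrics": {},        # RequestTimingsCount, snake_cased fields elided
        }
    ],
}
```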
@ -9,10 +9,9 @@ import Data.Aeson
import Data.Aeson.Types
import Data.Hashable
import Hasura.Prelude
import Language.Haskell.TH.Syntax (Lift)
import Network.URI

import qualified Data.Text as T

instance {-# INCOHERENT #-} FromJSON URI where
  parseJSON (String uri) = do
@ -26,7 +25,5 @@ instance {-# INCOHERENT #-} ToJSON URI where
instance {-# INCOHERENT #-} ToJSONKey URI where
  toJSONKey = toJSONKeyText (T.pack . show)

instance Lift URI

instance Hashable URI where
  hashWithSalt i = hashWithSalt i . (T.pack . show)
@ -9,7 +9,8 @@ select
  'allowlist_collections', allowlist.item,
  'computed_fields', computed_field.items,
  'custom_types', custom_types.item,
  'actions', actions.items
  'actions', actions.items,
  'cron_triggers', cron_triggers.items
)
from
(
@ -214,4 +215,23 @@ from
        hdb_catalog.hdb_action_permission hap
      where hap.action_name = ha.action_name
    ) p on 'true'
) as actions
) as actions,
(
  select
    coalesce(
      json_agg(
        json_build_object(
          'name', name,
          'webhook_conf', webhook_conf :: json,
          'cron_schedule', cron_schedule,
          'payload', payload :: json,
          'retry_conf', retry_conf :: json,
          'header_conf', header_conf :: json,
          'comment', comment
        )
      ),
      '[]'
    ) as items
  from
    hdb_catalog.hdb_cron_triggers
) as cron_triggers
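Each element aggregated into `cron_triggers.items` above carries the full trigger definition from `hdb_catalog.hdb_cron_triggers`. As a Python value, one item would look roughly like this (sample values only):

```python
cron_trigger_item = {
    "name": "sample_cron",
    "webhook_conf": "https://example.com/cron",  # stored JSON-encoded
    "cron_schedule": "0 21 * * 1-5",
    "payload": {"report": "end_of_day"},
    "retry_conf": {"num_retries": 1},
    "header_conf": [{"name": "header-key", "value": "header-value"}],
    "comment": None,
}
```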
@ -1 +1 @@
34
35
@ -719,3 +719,89 @@ CREATE VIEW hdb_catalog.hdb_role AS
    SELECT role_name FROM hdb_catalog.hdb_action_permission
  ) q
);

CREATE TABLE hdb_catalog.hdb_cron_triggers
(
  name TEXT PRIMARY KEY,
  webhook_conf JSON NOT NULL,
  cron_schedule TEXT NOT NULL,
  payload JSON,
  retry_conf JSON,
  header_conf JSON,
  include_in_metadata BOOLEAN NOT NULL DEFAULT FALSE,
  comment TEXT
);

CREATE TABLE hdb_catalog.hdb_cron_events
(
  id TEXT DEFAULT gen_random_uuid() PRIMARY KEY,
  trigger_name TEXT NOT NULL,
  scheduled_time TIMESTAMPTZ NOT NULL,
  status TEXT NOT NULL DEFAULT 'scheduled',
  tries INTEGER NOT NULL DEFAULT 0,
  created_at TIMESTAMP DEFAULT NOW(),
  next_retry_at TIMESTAMPTZ,

  FOREIGN KEY (trigger_name) REFERENCES hdb_catalog.hdb_cron_triggers(name)
    ON UPDATE CASCADE ON DELETE CASCADE,
  CONSTRAINT valid_status CHECK (status IN ('scheduled','locked','delivered','error','dead'))
);

CREATE INDEX hdb_cron_event_status ON hdb_catalog.hdb_cron_events (status);

CREATE TABLE hdb_catalog.hdb_cron_event_invocation_logs
(
  id TEXT DEFAULT gen_random_uuid() PRIMARY KEY,
  event_id TEXT,
  status INTEGER,
  request JSON,
  response JSON,
  created_at TIMESTAMP DEFAULT NOW(),

  FOREIGN KEY (event_id) REFERENCES hdb_catalog.hdb_cron_events (id)
    ON UPDATE CASCADE ON DELETE CASCADE
);

CREATE VIEW hdb_catalog.hdb_cron_events_stats AS
  SELECT ct.name,
         COALESCE(ce.upcoming_events_count,0) as upcoming_events_count,
         COALESCE(ce.max_scheduled_time, now()) as max_scheduled_time
  FROM hdb_catalog.hdb_cron_triggers ct
  LEFT JOIN
    ( SELECT trigger_name, count(*) as upcoming_events_count, max(scheduled_time) as max_scheduled_time
      FROM hdb_catalog.hdb_cron_events
      WHERE tries = 0 and status = 'scheduled'
      GROUP BY trigger_name
    ) ce
  ON ct.name = ce.trigger_name;

CREATE TABLE hdb_catalog.hdb_scheduled_events
(
  id TEXT DEFAULT gen_random_uuid() PRIMARY KEY,
  webhook_conf JSON NOT NULL,
  scheduled_time TIMESTAMPTZ NOT NULL,
  retry_conf JSON,
  payload JSON,
  header_conf JSON,
  status TEXT NOT NULL DEFAULT 'scheduled',
  tries INTEGER NOT NULL DEFAULT 0,
  created_at TIMESTAMP DEFAULT NOW(),
  next_retry_at TIMESTAMPTZ,
  comment TEXT,
  CONSTRAINT valid_status CHECK (status IN ('scheduled','locked','delivered','error','dead'))
);

CREATE INDEX hdb_scheduled_event_status ON hdb_catalog.hdb_scheduled_events (status);

CREATE TABLE hdb_catalog.hdb_scheduled_event_invocation_logs
(
  id TEXT DEFAULT gen_random_uuid() PRIMARY KEY,
  event_id TEXT,
  status INTEGER,
  request JSON,
  response JSON,
  created_at TIMESTAMP DEFAULT NOW(),

  FOREIGN KEY (event_id) REFERENCES hdb_catalog.hdb_scheduled_events (id)
    ON DELETE CASCADE ON UPDATE CASCADE
);
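The `hdb_cron_events_stats` view above reports, per cron trigger, how many untried 'scheduled' events remain and the latest `scheduled_time`, which is presumably what lets the server decide when another batch of events needs to be generated (the test suite below observes batches of 100). A quick way to watch it from Python; the connection string is a placeholder:

```python
import psycopg2

with psycopg2.connect("postgresql://localhost/hasura") as conn:
    with conn.cursor() as cur:
        cur.execute(
            "SELECT name, upcoming_events_count, max_scheduled_time "
            "FROM hdb_catalog.hdb_cron_events_stats")
        for name, count, max_time in cur.fetchall():
            print(name, count, max_time)
```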
@ -2,4 +2,4 @@ DO language plpgsql $$
BEGIN
  RAISE NOTICE 'Nothing to do';
END
$$;
server/src-rsr/migrations/34_to_35.sql (new file, 86 lines)
@ -0,0 +1,86 @@
CREATE TABLE hdb_catalog.hdb_cron_triggers
(
  name TEXT PRIMARY KEY,
  webhook_conf JSON NOT NULL,
  cron_schedule TEXT NOT NULL,
  payload JSON,
  retry_conf JSON,
  header_conf JSON,
  include_in_metadata BOOLEAN NOT NULL DEFAULT FALSE,
  comment TEXT
);

CREATE TABLE hdb_catalog.hdb_cron_events
(
  id TEXT DEFAULT gen_random_uuid() PRIMARY KEY,
  trigger_name TEXT NOT NULL,
  scheduled_time TIMESTAMPTZ NOT NULL,
  additional_payload JSON,
  status TEXT NOT NULL DEFAULT 'scheduled',
  tries INTEGER NOT NULL DEFAULT 0,
  created_at TIMESTAMP DEFAULT NOW(),
  next_retry_at TIMESTAMPTZ,

  FOREIGN KEY (trigger_name) REFERENCES hdb_catalog.hdb_cron_triggers(name)
    ON UPDATE CASCADE ON DELETE CASCADE,
  CONSTRAINT valid_status CHECK (status IN ('scheduled','locked','delivered','error','dead'))
);

CREATE INDEX hdb_cron_event_status ON hdb_catalog.hdb_cron_events (status);

CREATE TABLE hdb_catalog.hdb_cron_event_invocation_logs
(
  id TEXT DEFAULT gen_random_uuid() PRIMARY KEY,
  event_id TEXT,
  status INTEGER,
  request JSON,
  response JSON,
  created_at TIMESTAMP DEFAULT NOW(),

  FOREIGN KEY (event_id) REFERENCES hdb_catalog.hdb_cron_events (id)
    ON UPDATE CASCADE ON DELETE CASCADE
);

CREATE VIEW hdb_catalog.hdb_cron_events_stats AS
  SELECT ct.name,
         COALESCE(ce.upcoming_events_count,0) as upcoming_events_count,
         COALESCE(ce.max_scheduled_time, now()) as max_scheduled_time
  FROM hdb_catalog.hdb_cron_triggers ct
  LEFT JOIN
    ( SELECT trigger_name, count(*) as upcoming_events_count, max(scheduled_time) as max_scheduled_time
      FROM hdb_catalog.hdb_cron_events
      WHERE tries = 0 AND status = 'scheduled'
      GROUP BY trigger_name
    ) ce
  ON ct.name = ce.trigger_name;

CREATE TABLE hdb_catalog.hdb_scheduled_events
(
  id TEXT DEFAULT gen_random_uuid() PRIMARY KEY,
  webhook_conf JSON NOT NULL,
  scheduled_time TIMESTAMPTZ NOT NULL,
  retry_conf JSON,
  payload JSON,
  header_conf JSON,
  status TEXT NOT NULL DEFAULT 'scheduled',
  tries INTEGER NOT NULL DEFAULT 0,
  created_at TIMESTAMP DEFAULT NOW(),
  next_retry_at TIMESTAMPTZ,
  comment TEXT,
  CONSTRAINT valid_status CHECK (status IN ('scheduled','locked','delivered','error','dead'))
);

CREATE INDEX hdb_scheduled_event_status ON hdb_catalog.hdb_scheduled_events (status);

CREATE TABLE hdb_catalog.hdb_scheduled_event_invocation_logs
(
  id TEXT DEFAULT gen_random_uuid() PRIMARY KEY,
  event_id TEXT,
  status INTEGER,
  request JSON,
  response JSON,
  created_at TIMESTAMP DEFAULT NOW(),

  FOREIGN KEY (event_id) REFERENCES hdb_catalog.hdb_scheduled_events (id)
    ON DELETE CASCADE ON UPDATE CASCADE
);
server/src-rsr/migrations/35_to_34.sql (new file, 18 lines)
@ -0,0 +1,18 @@
DROP TABLE hdb_catalog.hdb_scheduled_event_invocation_logs;
DROP TABLE hdb_catalog.hdb_scheduled_events;
DROP VIEW hdb_catalog.hdb_cron_events_stats;
DROP TABLE hdb_catalog.hdb_cron_event_invocation_logs;
DROP TABLE hdb_catalog.hdb_cron_events;
DROP TABLE hdb_catalog.hdb_cron_triggers;

DELETE FROM hdb_catalog.hdb_relationship
where table_schema = 'hdb_catalog' and
      table_name in
      ('hdb_scheduled_event_invocation_logs','hdb_scheduled_events','hdb_cron_event_invocation_logs','hdb_cron_events'
      ,'hdb_cron_triggers');

DELETE FROM hdb_catalog.hdb_table
where table_schema = 'hdb_catalog' and
      table_name in
      ('hdb_scheduled_event_invocation_logs','hdb_scheduled_events','hdb_cron_event_invocation_logs','hdb_cron_events'
      ,'hdb_cron_triggers');
@ -1,11 +1,11 @@
module Data.TimeSpec (spec) where
-- | Time-related properties we care about.

import Prelude
import Data.Time.Clock.Units
import Data.Time
import Data.Aeson
import Test.Hspec
import Data.Aeson
import Data.Time
import Data.Time.Clock.Units
import Prelude
import Test.Hspec

spec :: Spec
spec = do
@ -31,9 +31,9 @@ timeUnitsSpec =
    toJSON (1 :: Seconds) `shouldBe` Number 1
    decode "1.0" `shouldBe` Just (1 :: Seconds)

  it "converts with fromUnits" $ do
    fromUnits (2 :: Minutes) `shouldBe` (120 :: NominalDiffTime)
    fromUnits (60 :: Seconds) `shouldBe` (1 :: Minutes)
  it "converts with convertDuration" $ do
    convertDuration (2 :: Minutes) `shouldBe` (120 :: NominalDiffTime)
    convertDuration (60 :: Seconds) `shouldBe` (1 :: Minutes)

diffTimeSpec :: Spec
diffTimeSpec =
@ -270,12 +270,23 @@ def actions_fixture(hge_ctx):
    webhook_httpd.server_close()
    web_server.join()

@pytest.fixture(scope='class')
def scheduled_triggers_evts_webhook(request):
    webhook_httpd = EvtsWebhookServer(server_address=('127.0.0.1', 5594))
    web_server = threading.Thread(target=webhook_httpd.serve_forever)
    web_server.start()
    yield webhook_httpd
    webhook_httpd.shutdown()
    webhook_httpd.server_close()
    web_server.join()

@pytest.fixture(scope='class')
def gql_server(request, hge_ctx):
    server = HGECtxGQLServer(request.config.getoption('--pg-urls'), 5991)
    yield server
    server.teardown()


@pytest.fixture(scope='class')
def ws_client(request, hge_ctx):
    """
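Tests consume the new fixture the same way as the existing webhook fixtures: declare it as a parameter and poll it for deliveries. A minimal sketch (a hypothetical test; `get_event` blocks until a delivery arrives or the timeout expires):

```python
class TestMyScheduledWebhook:
    def test_event_arrives(self, hge_ctx, scheduled_triggers_evts_webhook):
        event = scheduled_triggers_evts_webhook.get_event(65)  # 65s timeout
        assert event['path'] == '/test'
```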
@ -412,6 +412,9 @@ class EvtsWebhookServer(ThreadedHTTPServer):
            sz = sz + 1
        return sz

    def is_queue_empty(self):
        # Queue.empty is a method; without the call parentheses this would
        # always return a truthy bound method.
        return self.resp_queue.empty()

    def teardown(self):
        self.evt_trggr_httpd.shutdown()
        self.evt_trggr_httpd.server_close()
@ -3,8 +3,7 @@ url: /v1/query
status: 400
response:
  path: $.args
  error: |-
    Error in $.types[1].possibleTypes[0].name: expected Text, encountered Null
  error: 'Error in $.types[1].possibleTypes[0].name: parsing Text failed, expected String, but encountered Null'
  code: remote-schema-error
query:
  type: add_remote_schema

@ -3,7 +3,7 @@ url: /v1/query
status: 400
response:
  path: $
  error: expected Object, encountered String
  error: parsing Object failed, expected Object, but encountered String
  code: parse-failed
query: |
  type: count

@ -3,7 +3,7 @@ url: /v1/query
status: 400
response:
  path: $
  error: expected Object, encountered String
  error: parsing Object failed, expected Object, but encountered String
  code: parse-failed
query: |
  type: count

@ -3,7 +3,7 @@ url: /v1/query
status: 400
response:
  code: parse-failed
  error: 'expected Int, encountered String'
  error: parsing Int failed, expected Number, but encountered String
  path: $.limit
query:
  type: select

@ -3,7 +3,7 @@ url: /v1/query
status: 400
response:
  code: parse-failed
  error: expected Int, encountered String
  error: parsing Int failed, expected Number, but encountered String
  path: $.offset
query:
  type: select

@ -3,7 +3,7 @@ url: /v1/query
status: 400
response:
  path: $
  error: key "where" not present
  error: key "where" not found
  code: parse-failed
query:
  type: update
@ -5,6 +5,7 @@ attrs==19.3.0
|
||||
certifi==2019.9.11
|
||||
cffi==1.13.2
|
||||
chardet==3.0.4
|
||||
croniter==0.3.31
|
||||
cryptography==2.8
|
||||
execnet==1.7.1
|
||||
graphene==2.1.8
|
||||
|
server/tests-py/test_scheduled_triggers.py (new file, 185 lines)
@ -0,0 +1,185 @@
#!/usr/bin/env python3

import pytest
from datetime import datetime,timedelta
from croniter import croniter
from validate import validate_event_webhook,validate_event_headers
from queue import Empty
import time

# The create and delete tests should ideally go in setup and teardown YAML files.
# We can't use those here because the payload is dynamic: for ad-hoc scheduled
# events the value is the current timestamp, and for cron triggers the cron
# schedule is derived from the current timestamp.

def stringify_datetime(dt):
    return dt.strftime("%Y-%m-%dT%H:%M:%S.%fZ")

class TestScheduledEvent(object):

    webhook_payload = {"foo":"baz"}

    header_conf = [
        {
            "name":"header-key",
            "value":"header-value"
        }
    ]

    url = "/v1/query"

    webhook_domain = "http://127.0.0.1:5594"

    def test_create_scheduled_event(self,hge_ctx):
        query = {
            "type":"create_scheduled_event",
            "args":{
                "webhook":'{{SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN}}/test',
                "schedule_at":stringify_datetime(datetime.utcnow()),
                "payload":self.webhook_payload,
                "headers":self.header_conf
            }
        }
        st, resp = hge_ctx.v1q(query)
        assert st == 200, resp

    def test_create_scheduled_event_with_very_old_scheduled_time(self,hge_ctx):
        query = {
            "type":"create_scheduled_event",
            "args":{
                "webhook":"{{SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN}}/",
                "schedule_at": "2020-01-01T00:00:00Z",
                "payload":self.webhook_payload,
                "headers":self.header_conf
            }
        }
        st, resp = hge_ctx.v1q(query)
        assert st == 200, resp

    def test_create_trigger_with_error_returning_webhook(self,hge_ctx):
        query = {
            "type":"create_scheduled_event",
            "args":{
                "webhook":self.webhook_domain + '/fail',
                "schedule_at": stringify_datetime(datetime.utcnow()),
                "payload":self.webhook_payload,
                "headers":self.header_conf,
                "retry_conf":{
                    "num_retries":1,
                    "retry_interval_seconds":1,
                    "timeout_seconds":1,
                    "tolerance_seconds": 21600
                }
            }
        }
        st, resp = hge_ctx.v1q(query)
        assert st == 200, resp

    def test_check_fired_webhook_event(self,hge_ctx,scheduled_triggers_evts_webhook):
        event = scheduled_triggers_evts_webhook.get_event(65)
        validate_event_webhook(event['path'],'/test')
        validate_event_headers(event['headers'],{"header-key":"header-value"})
        assert event['body'] == self.webhook_payload
        assert scheduled_triggers_evts_webhook.is_queue_empty()

    def test_check_events_statuses(self,hge_ctx):
        time.sleep(65) # sleep for at least a minute so the failed event gets retried
        query = {
            "type":"run_sql",
            "args":{
                "sql":"select status,tries from hdb_catalog.hdb_scheduled_events"
            }
        }
        st, resp = hge_ctx.v1q(query)
        assert st == 200, resp
        scheduled_event_statuses = dict(resp['result'])
        # 3 scheduled events have been created:
        # one should be dead because its timestamp was past the tolerance limit,
        # one should be delivered because all the parameters were reasonable,
        # one should be in error because the webhook returns an error state
        assert "dead" in scheduled_event_statuses
        assert "delivered" in scheduled_event_statuses
        assert int(scheduled_event_statuses['error']) == 2 # num_retries + 1

    def test_teardown_scheduled_events(self,hge_ctx):
        query = {
            "type":"run_sql",
            "args": {
                "sql":"delete from hdb_catalog.hdb_scheduled_events"
            }
        }
        st, resp = hge_ctx.v1q(query)
        assert st == 200, resp

class TestCronTrigger(object):

    cron_trigger_name = "cron_trigger"

    def test_create_cron_schedule_triggers(self,hge_ctx):
        # set the cron schedule to 30 minutes from now, to make sure that
        # none of the events are delivered during the test
        min_after_30_mins = (datetime.utcnow() + timedelta(minutes=30)).minute
        TestCronTrigger.cron_schedule = "{} * * * *".format(min_after_30_mins)

        cron_st_api_query = {
            "type":"create_cron_trigger",
            "args":{
                "name":self.cron_trigger_name,
                "webhook":"{{SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN}}" + "/foo",
                "schedule":self.cron_schedule,
                "headers":[
                    {
                        "name":"foo",
                        "value":"baz"
                    }
                ],
                "payload":{"foo":"baz"},
                "include_in_metadata":False
            }
        }
        cron_st_code,cron_st_resp = hge_ctx.v1q(cron_st_api_query)
        TestCronTrigger.init_time = datetime.utcnow()
        # the cron events are generated based on the current time; the
        # server's "now" and the test's "now" will not be exactly the same
        assert cron_st_code == 200, cron_st_resp
        assert cron_st_resp['message'] == 'success'

    def test_check_generated_cron_scheduled_events(self,hge_ctx):
        expected_schedule_timestamps = []
        iter = croniter(self.cron_schedule,self.init_time)
        for i in range(100):
            expected_schedule_timestamps.append(iter.next(datetime))
        # get the timestamps in UTC from the db to compare them with
        # the croniter-generated timestamps
        sql = '''
        select timezone('utc',scheduled_time) as scheduled_time
        from hdb_catalog.hdb_cron_events where
        trigger_name = '{}' order by scheduled_time asc;'''
        q = {
            "type":"run_sql",
            "args":{
                "sql":sql.format(self.cron_trigger_name)
            }
        }
        st,resp = hge_ctx.v1q(q)
        assert st == 200, resp
        ts_resp = resp['result'][1:]
        assert len(ts_resp) == 100
        # 100 scheduled events are generated in a single batch when the
        # scheduled events need hydration
        actual_schedule_timestamps = []
        for ts in ts_resp:
            datetime_ts = datetime.strptime(ts[0],"%Y-%m-%d %H:%M:%S")
            actual_schedule_timestamps.append(datetime_ts)
        assert actual_schedule_timestamps == expected_schedule_timestamps

    def test_delete_cron_scheduled_trigger(self,hge_ctx):
        q = {
            "type":"delete_cron_trigger",
            "args":{
                "name":self.cron_trigger_name
            }
        }
        st,resp = hge_ctx.v1q(q)
        assert st == 200, resp