-
The fourth annual Hasura User Conference
+
The HasuraCon 2024 CFP is open!
Read more
diff --git a/docs/src/components/HasuraConBanner/styles.module.scss b/docs/src/components/HasuraConBanner/styles.module.scss
index 470dc51eb1e..d5eefc7fdb3 100644
--- a/docs/src/components/HasuraConBanner/styles.module.scss
+++ b/docs/src/components/HasuraConBanner/styles.module.scss
@@ -71,10 +71,13 @@
font-size: var(--ifm-small-font-size);
font-weight: var(--ifm-font-weight-semibold);
align-self: center;
+ display: grid;
img {
width: 97px;
}
-
+ svg {
+ width: 170px;
+ }
.hasuracon23-img {
min-width: 159px;
// margin-right: 42px;
@@ -216,7 +219,7 @@ html[data-theme='dark'] {
@media (min-width: 997px) and (max-width: 1380px) {
.hasura-con-banner {
grid-template-columns: 1fr;
- grid-gap: 20px;
+ grid-gap: 20px !important;
.hasura-con-register-button {
margin-top: 20px;
}
diff --git a/docs/static/img/docs-bot-profile-pic.webp b/docs/static/img/docs-bot-profile-pic.webp
new file mode 100644
index 00000000000..6dfda860470
Binary files /dev/null and b/docs/static/img/docs-bot-profile-pic.webp differ
diff --git a/docs/static/img/hasura-ai-profile-pic.png b/docs/static/img/hasura-ai-profile-pic.png
deleted file mode 100644
index 84cce70fb6e..00000000000
Binary files a/docs/static/img/hasura-ai-profile-pic.png and /dev/null differ
diff --git a/flake.lock b/flake.lock
index 20439309d37..f7b17440852 100644
--- a/flake.lock
+++ b/flake.lock
@@ -5,11 +5,11 @@
"systems": "systems"
},
"locked": {
- "lastModified": 1694529238,
- "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
+ "lastModified": 1710146030,
+ "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
- "rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
+ "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
@@ -20,11 +20,11 @@
},
"nixpkgs": {
"locked": {
- "lastModified": 1699914561,
- "narHash": "sha256-b296O45c3Jgj8GEFg/NN7ZOJjBBCHr1o2iA4yoJ3OKE=",
+ "lastModified": 1710754590,
+ "narHash": "sha256-9LA94zYvr5a6NawEftuSdTP8HYMV0ZYdB5WG6S9Z7tI=",
"owner": "NixOS",
"repo": "nixpkgs",
- "rev": "2f8742189e9ef86961ab90a30c68eb844565578a",
+ "rev": "a089e2dc4cf2421ca29f2d5ced81badd5911fcdf",
"type": "github"
},
"original": {
diff --git a/frontend/libs/console/legacy-ce/src/lib/components/Services/Data/TableRelationships/autoRelations.js b/frontend/libs/console/legacy-ce/src/lib/components/Services/Data/TableRelationships/autoRelations.js
index fce62f82247..3166e7ab416 100644
--- a/frontend/libs/console/legacy-ce/src/lib/components/Services/Data/TableRelationships/autoRelations.js
+++ b/frontend/libs/console/legacy-ce/src/lib/components/Services/Data/TableRelationships/autoRelations.js
@@ -69,7 +69,7 @@ const isExistingArrRel = (currentArrRels, relCols, relTable) => {
currRCol = Object.values(arrRelDef.manual_configuration.column_mapping);
}
- if (currTable.name === relTable && sameRelCols(currRCol, relCols)) {
+ if (currTable?.name === relTable && sameRelCols(currRCol, relCols)) {
_isExistingArrRel = true;
break;
}
diff --git a/frontend/package.json b/frontend/package.json
index a2e6d0ffbb6..54c213fa640 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -85,7 +85,7 @@
"dom-parser": "0.1.6",
"form-urlencoded": "^6.1.0",
"format-graphql": "^1.4.0",
- "graphiql": "1.4.7",
+ "graphiql": "1.0.0-alpha.0",
"graphiql-code-exporter": "2.0.8",
"graphiql-explorer": "0.6.2",
"graphql": "14.5.8",
diff --git a/frontend/yarn.lock b/frontend/yarn.lock
index 3d4d5abc49f..b88d1453fe1 100644
--- a/frontend/yarn.lock
+++ b/frontend/yarn.lock
@@ -2647,6 +2647,18 @@ __metadata:
languageName: node
linkType: hard
+"@emotion/cache@npm:^10.0.27":
+ version: 10.0.29
+ resolution: "@emotion/cache@npm:10.0.29"
+ dependencies:
+ "@emotion/sheet": 0.9.4
+ "@emotion/stylis": 0.8.5
+ "@emotion/utils": 0.11.3
+ "@emotion/weak-memoize": 0.2.5
+ checksum: 78b37fb0c2e513c90143a927abef229e995b6738ef8a92ce17abe2ed409b38859ddda7c14d7f4854d6f4e450b6db50231532f53a7fec4903d7ae775b2ae3fd64
+ languageName: node
+ linkType: hard
+
"@emotion/cache@npm:^11.11.0, @emotion/cache@npm:^11.4.0":
version: 11.11.0
resolution: "@emotion/cache@npm:11.11.0"
@@ -2660,6 +2672,40 @@ __metadata:
languageName: node
linkType: hard
+"@emotion/core@npm:^10.0.22":
+ version: 10.3.1
+ resolution: "@emotion/core@npm:10.3.1"
+ dependencies:
+ "@babel/runtime": ^7.5.5
+ "@emotion/cache": ^10.0.27
+ "@emotion/css": ^10.0.27
+ "@emotion/serialize": ^0.11.15
+ "@emotion/sheet": 0.9.4
+ "@emotion/utils": 0.11.3
+ peerDependencies:
+ react: ">=16.3.0"
+ checksum: d2dad428e1b2cf0777badfb55e262d369273be9b2e6e9e7d61c953066c00811d544a6234db36b17ee07872ed092f4dd102bf6ffe2c76fc38d53eef3a60fddfd0
+ languageName: node
+ linkType: hard
+
+"@emotion/css@npm:^10.0.27":
+ version: 10.0.27
+ resolution: "@emotion/css@npm:10.0.27"
+ dependencies:
+ "@emotion/serialize": ^0.11.15
+ "@emotion/utils": 0.11.3
+ babel-plugin-emotion: ^10.0.27
+ checksum: 1420f5b514fc3a8500bcf90384b309b0d9acc9f687ec3a655166b55dc81d1661d6b6132ea6fe6730d0071c10da93bf9427937c22a90a18088af4ba5e11d59141
+ languageName: node
+ linkType: hard
+
+"@emotion/hash@npm:0.8.0":
+ version: 0.8.0
+ resolution: "@emotion/hash@npm:0.8.0"
+ checksum: 4b35d88a97e67275c1d990c96d3b0450451d089d1508619488fc0acb882cb1ac91e93246d471346ebd1b5402215941ef4162efe5b51534859b39d8b3a0e3ffaa
+ languageName: node
+ linkType: hard
+
"@emotion/hash@npm:^0.9.1":
version: 0.9.1
resolution: "@emotion/hash@npm:0.9.1"
@@ -2667,7 +2713,7 @@ __metadata:
languageName: node
linkType: hard
-"@emotion/is-prop-valid@npm:^0.8.3":
+"@emotion/is-prop-valid@npm:^0.8.1, @emotion/is-prop-valid@npm:^0.8.3":
version: 0.8.8
resolution: "@emotion/is-prop-valid@npm:0.8.8"
dependencies:
@@ -2720,6 +2766,19 @@ __metadata:
languageName: node
linkType: hard
+"@emotion/serialize@npm:^0.11.15, @emotion/serialize@npm:^0.11.16":
+ version: 0.11.16
+ resolution: "@emotion/serialize@npm:0.11.16"
+ dependencies:
+ "@emotion/hash": 0.8.0
+ "@emotion/memoize": 0.7.4
+ "@emotion/unitless": 0.7.5
+ "@emotion/utils": 0.11.3
+ csstype: ^2.5.7
+ checksum: 2949832fab9d803e6236f2af6aad021c09c6b6722ae910b06b4ec3bfb84d77cbecfe3eab9a7dcc269ac73e672ef4b696c7836825931670cb110731712e331438
+ languageName: node
+ linkType: hard
+
"@emotion/serialize@npm:^1.1.2":
version: 1.1.2
resolution: "@emotion/serialize@npm:1.1.2"
@@ -2733,6 +2792,13 @@ __metadata:
languageName: node
linkType: hard
+"@emotion/sheet@npm:0.9.4":
+ version: 0.9.4
+ resolution: "@emotion/sheet@npm:0.9.4"
+ checksum: 53bb833b4bb69ea2af04e1ecad164f78fb2614834d2820f584c909686a8e047c44e96a6e824798c5c558e6d95e10772454a9e5c473c5dbe0d198e50deb2815bc
+ languageName: node
+ linkType: hard
+
"@emotion/sheet@npm:^1.2.2":
version: 1.2.2
resolution: "@emotion/sheet@npm:1.2.2"
@@ -2760,14 +2826,14 @@ __metadata:
languageName: node
linkType: hard
-"@emotion/stylis@npm:^0.8.4":
+"@emotion/stylis@npm:0.8.5, @emotion/stylis@npm:^0.8.4":
version: 0.8.5
resolution: "@emotion/stylis@npm:0.8.5"
checksum: 67ff5958449b2374b329fb96e83cb9025775ffe1e79153b499537c6c8b2eb64b77f32d7b5d004d646973662356ceb646afd9269001b97c54439fceea3203ce65
languageName: node
linkType: hard
-"@emotion/unitless@npm:^0.7.4":
+"@emotion/unitless@npm:0.7.5, @emotion/unitless@npm:^0.7.4":
version: 0.7.5
resolution: "@emotion/unitless@npm:0.7.5"
checksum: f976e5345b53fae9414a7b2e7a949aa6b52f8bdbcc84458b1ddc0729e77ba1d1dfdff9960e0da60183877873d3a631fa24d9695dd714ed94bcd3ba5196586a6b
@@ -2790,6 +2856,13 @@ __metadata:
languageName: node
linkType: hard
+"@emotion/utils@npm:0.11.3":
+ version: 0.11.3
+ resolution: "@emotion/utils@npm:0.11.3"
+ checksum: 9c4204bda84f9acd153a9be9478a83f9baa74d5d7a4c21882681c4d1b86cd113b84540cb1f92e1c30313b5075f024da2658dbc553f5b00776ef9b6ec7991c0c9
+ languageName: node
+ linkType: hard
+
"@emotion/utils@npm:^1.2.1":
version: 1.2.1
resolution: "@emotion/utils@npm:1.2.1"
@@ -2797,6 +2870,13 @@ __metadata:
languageName: node
linkType: hard
+"@emotion/weak-memoize@npm:0.2.5":
+ version: 0.2.5
+ resolution: "@emotion/weak-memoize@npm:0.2.5"
+ checksum: 27d402b0c683b94658220b6d47840346ee582329ca2a15ec9c233492e0f1a27687ccb233b76eedc922f2e185e444cc89f7b97a81a1d3e5ae9f075bab08e965ea
+ languageName: node
+ linkType: hard
+
"@emotion/weak-memoize@npm:^0.3.1":
version: 0.3.1
resolution: "@emotion/weak-memoize@npm:0.3.1"
@@ -3114,19 +3194,6 @@ __metadata:
languageName: node
linkType: hard
-"@graphiql/toolkit@npm:^0.3.2":
- version: 0.3.2
- resolution: "@graphiql/toolkit@npm:0.3.2"
- dependencies:
- "@n1ru4l/push-pull-async-iterable-iterator": ^3.0.0
- graphql-ws: ^4.9.0
- meros: ^1.1.4
- peerDependencies:
- graphql: ">= v14.5.0 <= 15.6.1"
- checksum: 3d69ba8a75047d3d5eb4226d6366e3664ac5326afddd72690f230de4a9bbec173f96d648376c1b4472219b917c7e99844a34d54d683f0bc3b25a0f119b5a338e
- languageName: node
- linkType: hard
-
"@graphql-codegen/cli@npm:2.13.8":
version: 2.13.8
resolution: "@graphql-codegen/cli@npm:2.13.8"
@@ -5192,6 +5259,15 @@ __metadata:
languageName: node
linkType: hard
+"@mdx-js/react@npm:^1.5.2":
+ version: 1.6.22
+ resolution: "@mdx-js/react@npm:1.6.22"
+ peerDependencies:
+ react: ^16.13.1 || ^17.0.0
+ checksum: bc84bd514bc127f898819a0c6f1a6915d9541011bd8aefa1fcc1c9bea8939f31051409e546bdec92babfa5b56092a16d05ef6d318304ac029299df5181dc94c8
+ languageName: node
+ linkType: hard
+
"@mdx-js/react@npm:^2.1.5":
version: 2.3.0
resolution: "@mdx-js/react@npm:2.3.0"
@@ -5246,13 +5322,6 @@ __metadata:
languageName: node
linkType: hard
-"@n1ru4l/push-pull-async-iterable-iterator@npm:^3.0.0":
- version: 3.2.0
- resolution: "@n1ru4l/push-pull-async-iterable-iterator@npm:3.2.0"
- checksum: 2c7bdbc6c3d8f0aa05c2e3e80c4a856f766e6113a86198fd0df2448117f7cfa71ee2946f6aa7e745caec6ac04d19a5a61c6c80c6fdbf686d43984b3791f0a04d
- languageName: node
- linkType: hard
-
"@ndelangen/get-tarball@npm:^3.0.7":
version: 3.0.9
resolution: "@ndelangen/get-tarball@npm:3.0.9"
@@ -9407,7 +9476,7 @@ __metadata:
languageName: node
linkType: hard
-"@styled-system/css@npm:^5.1.5":
+"@styled-system/css@npm:^5.0.16, @styled-system/css@npm:^5.1.5":
version: 5.1.5
resolution: "@styled-system/css@npm:5.1.5"
checksum: 0d3579ae82f5f53412c22e675aec9f77fa17b52deddc03d680340d8187006f1698ef0577db30a3c57ee0204f83ec61bb8a01105c3f0d60ca5c925a70175b5358
@@ -13418,6 +13487,24 @@ __metadata:
languageName: node
linkType: hard
+"babel-plugin-emotion@npm:^10.0.27":
+ version: 10.2.2
+ resolution: "babel-plugin-emotion@npm:10.2.2"
+ dependencies:
+ "@babel/helper-module-imports": ^7.0.0
+ "@emotion/hash": 0.8.0
+ "@emotion/memoize": 0.7.4
+ "@emotion/serialize": ^0.11.16
+ babel-plugin-macros: ^2.0.0
+ babel-plugin-syntax-jsx: ^6.18.0
+ convert-source-map: ^1.5.0
+ escape-string-regexp: ^1.0.5
+ find-root: ^1.1.0
+ source-map: ^0.5.7
+ checksum: 763f38c67ffbe7d091691d68c74686ba478296cc24716699fb5b0feddce1b1b47878a20b0bbe2aa4dea17f41074ead4deae7935d2cf6823638766709812c5b40
+ languageName: node
+ linkType: hard
+
"babel-plugin-istanbul@npm:5.2.0":
version: 5.2.0
resolution: "babel-plugin-istanbul@npm:5.2.0"
@@ -13467,7 +13554,7 @@ __metadata:
languageName: node
linkType: hard
-"babel-plugin-macros@npm:^2.8.0":
+"babel-plugin-macros@npm:^2.0.0, babel-plugin-macros@npm:^2.8.0":
version: 2.8.0
resolution: "babel-plugin-macros@npm:2.8.0"
dependencies:
@@ -15248,7 +15335,7 @@ __metadata:
languageName: node
linkType: hard
-"codemirror@npm:^5.58.2":
+"codemirror@npm:^5.47.0":
version: 5.65.16
resolution: "codemirror@npm:5.65.16"
checksum: 1c5036bfffcce19b1ff91d8b158dcb45faba27047c4093f55ea7ad1165975179eb47c9ef604baa9c4f4ea6bf9817886c767f33e72fa9c62710404029be3c4744
@@ -16270,7 +16357,7 @@ __metadata:
languageName: node
linkType: hard
-"csstype@npm:^2.0.0, csstype@npm:^2.2.0, csstype@npm:^2.5.2, csstype@npm:^2.6.9":
+"csstype@npm:^2.0.0, csstype@npm:^2.2.0, csstype@npm:^2.5.2, csstype@npm:^2.5.7, csstype@npm:^2.6.9":
version: 2.6.21
resolution: "csstype@npm:2.6.21"
checksum: 2ce8bc832375146eccdf6115a1f8565a27015b74cce197c35103b4494955e9516b246140425ad24103864076aa3e1257ac9bab25a06c8d931dd87a6428c9dccf
@@ -16600,7 +16687,7 @@ __metadata:
languageName: node
linkType: hard
-"deepmerge@npm:^4.2.2":
+"deepmerge@npm:^4.0.0, deepmerge@npm:^4.2.2":
version: 4.3.1
resolution: "deepmerge@npm:4.3.1"
checksum: 2024c6a980a1b7128084170c4cf56b0fd58a63f2da1660dcfe977415f27b17dbe5888668b59d0b063753f3220719d5e400b7f113609489c90160bb9a5518d052
@@ -17240,13 +17327,6 @@ __metadata:
languageName: node
linkType: hard
-"dset@npm:^3.1.0":
- version: 3.1.3
- resolution: "dset@npm:3.1.3"
- checksum: 5db964a36c60c51aa3f7088bfe1dc5c0eedd9a6ef3b216935bb70ef4a7b8fc40fd2f9bb16b9a4692c9c9772cea60cfefb108d2d09fbd53c85ea8f6cd54502d6a
- languageName: node
- linkType: hard
-
"dset@npm:^3.1.2":
version: 3.1.2
resolution: "dset@npm:3.1.2"
@@ -17522,20 +17602,13 @@ __metadata:
languageName: node
linkType: hard
-"entities@npm:~2.0":
+"entities@npm:~2.0, entities@npm:~2.0.0":
version: 2.0.3
resolution: "entities@npm:2.0.3"
checksum: 5a7899fcc622e0d76afdeafe4c58a6b40ae3a8ee4772e5825a648c11a2ca324a9a02515386f512e466baac4aeb551f3d3b79eaece5cd98369b9f8601be336b1a
languageName: node
linkType: hard
-"entities@npm:~2.1.0":
- version: 2.1.0
- resolution: "entities@npm:2.1.0"
- checksum: a10a877e489586a3f6a691fe49bf3fc4e58f06c8e80522f08214a5150ba457e7017b447d4913a3fa041bda06ee4c92517baa4d8d75373eaa79369e9639225ffd
- languageName: node
- linkType: hard
-
"env-paths@npm:^2.2.0":
version: 2.2.1
resolution: "env-paths@npm:2.2.1"
@@ -17875,7 +17948,7 @@ __metadata:
languageName: node
linkType: hard
-"escape-html@npm:^1.0.3, escape-html@npm:~1.0.3":
+"escape-html@npm:~1.0.3":
version: 1.0.3
resolution: "escape-html@npm:1.0.3"
checksum: 6213ca9ae00d0ab8bccb6d8d4e0a98e76237b2410302cf7df70aaa6591d509a2a37ce8998008cbecae8fc8ffaadf3fb0229535e6a145f3ce0b211d060decbb24
@@ -19717,7 +19790,7 @@ __metadata:
form-urlencoded: ^6.1.0
format-graphql: ^1.4.0
glob: ^9.3.1
- graphiql: 1.4.7
+ graphiql: 1.0.0-alpha.0
graphiql-code-exporter: 2.0.8
graphiql-explorer: 0.6.2
graphql: 14.5.8
@@ -20600,24 +20673,25 @@ __metadata:
languageName: node
linkType: hard
-"graphiql@npm:1.4.7":
- version: 1.4.7
- resolution: "graphiql@npm:1.4.7"
+"graphiql@npm:1.0.0-alpha.0":
+ version: 1.0.0-alpha.0
+ resolution: "graphiql@npm:1.0.0-alpha.0"
dependencies:
- "@graphiql/toolkit": ^0.3.2
- codemirror: ^5.58.2
- codemirror-graphql: ^1.0.3
+ "@emotion/core": ^10.0.22
+ "@mdx-js/react": ^1.5.2
+ codemirror: ^5.47.0
+ codemirror-graphql: ^0.12.0-alpha.0
copy-to-clipboard: ^3.2.0
- dset: ^3.1.0
entities: ^2.0.0
- escape-html: ^1.0.3
- graphql-language-service: ^3.1.6
- markdown-it: ^12.2.0
+ markdown-it: ^10.0.0
+ regenerator-runtime: ^0.13.3
+ theme-ui: ^0.2.52
peerDependencies:
- graphql: ">= v14.5.0 <= 15.5.0"
- react: ^16.8.0 || ^17.0.0 || ^18.0.0
- react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0
- checksum: b62790da23a54209c469f628c1d87bdc7b975e1857de77a6c34e0e69348704d81f32c020b29d8ae56a035075bed49cf3c59bbacdda31d7a9b888cf17676b4e7a
+ graphql: ^0.12.0 || ^0.13.0 || ^14.0.0
+ prop-types: ">=15.5.0"
+ react: ^16.8.0
+ react-dom: ^16.8.0
+ checksum: fbd3787cdecdc9c7dbdec2ae1f767bd17d7c743d4e9a23f15dd8b0e7911330f24a0d8f1d9c1039579e1afb7a5c3fc896aa061370a26aa273db4bb5b96fd81a74
languageName: node
linkType: hard
@@ -20710,7 +20784,7 @@ __metadata:
languageName: node
linkType: hard
-"graphql-language-service-parser@npm:^1.10.3, graphql-language-service-parser@npm:^1.5.3-alpha.0":
+"graphql-language-service-parser@npm:^1.5.3-alpha.0":
version: 1.10.4
resolution: "graphql-language-service-parser@npm:1.10.4"
dependencies:
@@ -20721,7 +20795,7 @@ __metadata:
languageName: node
linkType: hard
-"graphql-language-service-types@npm:^1.6.0-alpha.0, graphql-language-service-types@npm:^1.8.6, graphql-language-service-types@npm:^1.8.7":
+"graphql-language-service-types@npm:^1.6.0-alpha.0, graphql-language-service-types@npm:^1.8.7":
version: 1.8.7
resolution: "graphql-language-service-types@npm:1.8.7"
dependencies:
@@ -20733,7 +20807,7 @@ __metadata:
languageName: node
linkType: hard
-"graphql-language-service-utils@npm:^2.4.0-alpha.0, graphql-language-service-utils@npm:^2.6.3":
+"graphql-language-service-utils@npm:^2.4.0-alpha.0":
version: 2.7.1
resolution: "graphql-language-service-utils@npm:2.7.1"
dependencies:
@@ -20746,22 +20820,6 @@ __metadata:
languageName: node
linkType: hard
-"graphql-language-service@npm:^3.1.6":
- version: 3.2.5
- resolution: "graphql-language-service@npm:3.2.5"
- dependencies:
- graphql-language-service-interface: ^2.9.5
- graphql-language-service-parser: ^1.10.3
- graphql-language-service-types: ^1.8.6
- graphql-language-service-utils: ^2.6.3
- peerDependencies:
- graphql: ^15.5.0 || ^16.0.0
- bin:
- graphql: dist/temp-bin.js
- checksum: bf42d5db27d12fba4a0ba7fba81ef9601e00076ad7e2ac1dd8713d98f67004529b63ecac7099767f85a7c2577c17d518aebd9de3cbb5dc316a8074aaa37be4bc
- languageName: node
- linkType: hard
-
"graphql-mqtt-subscriptions@npm:^1.2.0":
version: 1.2.0
resolution: "graphql-mqtt-subscriptions@npm:1.2.0"
@@ -20894,15 +20952,6 @@ __metadata:
languageName: node
linkType: hard
-"graphql-ws@npm:^4.9.0":
- version: 4.9.0
- resolution: "graphql-ws@npm:4.9.0"
- peerDependencies:
- graphql: ">=0.11 <=15"
- checksum: f74f5d42843798136202bed9766d2ac6ce614950d31a69d5b935b4f41255d3ace8329b659658fe88a45a4dad43c0d668361b826889d0191859839856084c1eb9
- languageName: node
- linkType: hard
-
"graphql@npm:0.13.1 - 16, graphql@npm:^15.0.0 || ^16.0.0":
version: 16.6.0
resolution: "graphql@npm:16.6.0"
@@ -24923,12 +24972,12 @@ __metadata:
languageName: node
linkType: hard
-"linkify-it@npm:^3.0.1":
- version: 3.0.3
- resolution: "linkify-it@npm:3.0.3"
+"linkify-it@npm:^2.0.0":
+ version: 2.2.0
+ resolution: "linkify-it@npm:2.2.0"
dependencies:
uc.micro: ^1.0.1
- checksum: 31367a4bb70c5bbc9703246236b504b0a8e049bcd4e0de4291fa50f0ebdebf235b5eb54db6493cb0b1319357c6eeafc4324c9f4aa34b0b943d9f2e11a1268fbc
+ checksum: d198871d0b3f3cfdb745dae564bfd6743474f20cd0ef1057e6ca29451834749e7f3da52b59b4de44e98f31a1e5c71bdad160490d4ae54de251cbcde57e4d7837
languageName: node
linkType: hard
@@ -25556,18 +25605,18 @@ __metadata:
languageName: node
linkType: hard
-"markdown-it@npm:^12.2.0":
- version: 12.3.2
- resolution: "markdown-it@npm:12.3.2"
+"markdown-it@npm:^10.0.0":
+ version: 10.0.0
+ resolution: "markdown-it@npm:10.0.0"
dependencies:
- argparse: ^2.0.1
- entities: ~2.1.0
- linkify-it: ^3.0.1
+ argparse: ^1.0.7
+ entities: ~2.0.0
+ linkify-it: ^2.0.0
mdurl: ^1.0.1
uc.micro: ^1.0.5
bin:
markdown-it: bin/markdown-it.js
- checksum: 890555711c1c00fa03b936ca2b213001a3b9b37dea140d8445ae4130ce16628392aad24b12e2a0a9935336ca5951f2957a38f4e5309a2e38eab44e25ff32a41e
+ checksum: 69f5ee640cbebb451b80d3cce308fff7230767e05c0f8c206a1e413775b7a6e5a08e91e9f3ec59f9b5c5a45493f9ce7ac089379cffb60c9d3e6677ed9d535086
languageName: node
linkType: hard
@@ -25872,7 +25921,7 @@ __metadata:
languageName: node
linkType: hard
-"meros@npm:^1.1.4, meros@npm:^1.2.1":
+"meros@npm:^1.2.1":
version: 1.3.0
resolution: "meros@npm:1.3.0"
peerDependencies:
@@ -34463,6 +34512,21 @@ __metadata:
languageName: node
linkType: hard
+"theme-ui@npm:^0.2.52":
+ version: 0.2.52
+ resolution: "theme-ui@npm:0.2.52"
+ dependencies:
+ "@emotion/is-prop-valid": ^0.8.1
+ "@styled-system/css": ^5.0.16
+ deepmerge: ^4.0.0
+ peerDependencies:
+ "@emotion/core": ^10.0.0
+ "@mdx-js/react": ^1.0.0
+ react: ^16.8.0
+ checksum: f00c61c2a7cf247b4b94ea0f7e64a0fc97ba78eeab1a472a3e4755fefa6fc412e7b56fee0d567f266837a02628899a7582fd1f261147a8566df8ac015de4a0bd
+ languageName: node
+ linkType: hard
+
"throttleit@npm:^1.0.0":
version: 1.0.0
resolution: "throttleit@npm:1.0.0"
diff --git a/install-manifests/azure-container-with-pg/azuredeploy.json b/install-manifests/azure-container-with-pg/azuredeploy.json
index 8c7c2d80299..9512822ec1c 100644
--- a/install-manifests/azure-container-with-pg/azuredeploy.json
+++ b/install-manifests/azure-container-with-pg/azuredeploy.json
@@ -98,7 +98,7 @@
"firewallRuleName": "allow-all-azure-firewall-rule",
"containerGroupName": "[concat(parameters('name'), '-container-group')]",
"containerName": "hasura-graphql-engine",
- "containerImage": "hasura/graphql-engine:v2.37.0"
+ "containerImage": "hasura/graphql-engine:v2.38.0"
},
"resources": [
{
diff --git a/install-manifests/azure-container/azuredeploy.json b/install-manifests/azure-container/azuredeploy.json
index 270d35936af..c9ea8dcb862 100644
--- a/install-manifests/azure-container/azuredeploy.json
+++ b/install-manifests/azure-container/azuredeploy.json
@@ -55,7 +55,7 @@
"dbName": "[parameters('postgresDatabaseName')]",
"containerGroupName": "[concat(parameters('name'), '-container-group')]",
"containerName": "hasura-graphql-engine",
- "containerImage": "hasura/graphql-engine:v2.37.0"
+ "containerImage": "hasura/graphql-engine:v2.38.0"
},
"resources": [
{
diff --git a/install-manifests/docker-compose-cockroach/docker-compose.yaml b/install-manifests/docker-compose-cockroach/docker-compose.yaml
index a8e2465b83e..2f2021519a1 100644
--- a/install-manifests/docker-compose-cockroach/docker-compose.yaml
+++ b/install-manifests/docker-compose-cockroach/docker-compose.yaml
@@ -27,7 +27,7 @@ services:
- "${PWD}/cockroach-data:/cockroach/cockroach-data"
graphql-engine:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
ports:
- "8080:8080"
depends_on:
diff --git a/install-manifests/docker-compose-https/docker-compose.yaml b/install-manifests/docker-compose-https/docker-compose.yaml
index 042a131da0e..5486a02a16e 100644
--- a/install-manifests/docker-compose-https/docker-compose.yaml
+++ b/install-manifests/docker-compose-https/docker-compose.yaml
@@ -8,7 +8,7 @@ services:
environment:
POSTGRES_PASSWORD: postgrespassword
graphql-engine:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
depends_on:
- "postgres"
restart: always
diff --git a/install-manifests/docker-compose-ms-sql-server/docker-compose.yaml b/install-manifests/docker-compose-ms-sql-server/docker-compose.yaml
index 906c8d859ee..ced8f0bf31f 100644
--- a/install-manifests/docker-compose-ms-sql-server/docker-compose.yaml
+++ b/install-manifests/docker-compose-ms-sql-server/docker-compose.yaml
@@ -15,7 +15,7 @@ services:
volumes:
- mssql_data:/var/opt/mssql
graphql-engine:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
ports:
- "8080:8080"
depends_on:
diff --git a/install-manifests/docker-compose-pgadmin/docker-compose.yaml b/install-manifests/docker-compose-pgadmin/docker-compose.yaml
index 854aca0dfda..d53a49cce8d 100644
--- a/install-manifests/docker-compose-pgadmin/docker-compose.yaml
+++ b/install-manifests/docker-compose-pgadmin/docker-compose.yaml
@@ -19,7 +19,7 @@ services:
PGADMIN_DEFAULT_EMAIL: pgadmin@example.com
PGADMIN_DEFAULT_PASSWORD: admin
graphql-engine:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
ports:
- "8080:8080"
depends_on:
diff --git a/install-manifests/docker-compose-postgis/docker-compose.yaml b/install-manifests/docker-compose-postgis/docker-compose.yaml
index f679bfda7e0..288b922c3dd 100644
--- a/install-manifests/docker-compose-postgis/docker-compose.yaml
+++ b/install-manifests/docker-compose-postgis/docker-compose.yaml
@@ -8,7 +8,7 @@ services:
environment:
POSTGRES_PASSWORD: postgrespassword
graphql-engine:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
ports:
- "8080:8080"
depends_on:
diff --git a/install-manifests/docker-compose-yugabyte/docker-compose.yaml b/install-manifests/docker-compose-yugabyte/docker-compose.yaml
index d372c241842..56c12229184 100644
--- a/install-manifests/docker-compose-yugabyte/docker-compose.yaml
+++ b/install-manifests/docker-compose-yugabyte/docker-compose.yaml
@@ -23,7 +23,7 @@ services:
- yugabyte-data:/var/lib/postgresql/data
graphql-engine:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
ports:
- "8080:8080"
depends_on:
diff --git a/install-manifests/docker-compose/docker-compose.yaml b/install-manifests/docker-compose/docker-compose.yaml
index 65229d42619..02a8ed474d1 100644
--- a/install-manifests/docker-compose/docker-compose.yaml
+++ b/install-manifests/docker-compose/docker-compose.yaml
@@ -8,7 +8,7 @@ services:
environment:
POSTGRES_PASSWORD: postgrespassword
graphql-engine:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
ports:
- "8080:8080"
restart: always
@@ -31,7 +31,7 @@ services:
data-connector-agent:
condition: service_healthy
data-connector-agent:
- image: hasura/graphql-data-connector:v2.37.0
+ image: hasura/graphql-data-connector:v2.38.0
restart: always
ports:
- 8081:8081
diff --git a/install-manifests/docker-run/docker-run.sh b/install-manifests/docker-run/docker-run.sh
index 4532c929e74..0b9b537ba9a 100755
--- a/install-manifests/docker-run/docker-run.sh
+++ b/install-manifests/docker-run/docker-run.sh
@@ -3,4 +3,4 @@ docker run -d -p 8080:8080 \
-e HASURA_GRAPHQL_DATABASE_URL=postgres://username:password@hostname:port/dbname \
-e HASURA_GRAPHQL_ENABLE_CONSOLE=true \
-e HASURA_GRAPHQL_DEV_MODE=true \
- hasura/graphql-engine:v2.37.0
+ hasura/graphql-engine:v2.38.0
diff --git a/install-manifests/enterprise/athena/docker-compose.yaml b/install-manifests/enterprise/athena/docker-compose.yaml
index 8f066976710..4306e36024b 100644
--- a/install-manifests/enterprise/athena/docker-compose.yaml
+++ b/install-manifests/enterprise/athena/docker-compose.yaml
@@ -13,7 +13,7 @@ services:
environment:
POSTGRES_PASSWORD: postgrespassword
hasura:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
restart: always
ports:
- 8080:8080
@@ -48,7 +48,7 @@ services:
data-connector-agent:
condition: service_healthy
data-connector-agent:
- image: hasura/graphql-data-connector:v2.37.0
+ image: hasura/graphql-data-connector:v2.38.0
restart: always
ports:
- 8081:8081
diff --git a/install-manifests/enterprise/aws-ecs/hasura-fargate-task.json b/install-manifests/enterprise/aws-ecs/hasura-fargate-task.json
index ee3a43e84c9..1bca65f5ed2 100644
--- a/install-manifests/enterprise/aws-ecs/hasura-fargate-task.json
+++ b/install-manifests/enterprise/aws-ecs/hasura-fargate-task.json
@@ -4,7 +4,7 @@
"containerDefinitions": [
{
"name": "hasura",
- "image": "hasura/graphql-engine:v2.37.0",
+ "image": "hasura/graphql-engine:v2.38.0",
"portMappings": [
{
"hostPort": 8080,
diff --git a/install-manifests/enterprise/clickhouse/docker-compose.yaml b/install-manifests/enterprise/clickhouse/docker-compose.yaml
index 976988ada5c..5b2f8a0b31f 100644
--- a/install-manifests/enterprise/clickhouse/docker-compose.yaml
+++ b/install-manifests/enterprise/clickhouse/docker-compose.yaml
@@ -13,7 +13,7 @@ services:
environment:
POSTGRES_PASSWORD: postgrespassword
hasura:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
restart: always
ports:
- 8080:8080
@@ -48,7 +48,7 @@ services:
data-connector-agent:
condition: service_healthy
data-connector-agent:
- image: hasura/clickhouse-data-connector:v2.37.0
+ image: hasura/clickhouse-data-connector:v2.38.0
restart: always
ports:
- 8080:8081
diff --git a/install-manifests/enterprise/docker-compose/docker-compose.yaml b/install-manifests/enterprise/docker-compose/docker-compose.yaml
index 2d0f7515a86..ce78ce17996 100644
--- a/install-manifests/enterprise/docker-compose/docker-compose.yaml
+++ b/install-manifests/enterprise/docker-compose/docker-compose.yaml
@@ -15,7 +15,7 @@ services:
environment:
POSTGRES_PASSWORD: postgrespassword
graphql-engine:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
ports:
- "8080:8080"
restart: always
@@ -47,7 +47,7 @@ services:
data-connector-agent:
condition: service_healthy
data-connector-agent:
- image: hasura/graphql-data-connector:v2.37.0
+ image: hasura/graphql-data-connector:v2.38.0
restart: always
ports:
- 8081:8081
diff --git a/install-manifests/enterprise/kubernetes/deployment.yaml b/install-manifests/enterprise/kubernetes/deployment.yaml
index 9fe5ff06591..a92db79afe5 100644
--- a/install-manifests/enterprise/kubernetes/deployment.yaml
+++ b/install-manifests/enterprise/kubernetes/deployment.yaml
@@ -18,7 +18,7 @@ spec:
fsGroup: 1001
runAsUser: 1001
containers:
- - image: hasura/graphql-engine:v2.37.0
+ - image: hasura/graphql-engine:v2.38.0
imagePullPolicy: IfNotPresent
name: hasura
readinessProbe:
diff --git a/install-manifests/enterprise/mariadb/docker-compose.yaml b/install-manifests/enterprise/mariadb/docker-compose.yaml
index f50c1de5491..0dcd62dfefe 100644
--- a/install-manifests/enterprise/mariadb/docker-compose.yaml
+++ b/install-manifests/enterprise/mariadb/docker-compose.yaml
@@ -13,7 +13,7 @@ services:
environment:
POSTGRES_PASSWORD: postgrespassword
hasura:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
restart: always
ports:
- 8080:8080
@@ -48,7 +48,7 @@ services:
data-connector-agent:
condition: service_healthy
data-connector-agent:
- image: hasura/graphql-data-connector:v2.37.0
+ image: hasura/graphql-data-connector:v2.38.0
restart: always
ports:
- 8081:8081
diff --git a/install-manifests/enterprise/mongodb/docker-compose.yaml b/install-manifests/enterprise/mongodb/docker-compose.yaml
index f9e08c81881..bcbcf9a0e65 100644
--- a/install-manifests/enterprise/mongodb/docker-compose.yaml
+++ b/install-manifests/enterprise/mongodb/docker-compose.yaml
@@ -30,7 +30,7 @@ services:
MONGO_INITDB_ROOT_USERNAME: mongouser
MONGO_INITDB_ROOT_PASSWORD: mongopassword
hasura:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
restart: always
ports:
- 8080:8080
@@ -60,7 +60,7 @@ services:
postgres:
condition: service_healthy
mongo-data-connector:
- image: hasura/mongo-data-connector:v2.37.0
+ image: hasura/mongo-data-connector:v2.38.0
ports:
- 3000:3000
volumes:
diff --git a/install-manifests/enterprise/mysql/docker-compose.yaml b/install-manifests/enterprise/mysql/docker-compose.yaml
index 9c533177092..0ca88393ae1 100644
--- a/install-manifests/enterprise/mysql/docker-compose.yaml
+++ b/install-manifests/enterprise/mysql/docker-compose.yaml
@@ -13,7 +13,7 @@ services:
environment:
POSTGRES_PASSWORD: postgrespassword
hasura:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
restart: always
ports:
- 8080:8080
@@ -48,7 +48,7 @@ services:
data-connector-agent:
condition: service_healthy
data-connector-agent:
- image: hasura/graphql-data-connector:v2.37.0
+ image: hasura/graphql-data-connector:v2.38.0
restart: always
ports:
- 8081:8081
diff --git a/install-manifests/enterprise/oracle/docker-compose.yaml b/install-manifests/enterprise/oracle/docker-compose.yaml
index 816a16a759f..e5ece3b12b2 100644
--- a/install-manifests/enterprise/oracle/docker-compose.yaml
+++ b/install-manifests/enterprise/oracle/docker-compose.yaml
@@ -13,7 +13,7 @@ services:
environment:
POSTGRES_PASSWORD: postgrespassword
hasura:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
restart: always
ports:
- 8080:8080
@@ -48,7 +48,7 @@ services:
data-connector-agent:
condition: service_healthy
data-connector-agent:
- image: hasura/graphql-data-connector:v2.37.0
+ image: hasura/graphql-data-connector:v2.38.0
restart: always
ports:
- 8081:8081
diff --git a/install-manifests/enterprise/redshift/docker-compose.yaml b/install-manifests/enterprise/redshift/docker-compose.yaml
index aefdd290dea..12bbe36530d 100644
--- a/install-manifests/enterprise/redshift/docker-compose.yaml
+++ b/install-manifests/enterprise/redshift/docker-compose.yaml
@@ -13,7 +13,7 @@ services:
environment:
POSTGRES_PASSWORD: postgrespassword
hasura:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
restart: always
ports:
- 8080:8080
@@ -48,7 +48,7 @@ services:
data-connector-agent:
condition: service_healthy
data-connector-agent:
- image: hasura/graphql-data-connector:v2.37.0
+ image: hasura/graphql-data-connector:v2.38.0
restart: always
ports:
- 8081:8081
diff --git a/install-manifests/enterprise/snowflake/docker-compose.yaml b/install-manifests/enterprise/snowflake/docker-compose.yaml
index c815abaa855..2fd52915135 100644
--- a/install-manifests/enterprise/snowflake/docker-compose.yaml
+++ b/install-manifests/enterprise/snowflake/docker-compose.yaml
@@ -13,7 +13,7 @@ services:
environment:
POSTGRES_PASSWORD: postgrespassword
hasura:
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
restart: always
ports:
- 8080:8080
@@ -48,7 +48,7 @@ services:
data-connector-agent:
condition: service_healthy
data-connector-agent:
- image: hasura/graphql-data-connector:v2.37.0
+ image: hasura/graphql-data-connector:v2.38.0
restart: always
ports:
- 8081:8081
diff --git a/install-manifests/google-cloud-k8s-sql/deployment.yaml b/install-manifests/google-cloud-k8s-sql/deployment.yaml
index 9caeaee0710..f7f09264a37 100644
--- a/install-manifests/google-cloud-k8s-sql/deployment.yaml
+++ b/install-manifests/google-cloud-k8s-sql/deployment.yaml
@@ -16,7 +16,7 @@ spec:
spec:
containers:
- name: graphql-engine
- image: hasura/graphql-engine:v2.37.0
+ image: hasura/graphql-engine:v2.38.0
ports:
- containerPort: 8080
readinessProbe:
diff --git a/install-manifests/kubernetes/deployment.yaml b/install-manifests/kubernetes/deployment.yaml
index 922e5950d09..ef3d4f7da7e 100644
--- a/install-manifests/kubernetes/deployment.yaml
+++ b/install-manifests/kubernetes/deployment.yaml
@@ -18,7 +18,7 @@ spec:
app: hasura
spec:
containers:
- - image: hasura/graphql-engine:v2.37.0
+ - image: hasura/graphql-engine:v2.38.0
imagePullPolicy: IfNotPresent
name: hasura
env:
diff --git a/nix/overlays/graphql-parser.nix b/nix/overlays/graphql-parser.nix
index abd1b6e27f9..d46f9188220 100644
--- a/nix/overlays/graphql-parser.nix
+++ b/nix/overlays/graphql-parser.nix
@@ -5,11 +5,7 @@ final: prev: {
overrides = prev.lib.composeExtensions
(old.overrides or (_: _: { }))
(hfinal: hprev: {
- graphql-parser = (final.haskell.packages.${prev.ghcName}.callCabal2nix "graphql-parser" ../../server/lib/graphql-parser-hs { }).overrideScope (
- final: prev: {
- hedgehog = final.hedgehog_1_2;
- }
- );
+ graphql-parser = (final.haskell.packages.${prev.ghcName}.callCabal2nix "graphql-parser" ../../server/lib/graphql-parser { });
});
});
};
diff --git a/nix/shell.nix b/nix/shell.nix
index e8d05bd3c0b..808a1f0972a 100644
--- a/nix/shell.nix
+++ b/nix/shell.nix
@@ -78,17 +78,12 @@ let
pkgs.jq
];
- consoleInputs = [
- pkgs.google-cloud-sdk
- pkgs."nodejs-${versions.nodejsVersion}_x"
- pkgs."nodejs-${versions.nodejsVersion}_x".pkgs.typescript-language-server
- ];
-
docsInputs = [
pkgs.yarn
];
integrationTestInputs = [
+ pkgs.nodejs
pkgs.python3
pkgs.pyright # Python type checker
];
@@ -101,7 +96,7 @@ let
hls
pkgs.haskell.packages.${pkgs.ghcName}.alex
- # pkgs.haskell.packages.${pkgs.ghcName}.apply-refact
+ pkgs.haskell.packages.${pkgs.ghcName}.apply-refact
(versions.ensureVersion pkgs.haskell.packages.${pkgs.ghcName}.cabal-install)
(pkgs.haskell.lib.dontCheck (pkgs.haskell.packages.${pkgs.ghcName}.ghcid))
pkgs.haskell.packages.${pkgs.ghcName}.happy
@@ -163,7 +158,7 @@ let
++ integrationTestInputs;
in
pkgs.mkShell ({
- buildInputs = baseInputs ++ consoleInputs ++ docsInputs ++ serverDeps ++ devInputs ++ ciInputs;
+ buildInputs = baseInputs ++ docsInputs ++ serverDeps ++ devInputs ++ ciInputs;
} // pkgs.lib.optionalAttrs pkgs.stdenv.isDarwin {
shellHook = ''
export DYLD_LIBRARY_PATH='${dynamicLibraryPath}'
diff --git a/nix/versions.nix b/nix/versions.nix
index 744a52513d3..e5aaa2c1524 100644
--- a/nix/versions.nix
+++ b/nix/versions.nix
@@ -11,6 +11,4 @@ in
else throw "Invalid version for package ${package.pname}: expected ${expected}, got ${package.version}";
ghcVersion = pkgs.lib.strings.fileContents ../.ghcversion;
-
- nodejsVersion = pkgs.lib.strings.fileContents ../.nvmrc;
}
diff --git a/packaging/graphql-engine-base/ubuntu.dockerfile b/packaging/graphql-engine-base/ubuntu.dockerfile
index c47dac4d7dd..dc699e4e987 100644
--- a/packaging/graphql-engine-base/ubuntu.dockerfile
+++ b/packaging/graphql-engine-base/ubuntu.dockerfile
@@ -1,7 +1,7 @@
-# DATE VERSION: 2024-01-23
+# DATE VERSION: 2024-03-13
# Modify the above date version (YYYY-MM-DD) if you want to rebuild the image
-FROM ubuntu:jammy-20240111
+FROM ubuntu:jammy-20240227
### NOTE! Shared libraries here need to be kept in sync with `server-builder.dockerfile`!
diff --git a/server/VERSIONS.json b/server/VERSIONS.json
index 67ceec2fec5..0b846511d90 100644
--- a/server/VERSIONS.json
+++ b/server/VERSIONS.json
@@ -1,5 +1,5 @@
{
- "cabal-install": "3.10.1.0",
+ "cabal-install": "3.10.2.1",
"ghc": "9.6.4",
"hlint": "3.6.1",
"ormolu": "0.7.2.0"
diff --git a/server/graphql-engine.cabal b/server/graphql-engine.cabal
index 53bdb8adf23..c5a5a8eab32 100644
--- a/server/graphql-engine.cabal
+++ b/server/graphql-engine.cabal
@@ -424,7 +424,6 @@ common lib-depends
-- logging related
, base64-bytestring >= 1.0
- , auto-update
-- regex related
, regex-tdfa >=1.3.1 && <1.4
@@ -662,6 +661,7 @@ library
-- Exposed for benchmark:
, Hasura.Cache.Bounded
, Hasura.CredentialCache
+ , Hasura.CachedTime
, Hasura.Logging
, Hasura.HTTP
, Hasura.PingSources
diff --git a/server/lib/pg-client/pg-client.cabal b/server/lib/pg-client/pg-client.cabal
index c750e51a0cf..45e2af0b9ef 100644
--- a/server/lib/pg-client/pg-client.cabal
+++ b/server/lib/pg-client/pg-client.cabal
@@ -54,6 +54,7 @@ library
Database.PG.Query.Pool
Database.PG.Query.PTI
Database.PG.Query.Transaction
+ Database.PG.Query.URL
build-depends:
, aeson
@@ -65,6 +66,9 @@ library
, ekg-prometheus
, hashable
, hashtables
+ -- for our HASURA_SECRETS_BLOCKING_FORCE_REFRESH_URL hook
+ , http-client
+ , http-types
, mmorph
, monad-control
, mtl
@@ -94,19 +98,20 @@ test-suite pg-client-tests
Interrupt
Timeout
Jsonb
+ URL
build-depends:
+ , aeson
, async
, base
, bytestring
, hspec
+ , mtl
, pg-client
+ , postgresql-libpq
, safe-exceptions
, time
, transformers
- , aeson
- , mtl
- , postgresql-libpq
benchmark pg-client-bench
import: common-all
@@ -123,5 +128,4 @@ benchmark pg-client-bench
, hasql-transaction
, pg-client
, tasty-bench
- , text
, transformers
diff --git a/server/lib/pg-client/src/Database/PG/Query/Connection.hs b/server/lib/pg-client/src/Database/PG/Query/Connection.hs
index ff727b73892..b6789d819cb 100644
--- a/server/lib/pg-client/src/Database/PG/Query/Connection.hs
+++ b/server/lib/pg-client/src/Database/PG/Query/Connection.hs
@@ -48,13 +48,14 @@ where
import Control.Concurrent.Interrupt (interruptOnAsyncException)
import Control.Exception.Safe (Exception, SomeException (..), catch, throwIO)
+import Control.Monad (unless)
import Control.Monad.Except (MonadError (throwError))
import Control.Monad.IO.Class (MonadIO (liftIO))
import Control.Monad.Trans.Class (lift)
import Control.Monad.Trans.Except (ExceptT, runExceptT, withExceptT)
import Control.Retry (RetryPolicyM)
import Control.Retry qualified as Retry
-import Data.Aeson (ToJSON (toJSON), Value (String), genericToJSON, object, (.=))
+import Data.Aeson (ToJSON (toJSON), Value (String), encode, genericToJSON, object, (.=))
import Data.Aeson.Casing (aesonDrop, snakeCase)
import Data.Aeson.TH (mkToJSON)
import Data.Bool (bool)
@@ -74,9 +75,13 @@ import Data.Text.Encoding (decodeUtf8, decodeUtf8With, encodeUtf8)
import Data.Text.Encoding.Error (lenientDecode)
import Data.Time (NominalDiffTime, UTCTime)
import Data.Word (Word16, Word32)
+import Database.PG.Query.URL (encodeURLPassword)
import Database.PostgreSQL.LibPQ qualified as PQ
import Database.PostgreSQL.Simple.Options qualified as Options
import GHC.Generics (Generic)
+import Network.HTTP.Client
+import Network.HTTP.Types.Status (statusCode)
+import System.Environment (lookupEnv)
import Prelude
{-# ANN module ("HLint: ignore Use tshow" :: String) #-}
@@ -118,7 +123,7 @@ readDynamicURIFile path = do
<> Text.pack path
<> ": "
<> Text.pack (show e)
- pure $ Text.strip uriDirty
+ pure $ encodeURLPassword $ Text.strip uriDirty
where
-- Text.readFile but explicit, ignoring locale:
readFileUtf8 = fmap decodeUtf8 . BS.readFile
@@ -209,6 +214,7 @@ readConnErr conn = do
pgRetrying ::
(MonadIO m) =>
Maybe String ->
+ -- | An action to perform on error
IO () ->
PGRetryPolicyM m ->
PGLogger ->
@@ -242,6 +248,36 @@ initPQConn ::
IO PQ.Connection
initPQConn ci logger = do
host <- extractHost (ciDetails ci)
+ -- if this is a dynamic connection, we'll signal to refresh the secret (if
+ -- configured) during each retry, ensuring we don't make too many connection
+ -- attempts with the wrong credentials and risk getting locked out
+ resetFn <- do
+ mbUrl <- lookupEnv "HASURA_SECRETS_BLOCKING_FORCE_REFRESH_URL"
+ case (mbUrl, ciDetails ci) of
+ (Just url, CDDynamicDatabaseURI path) -> do
+ manager <- newManager defaultManagerSettings
+
+ -- Create the request
+ let body = encode $ object ["filename" .= path]
+ initialRequest <- parseRequest url
+ let request =
+ initialRequest
+ { method = "POST",
+ requestBody = RequestBodyLBS body,
+ requestHeaders = [("Content-Type", "application/json")]
+ }
+
+ -- The action to perform on each retry. This must only return after
+ -- the secrets file has been refreshed.
+ return $ do
+ status <- statusCode . responseStatus <$> httpLbs request manager
+ unless (status >= 200 && status < 300) $
+ logger $
+ PLERetryMsg $
+ object
+ ["message" .= String "Forcing refresh of secret file at HASURA_SECRETS_BLOCKING_FORCE_REFRESH_URL seems to have failed. Retrying anyway."]
+ _ -> pure $ pure ()
+
-- Retry if postgres connection error occurs
pgRetrying host resetFn retryP logger $ do
-- Initialise the connection
@@ -252,7 +288,6 @@ initPQConn ci logger = do
let connOk = s == PQ.ConnectionOk
bool (whenConnNotOk conn) (whenConnOk conn) connOk
where
- resetFn = return ()
retryP = mkPGRetryPolicy $ ciRetries ci
whenConnNotOk conn = Left . PGConnErr <$> readConnErr conn
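
The hunk above threads a reset action into pgRetrying so that each failed connection attempt can force a secrets refresh before the next try. A minimal sketch of that retry-with-reset shape, using hypothetical names rather than pg-client's actual internals:

    import Control.Concurrent (threadDelay)
    import Prelude

    -- Hypothetical sketch of retry-with-reset; pg-client's real pgRetrying
    -- also handles logging, retry policies, and host extraction.
    retryingWith :: IO () -> Int -> IO (Either String a) -> IO (Either String a)
    retryingWith onError retries action = go retries
      where
        go n = do
          result <- action
          case result of
            Right ok -> pure (Right ok)
            Left err
              | n <= 0 -> pure (Left err)
              | otherwise -> do
                  onError -- e.g. POST to the blocking force-refresh URL
                  threadDelay 100000 -- brief backoff before the next attempt
                  go (n - 1)
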
diff --git a/server/lib/pg-client/src/Database/PG/Query/Pool.hs b/server/lib/pg-client/src/Database/PG/Query/Pool.hs
index 7d28fb24035..ff11ad16dc0 100644
--- a/server/lib/pg-client/src/Database/PG/Query/Pool.hs
+++ b/server/lib/pg-client/src/Database/PG/Query/Pool.hs
@@ -15,6 +15,7 @@ module Database.PG.Query.Pool
PGPoolStats (..),
PGPoolMetrics (..),
getInUseConnections,
+ getMaxConnections,
defaultConnParams,
initPGPool,
resizePGPool,
@@ -97,6 +98,9 @@ data PGPoolMetrics = PGPoolMetrics
getInUseConnections :: PGPool -> IO Int
getInUseConnections = RP.getInUseResourceCount . _pool
+getMaxConnections :: PGPool -> IO Int
+getMaxConnections = RP.getMaxResources . _pool
+
data ConnParams = ConnParams
{ cpStripes :: !Int,
cpConns :: !Int,
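
With getMaxConnections exported alongside the existing getInUseConnections, callers can derive a saturation figure for the pool. A hedged sketch (poolSaturation is hypothetical, not part of pg-client):

    import Database.PG.Query.Pool (PGPool, getInUseConnections, getMaxConnections)
    import Prelude

    -- Hypothetical helper: fraction of the pool currently in use.
    poolSaturation :: PGPool -> IO Double
    poolSaturation pool = do
      inUse <- getInUseConnections pool
      maxConns <- getMaxConnections pool
      pure (fromIntegral inUse / fromIntegral (max 1 maxConns))
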
diff --git a/server/lib/pg-client/src/Database/PG/Query/URL.hs b/server/lib/pg-client/src/Database/PG/Query/URL.hs
new file mode 100644
index 00000000000..97cdf347565
--- /dev/null
+++ b/server/lib/pg-client/src/Database/PG/Query/URL.hs
@@ -0,0 +1,32 @@
+{-# LANGUAGE DerivingStrategies #-}
+{-# LANGUAGE OverloadedStrings #-}
+
+module Database.PG.Query.URL
+ ( encodeURLPassword,
+ )
+where
+
+import Data.Text (Text)
+import Data.Text qualified as Text
+import Data.Text.Encoding (decodeUtf8, encodeUtf8)
+import Network.HTTP.Types.URI (urlEncode)
+import Prelude
+
+-- | It is possible and common for Postgres URLs to have passwords containing
+-- special characters (e.g. AWS Secrets Manager passwords). Common URI-parsing
+-- libraries fail to parse Postgres URIs with such characters, and
+-- percent-encoding the whole URI makes Postgres reject it as well. This
+-- function therefore encodes only the password component of a URL.
+encodeURLPassword :: Text -> Text
+encodeURLPassword url =
+ case Text.breakOnEnd "://" url of
+ (_, "") -> url
+ (scheme, urlWOScheme) -> case Text.breakOnEnd "@" urlWOScheme of
+ ("", _) -> url
+ (auth, rest) -> case Text.splitOn ":" $ Text.dropEnd 1 auth of
+ [user] -> scheme <> user <> "@" <> rest
+ (user : pass) -> scheme <> user <> ":" <> encode' pass <> "@" <> rest
+ _ -> url
+ where
+ encode' arg =
+ decodeUtf8 $ urlEncode True (encodeUtf8 $ Text.intercalate ":" arg)
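
A worked example of the function above, consistent with the special-characters case in the test suite added below:

    {-# LANGUAGE OverloadedStrings #-}
    import qualified Data.Text.IO as Text
    import Database.PG.Query.URL (encodeURLPassword)

    main :: IO ()
    main =
      -- prints postgres://user:a%5B%3Asdf%28%24%23%29%5D@localhost:5432/chinook
      Text.putStrLn (encodeURLPassword "postgres://user:a[:sdf($#)]@localhost:5432/chinook")
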
diff --git a/server/lib/pg-client/test/Spec.hs b/server/lib/pg-client/test/Spec.hs
index 134c65de631..e491ad78f4a 100644
--- a/server/lib/pg-client/test/Spec.hs
+++ b/server/lib/pg-client/test/Spec.hs
@@ -24,6 +24,7 @@ import Jsonb (specJsonb)
import System.Environment qualified as Env
import Test.Hspec (describe, hspec, it, shouldBe, shouldReturn)
import Timeout (specTimeout)
+import URL (specURL)
import Prelude
-------------------------------------------------------------------------------
@@ -82,6 +83,7 @@ main = hspec $ do
specInterrupt
specTimeout
specJsonb
+ specURL
mkPool :: IO PGPool
mkPool = do
diff --git a/server/lib/pg-client/test/URL.hs b/server/lib/pg-client/test/URL.hs
new file mode 100644
index 00000000000..18c0c6d9364
--- /dev/null
+++ b/server/lib/pg-client/test/URL.hs
@@ -0,0 +1,58 @@
+{-# LANGUAGE DerivingStrategies #-}
+{-# LANGUAGE FlexibleInstances #-}
+{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE ScopedTypeVariables #-}
+{-# OPTIONS_GHC -Wno-unused-imports -Wno-orphans -Wno-name-shadowing #-}
+
+module URL (specURL) where
+
+import Database.PG.Query.URL
+import Test.Hspec
+import Prelude
+
+specURL :: Spec
+specURL = do
+ describe "Only the password from a postgres url is encoded if if exists" $ do
+ it "None Postgres connection urls succeed" $ do
+ let url = "jdbc:mysql://localhostok?user=root&password=pass&allowMultiQueries=true"
+ url `shouldBe` encodeURLPassword url
+
+ it "Postgres simple urls succeed" $ do
+ let url = "postgres://localhost"
+ url `shouldBe` encodeURLPassword url
+
+ it "Postgres urls with no username, password, or database succeed" $ do
+ let url = "postgres://localhost:5432"
+ url `shouldBe` encodeURLPassword url
+
+ it "Postgres urls with no username or password succeed" $ do
+ let url = "postgres://localhost:5432/chinook"
+ url `shouldBe` encodeURLPassword url
+
+ it "Postgres urls with no password succeed" $ do
+ let url = "postgres://user@localhost:5432/chinook"
+ url `shouldBe` encodeURLPassword url
+
+ it "Postgres urls with no password but a : succeed" $ do
+ let url = "postgres://user:@localhost:5432/chinook"
+ url `shouldBe` encodeURLPassword url
+
+ it "Postgres urls with no username succeed" $ do
+ let url = "postgres://:pass@localhost:5432/chinook"
+ url `shouldBe` encodeURLPassword url
+
+ it "Postgres urls with simple passwords succeed" $ do
+ let url = "postgres://user:pass@localhost:5432/chinook"
+ url `shouldBe` encodeURLPassword url
+
+ it "Postgres urls with special characters passwords succeed" $ do
+ let url = "postgres://user:a[:sdf($#)]@localhost:5432/chinook"
+ expected = "postgres://user:a%5B%3Asdf%28%24%23%29%5D@localhost:5432/chinook"
+
+ expected `shouldBe` encodeURLPassword url
+
+ it "Postgres urls with special characters with @ passwords succeed" $ do
+ let url = "postgres://user:a@[:sdf($@#@)]@localhost:5432/chinook"
+ expected = "postgres://user:a%40%5B%3Asdf%28%24%40%23%40%29%5D@localhost:5432/chinook"
+
+ expected `shouldBe` encodeURLPassword url
diff --git a/server/lib/resource-pool/Data/Pool.hs b/server/lib/resource-pool/Data/Pool.hs
index 861c44a3f0e..edcc3bc447d 100644
--- a/server/lib/resource-pool/Data/Pool.hs
+++ b/server/lib/resource-pool/Data/Pool.hs
@@ -34,6 +34,7 @@ module Data.Pool
createPool,
createPool',
resizePool,
+ getMaxResources,
tryTrimLocalPool,
tryTrimPool,
withResource,
@@ -231,6 +232,9 @@ resizePool Pool {..} maxResources' = do
"invalid maximum resource count " ++ show maxResources'
atomically $ writeTVar maxResources maxResources'
+getMaxResources :: Pool a -> IO Int
+getMaxResources Pool {..} = readTVarIO maxResources
+
-- | Attempt to reduce resource allocation below maximum by dropping some unused
-- resources
tryTrimLocalPool :: (a -> IO ()) -> TVar Int -> LocalPool a -> IO ()
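
getMaxResources pairs naturally with the existing resizePool. A hypothetical sketch of growing the cap by a delta, assuming resizePool :: Pool a -> Int -> IO () as the hunk above suggests:

    import Data.Pool (Pool, getMaxResources, resizePool)
    import Prelude

    -- Hypothetical helper: raise the pool's maximum resource count by a delta.
    growPoolBy :: Pool a -> Int -> IO ()
    growPoolBy pool delta = do
      current <- getMaxResources pool
      resizePool pool (current + delta)
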
diff --git a/server/src-lib/Hasura/App.hs b/server/src-lib/Hasura/App.hs
index 58b9f94e0de..c0f8c6fc705 100644
--- a/server/src-lib/Hasura/App.hs
+++ b/server/src-lib/Hasura/App.hs
@@ -713,10 +713,10 @@ instance HttpLog AppM where
buildExtraHttpLogMetadata _ _ = ()
- logHttpError logger loggingSettings userInfoM reqId waiReq req qErr headers _ _ =
+ logHttpError logger loggingSettings userInfoM reqId waiReq req qErr qTime cType headers _ _ =
unLoggerTracing logger
$ mkHttpLog
- $ mkHttpErrorLogContext userInfoM loggingSettings reqId waiReq req qErr Nothing Nothing headers
+ $ mkHttpErrorLogContext userInfoM loggingSettings reqId waiReq req qErr qTime cType headers
logHttpSuccess logger loggingSettings userInfoM reqId waiReq reqBody response compressedResponse qTime cType headers (CommonHttpLogMetadata rb batchQueryOpLogs, ()) _ =
unLoggerTracing logger
diff --git a/server/src-lib/Hasura/CachedTime.hs b/server/src-lib/Hasura/CachedTime.hs
new file mode 100644
index 00000000000..4300610f875
--- /dev/null
+++ b/server/src-lib/Hasura/CachedTime.hs
@@ -0,0 +1,45 @@
+-- safety for unsafePerformIO below
+{-# OPTIONS_GHC -fno-cse -fno-full-laziness #-}
+
+module Hasura.CachedTime (cachedRecentFormattedTimeAndZone) where
+
+import Control.Concurrent (forkIO, threadDelay)
+import Control.Exception (uninterruptibleMask_)
+import Data.ByteString.Char8 qualified as B8
+import Data.IORef
+import Data.Time.Clock qualified as Time
+import Data.Time.Format
+import Data.Time.LocalTime qualified as Time
+import Hasura.Prelude
+import System.IO.Unsafe
+
+-- | A fast timestamp source, updated roughly every second (at the whims of the
+-- RTS scheduler) by calling 'Time.getCurrentTimeZone' and 'Time.getCurrentTime'.
+--
+-- We also store an equivalent RFC7231 timestamp for use in the @Date@ HTTP
+-- header, avoiding 6% latency regression from computing it every time.
+-- We use this at call sites to try to avoid warp's code path that uses the
+-- auto-update library to do this same thing.
+--
+-- Formerly we used the auto-update library but observed bugs. See
+-- "Hasura.Logging" and #10662
+--
+-- NOTE: if we wanted to make this more resilient to this thread being
+-- descheduled for long periods, we could store a monotonic timestamp here (fast);
+-- logging threads could then do the same and determine whether the time is stale.
+-- I considered doing the same to also get more granular timestamps, but it seems
+-- addUTCTime makes this just as slow as getCurrentTime.
+cachedRecentFormattedTimeAndZone :: IORef (Time.UTCTime, Time.TimeZone, B8.ByteString)
+{-# NOINLINE cachedRecentFormattedTimeAndZone #-}
+cachedRecentFormattedTimeAndZone = unsafePerformIO do
+ tRef <- getTimeAndZone >>= newIORef
+ void $ forkIO $ uninterruptibleMask_ $ forever do
+ threadDelay $ 1000 * 1000
+ getTimeAndZone >>= writeIORef tRef
+ pure tRef
+ where
+ getTimeAndZone = do
+ !tz <- Time.getCurrentTimeZone
+ !t <- Time.getCurrentTime
+ let !tRFC7231 = B8.pack $ formatTime defaultTimeLocale "%a, %d %b %Y %H:%M:%S GMT" t
+ pure (t, tz, tRFC7231)
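
A hypothetical call-site sketch for the cache above: readers take the preformatted RFC 7231 string from the IORef instead of formatting a fresh timestamp per request (cachedDateHeader is illustrative, not part of this change):

    import qualified Data.ByteString.Char8 as B8
    import Data.IORef (readIORef)
    import Hasura.CachedTime (cachedRecentFormattedTimeAndZone)
    import Prelude

    -- Hypothetical call site: ~1s-granular Date header bytes, with no syscall
    -- or time formatting on the request path.
    cachedDateHeader :: IO B8.ByteString
    cachedDateHeader = do
      (_utc, _tz, rfc7231) <- readIORef cachedRecentFormattedTimeAndZone
      pure rfc7231
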
diff --git a/server/src-lib/Hasura/GC.hs b/server/src-lib/Hasura/GC.hs
index aea58b8d55b..47de9e77bbf 100644
--- a/server/src-lib/Hasura/GC.hs
+++ b/server/src-lib/Hasura/GC.hs
@@ -75,7 +75,7 @@ ourIdleGC (Logger logger) idleInterval minGCInterval maxNoGCInterval =
else do
when (areOverdue && not areIdle)
$ logger
- $ UnstructuredLog LevelWarn
+ $ UnstructuredLog LevelInfo
$ "Overdue for a major GC: forcing one even though we don't appear to be idle"
performMajorGC
startTimer >>= go (gcs + 1) (major_gcs + 1) True
diff --git a/server/src-lib/Hasura/GraphQL/Execute/Action.hs b/server/src-lib/Hasura/GraphQL/Execute/Action.hs
index 81cd334378a..9121c3845cf 100644
--- a/server/src-lib/Hasura/GraphQL/Execute/Action.hs
+++ b/server/src-lib/Hasura/GraphQL/Execute/Action.hs
@@ -363,7 +363,7 @@ resolveAsyncActionQuery userInfo annAction responseErrorsConfig =
\response -> makeActionResponseNoRelations annFields outputType HashMap.empty False <$> decodeValue response
IR.AsyncId -> pure $ AO.String $ actionIdToText actionId
IR.AsyncCreatedAt -> pure $ AO.toOrdered $ J.toJSON _alrCreatedAt
- IR.AsyncErrors -> pure $ AO.toOrdered $ J.toJSON $ mkQErrFromErrorValue _alrErrors
+ IR.AsyncErrors -> pure $ AO.toOrdered $ J.toJSON $ mkQErrFromErrorValue <$> _alrErrors
pure $ encJFromOrderedValue $ AO.object resolvedFields
IR.ASISource sourceName sourceConfig ->
let jsonAggSelect = mkJsonAggSelect outputType
@@ -413,12 +413,12 @@ resolveAsyncActionQuery userInfo annAction responseErrorsConfig =
tablePermissions = RS.TablePerm annBoolExpTrue Nothing
in RS.AnnSelectG annotatedFields tableFromExp tablePermissions tableArguments stringifyNumerics Nothing
where
- mkQErrFromErrorValue :: Maybe J.Value -> QErr
+ mkQErrFromErrorValue :: J.Value -> QErr
mkQErrFromErrorValue actionLogResponseError =
- let internal = ExtraInternal <$> (actionLogResponseError >>= (^? key "internal"))
+ let internal = ExtraInternal <$> (actionLogResponseError ^? key "internal")
internal' = if shouldIncludeInternal (_uiRole userInfo) responseErrorsConfig then internal else Nothing
- errorMessageText = fromMaybe "internal: error in parsing the action log" $ actionLogResponseError >>= (^? key "error" . _String)
- codeMaybe = actionLogResponseError >>= (^? key "code" . _String)
+ errorMessageText = fromMaybe "internal: error in parsing the action log" $ actionLogResponseError ^? key "error" . _String
+ codeMaybe = actionLogResponseError ^? key "code" . _String
code = maybe Unexpected ActionWebhookCode codeMaybe
in QErr [] HTTP.status500 errorMessageText code internal'
IR.AnnActionAsyncQuery _ actionId outputType asyncFields definitionList stringifyNumerics _ actionSource = annAction
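
The Action.hs hunk above shifts the Maybe out of mkQErrFromErrorValue and maps the now-total function over _alrErrors, so an action with no logged errors serializes as JSON null instead of a fabricated parse-failure QErr. A toy illustration of that shape (mkErr is a hypothetical stand-in for mkQErrFromErrorValue):

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Aeson ((.=))
    import qualified Data.Aeson as J
    import Prelude

    -- Toy stand-in (hypothetical) for mkQErrFromErrorValue: total on J.Value,
    -- with the Maybe handled by the caller via <$>.
    mkErr :: J.Value -> J.Value
    mkErr v = J.object ["error" .= v]

    main :: IO ()
    main = do
      print (J.encode (mkErr <$> (Nothing :: Maybe J.Value))) -- "null"
      print (J.encode (mkErr <$> Just (J.String "boom")))     -- "{\"error\":\"boom\"}"
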
diff --git a/server/src-lib/Hasura/GraphQL/Execute/Subscription/Poll/LiveQuery.hs b/server/src-lib/Hasura/GraphQL/Execute/Subscription/Poll/LiveQuery.hs
index 18e11f8000b..9fa56532e44 100644
--- a/server/src-lib/Hasura/GraphQL/Execute/Subscription/Poll/LiveQuery.hs
+++ b/server/src-lib/Hasura/GraphQL/Execute/Subscription/Poll/LiveQuery.hs
@@ -39,7 +39,7 @@ import Hasura.RQL.Types.Common (SourceName)
import Hasura.RQL.Types.Roles (RoleName)
import Hasura.RQL.Types.Subscription (SubscriptionType (..))
import Hasura.Server.Logging (ModelInfo (..), ModelInfoLog (..))
-import Hasura.Server.Prometheus (PrometheusMetrics (..), SubscriptionMetrics (..), liveQuerySubscriptionLabel, recordSubcriptionMetric)
+import Hasura.Server.Prometheus (PrometheusMetrics (..), SubscriptionMetrics (..), liveQuerySubscriptionLabel, recordSubscriptionMetric)
import Hasura.Server.Types (GranularPrometheusMetricsState (..), ModelInfoLogState (..))
import Refined (unrefine)
import System.Metrics.Prometheus.Gauge qualified as Prometheus.Gauge
@@ -121,7 +121,7 @@ pollLiveQuery pollerId pollerResponseState lqOpts (sourceName, sourceConfig) rol
(queryExecutionTime, mxRes) <- runDBSubscription @b sourceConfig query (over (each . _2) C._csVariables cohorts) resolvedConnectionTemplate
let dbExecTimeMetric = submDBExecTotalTime $ pmSubscriptionMetrics $ prometheusMetrics
- recordSubcriptionMetric
+ recordSubscriptionMetric
granularPrometheusMetricsState
True
operationNamesMap
@@ -215,7 +215,7 @@ pollLiveQuery pollerId pollerResponseState lqOpts (sourceName, sourceConfig) rol
when (modelInfoLogStatus' == ModelInfoLogOn) $ do
for_ (modelInfoList) $ \(ModelInfoPart modelName modelType modelSourceName modelSourceType modelQueryType) -> do
L.unLogger logger $ ModelInfoLog L.LevelInfo $ ModelInfo modelName (toTxt modelType) (toTxt <$> modelSourceName) (toTxt <$> modelSourceType) (toTxt modelQueryType) False
- recordSubcriptionMetric
+ recordSubscriptionMetric
granularPrometheusMetricsState
True
operationNamesMap
diff --git a/server/src-lib/Hasura/GraphQL/Execute/Subscription/Poll/StreamingQuery.hs b/server/src-lib/Hasura/GraphQL/Execute/Subscription/Poll/StreamingQuery.hs
index 3aa397f38be..ca552f1bd7a 100644
--- a/server/src-lib/Hasura/GraphQL/Execute/Subscription/Poll/StreamingQuery.hs
+++ b/server/src-lib/Hasura/GraphQL/Execute/Subscription/Poll/StreamingQuery.hs
@@ -41,7 +41,7 @@ import Hasura.RQL.Types.Roles (RoleName)
import Hasura.RQL.Types.Subscription (SubscriptionType (..))
import Hasura.SQL.Value (TxtEncodedVal (..))
import Hasura.Server.Logging (ModelInfo (..), ModelInfoLog (..))
-import Hasura.Server.Prometheus (PrometheusMetrics (..), SubscriptionMetrics (..), recordSubcriptionMetric, streamingSubscriptionLabel)
+import Hasura.Server.Prometheus (PrometheusMetrics (..), SubscriptionMetrics (..), recordSubscriptionMetric, streamingSubscriptionLabel)
import Hasura.Server.Types (GranularPrometheusMetricsState (..), ModelInfoLogState (..))
import Language.GraphQL.Draft.Syntax qualified as G
import Refined (unrefine)
@@ -289,7 +289,7 @@ pollStreamingQuery pollerId pollerResponseState streamingQueryOpts (sourceName,
(over (each . _2) C._csVariables $ fmap (fmap fst) cohorts)
resolvedConnectionTemplate
let dbExecTimeMetric = submDBExecTotalTime $ pmSubscriptionMetrics $ prometheusMetrics
- recordSubcriptionMetric
+ recordSubscriptionMetric
granularPrometheusMetricsState
True
operationNames
@@ -470,7 +470,7 @@ pollStreamingQuery pollerId pollerResponseState streamingQueryOpts (sourceName,
unLogger logger $ ModelInfoLog LevelInfo $ ModelInfo modelName (toTxt modelType) (toTxt <$> modelSourceName) (toTxt <$> modelSourceType) (toTxt modelQueryType) False
postPollHook pollDetails
let totalTimeMetric = submTotalTime $ pmSubscriptionMetrics $ prometheusMetrics
- recordSubcriptionMetric
+ recordSubscriptionMetric
granularPrometheusMetricsState
True
operationNames
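(Note: the LiveQuery.hs and StreamingQuery.hs hunks above are a pure spelling fix — the exported name recordSubcriptionMetric becomes recordSubscriptionMetric — so every module importing it from Hasura.Server.Prometheus is updated in the same patch.)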
diff --git a/server/src-lib/Hasura/GraphQL/Execute/Subscription/State.hs b/server/src-lib/Hasura/GraphQL/Execute/Subscription/State.hs
index 667e2540101..3688397ef1a 100644
--- a/server/src-lib/Hasura/GraphQL/Execute/Subscription/State.hs
+++ b/server/src-lib/Hasura/GraphQL/Execute/Subscription/State.hs
@@ -56,7 +56,7 @@ import Hasura.RQL.Types.Common (SourceName)
import Hasura.SQL.AnyBackend qualified as AB
import Hasura.Server.Metrics (ServerMetrics (..))
import Hasura.Server.Prometheus
- ( DynamicSubscriptionLabel (..),
+ ( DynamicGraphqlOperationLabel (..),
PrometheusMetrics (..),
SubscriptionLabel (..),
SubscriptionMetrics (..),
@@ -258,7 +258,7 @@ addLiveQuery
liftIO $ Prometheus.Gauge.inc $ submActiveLiveQueryPollers $ pmSubscriptionMetrics $ prometheusMetrics
liftIO $ EKG.Gauge.inc $ smActiveSubscriptions serverMetrics
- let promMetricGranularLabel = SubscriptionLabel liveQuerySubscriptionLabel (Just $ DynamicSubscriptionLabel (Just parameterizedQueryHash) operationName)
+ let promMetricGranularLabel = SubscriptionLabel liveQuerySubscriptionLabel (Just $ DynamicGraphqlOperationLabel (Just parameterizedQueryHash) operationName)
promMetricLabel = SubscriptionLabel liveQuerySubscriptionLabel Nothing
let numSubscriptionMetric = submActiveSubscriptions $ pmSubscriptionMetrics $ prometheusMetrics
recordMetricWithLabel
@@ -390,7 +390,7 @@ addStreamSubscriptionQuery
EKG.Gauge.inc $ smActiveSubscriptions serverMetrics
EKG.Gauge.inc $ smActiveStreamingSubscriptions serverMetrics
- let promMetricGranularLabel = SubscriptionLabel streamingSubscriptionLabel (Just $ DynamicSubscriptionLabel (Just parameterizedQueryHash) operationName)
+ let promMetricGranularLabel = SubscriptionLabel streamingSubscriptionLabel (Just $ DynamicGraphqlOperationLabel (Just parameterizedQueryHash) operationName)
promMetricLabel = SubscriptionLabel streamingSubscriptionLabel Nothing
numSubscriptionMetric = submActiveSubscriptions $ pmSubscriptionMetrics $ prometheusMetrics
recordMetricWithLabel
@@ -470,7 +470,7 @@ removeLiveQuery logger serverMetrics prometheusMetrics lqState lqId@(SubscriberD
<*> TMap.null newOps
when cohortIsEmpty $ TMap.delete cohortId cohortMap
handlerIsEmpty <- TMap.null cohortMap
- let promMetricGranularLabel = SubscriptionLabel liveQuerySubscriptionLabel (Just $ DynamicSubscriptionLabel (Just parameterizedQueryHash) maybeOperationName)
+ let promMetricGranularLabel = SubscriptionLabel liveQuerySubscriptionLabel (Just $ DynamicGraphqlOperationLabel (Just parameterizedQueryHash) maybeOperationName)
promMetricLabel = SubscriptionLabel liveQuerySubscriptionLabel Nothing
-- when there is no need for handler i.e., this happens to be the last
-- operation, take the ref for the polling thread to cancel it
@@ -569,7 +569,7 @@ removeStreamingQuery logger serverMetrics prometheusMetrics subscriptionState (S
<*> TMap.null newOps
when cohortIsEmpty $ TMap.delete currentCohortId cohortMap
handlerIsEmpty <- TMap.null cohortMap
- let promMetricGranularLabel = SubscriptionLabel streamingSubscriptionLabel (Just $ DynamicSubscriptionLabel (Just parameterizedQueryHash) maybeOperationName)
+ let promMetricGranularLabel = SubscriptionLabel streamingSubscriptionLabel (Just $ DynamicGraphqlOperationLabel (Just parameterizedQueryHash) maybeOperationName)
promMetricLabel = SubscriptionLabel streamingSubscriptionLabel Nothing
-- when there is no need for handler i.e., this happens to be the last
-- operation, take the ref for the polling thread to cancel it
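(The rename from DynamicSubscriptionLabel to DynamicGraphqlOperationLabel in this file reflects that the (parameterized query hash, operation name) label pair is no longer subscription-specific: the HTTP and WebSocket transports below attach the same pair to request counters. A minimal sketch of what the two label shapes contribute, assuming the ToLabels instance shown later in the Hasura.Server.Prometheus hunk; someHash and someOpName are placeholders, not engine bindings:

  -- Hypothetical illustration only; the real types live in Hasura.Server.Prometheus.
  granular, coarse :: DynamicGraphqlOperationLabel
  granular = DynamicGraphqlOperationLabel (Just someHash) (Just someOpName)
  coarse   = DynamicGraphqlOperationLabel Nothing Nothing
  -- toLabels granular ~ [("parameterized_query_hash", <hash>), ("operation_name", <name>)]
  -- toLabels coarse   ~ []  -- used when granular metrics are switched off
)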
diff --git a/server/src-lib/Hasura/GraphQL/Transport/HTTP.hs b/server/src-lib/Hasura/GraphQL/Transport/HTTP.hs
index 4d9073eaf4b..ceb56803fb5 100644
--- a/server/src-lib/Hasura/GraphQL/Transport/HTTP.hs
+++ b/server/src-lib/Hasura/GraphQL/Transport/HTTP.hs
@@ -88,8 +88,11 @@ import Hasura.Server.Limits
import Hasura.Server.Logging
import Hasura.Server.Logging qualified as L
import Hasura.Server.Prometheus
- ( GraphQLRequestMetrics (..),
+ ( GranularPrometheusMetricsState,
+ GraphQLRequestMetrics (..),
PrometheusMetrics (..),
+ ResponseStatus (..),
+ recordGraphqlOperationMetric,
)
import Hasura.Server.Telemetry.Counters qualified as Telem
import Hasura.Server.Types (HeaderPrecedence, ModelInfoLogState (..), MonadGetPolicies (..), ReadOnlyMode (..), RemoteSchemaResponsePriority (..), RequestId (..))
@@ -100,7 +103,7 @@ import Hasura.Tracing qualified as Tracing
import Language.GraphQL.Draft.Syntax qualified as G
import Network.HTTP.Types qualified as HTTP
import Network.Wai.Extended qualified as Wai
-import System.Metrics.Prometheus.Counter qualified as Prometheus.Counter
+import System.Metrics.Prometheus.CounterVector qualified as Prometheus.CounterVector
import System.Metrics.Prometheus.Histogram qualified as Prometheus.Histogram
-- | Encapsulates a function that stores a query response in the cache.
@@ -329,12 +332,13 @@ runGQ ::
ResponseInternalErrorsConfig ->
m (GQLQueryOperationSuccessLog, HttpResponse (Maybe GQResponse, EncJSON))
runGQ env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePriority headerPrecedence prometheusMetrics logger agentLicenseKey reqId userInfo ipAddress reqHeaders queryType reqUnparsed responseErrorsConfig = do
+ granularPrometheusMetricsState <- runGetPrometheusMetricsGranularity
getModelInfoLogStatus' <- runGetModelInfoLogStatus
modelInfoLogStatus <- liftIO getModelInfoLogStatus'
let gqlMetrics = pmGraphQLRequestMetrics prometheusMetrics
- (totalTime, (response, parameterizedQueryHash, gqlOpType, modelInfoListForLogging, queryCachedStatus)) <- withElapsedTime $ do
- (reqParsed, runLimits, queryParts) <- Tracing.newSpan "Parse GraphQL" $ observeGQLQueryError gqlMetrics Nothing $ do
+ (totalTime, (response, parameterizedQueryHash, gqlOpType, gqlOperationName, modelInfoListForLogging, queryCachedStatus)) <- withElapsedTime $ do
+ (reqParsed, runLimits, queryParts) <- Tracing.newSpan "Parse GraphQL" $ observeGQLQueryError granularPrometheusMetricsState gqlMetrics Nothing (_grOperationName reqUnparsed) Nothing $ do
-- 1. Run system authorization on the 'reqUnparsed :: GQLReqUnparsed' query.
reqParsed <-
E.checkGQLExecution userInfo (reqHeaders, ipAddress) enableAL sc reqUnparsed reqId
@@ -348,7 +352,8 @@ runGQ env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePriority header
return (reqParsed, runLimits, queryParts)
let gqlOpType = G._todType queryParts
- observeGQLQueryError gqlMetrics (Just gqlOpType) $ do
+ let gqlOperationName = getOpNameFromParsedReq reqParsed
+ observeGQLQueryError granularPrometheusMetricsState gqlMetrics (Just gqlOpType) gqlOperationName Nothing $ do
-- 3. Construct the remainder of the execution plan.
let maybeOperationName = _unOperationName <$> getOpNameFromParsedReq reqParsed
for_ maybeOperationName $ \nm ->
@@ -374,13 +379,13 @@ runGQ env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePriority header
-- 4. Execute the execution plan producing a 'AnnotatedResponse'.
(response, queryCachedStatus, modelInfoFromExecution) <- executePlan reqParsed runLimits execPlan
- return (response, parameterizedQueryHash, gqlOpType, ((modelInfoList <> (modelInfoFromExecution))), queryCachedStatus)
+ return (response, parameterizedQueryHash, gqlOpType, gqlOperationName, ((modelInfoList <> (modelInfoFromExecution))), queryCachedStatus)
-- 5. Record telemetry
recordTimings totalTime response
-- 6. Record Prometheus metrics (query successes)
- liftIO $ recordGQLQuerySuccess gqlMetrics totalTime gqlOpType
+ liftIO $ recordGQLQuerySuccess granularPrometheusMetricsState gqlMetrics totalTime gqlOperationName parameterizedQueryHash gqlOpType
-- 7. Return the response along with logging metadata.
let requestSize = LBS.length $ J.encode reqUnparsed
@@ -603,42 +608,45 @@ runGQ env sqlGenCtx sc enableAL readOnlyMode remoteSchemaResponsePriority header
( MonadIO n,
MonadError e n
) =>
+ IO GranularPrometheusMetricsState ->
GraphQLRequestMetrics ->
Maybe G.OperationType ->
+ Maybe OperationName ->
+ Maybe ParameterizedQueryHash ->
n a ->
n a
- observeGQLQueryError gqlMetrics mOpType action =
+ observeGQLQueryError granularPrometheusMetricsState gqlMetrics mOpType mOpName mQHash action =
catchError (fmap Right action) (pure . Left) >>= \case
Right result ->
pure result
Left err -> do
- case mOpType of
- Nothing ->
- liftIO $ Prometheus.Counter.inc (gqlRequestsUnknownFailure gqlMetrics)
- Just opType -> case opType of
- G.OperationTypeQuery ->
- liftIO $ Prometheus.Counter.inc (gqlRequestsQueryFailure gqlMetrics)
- G.OperationTypeMutation ->
- liftIO $ Prometheus.Counter.inc (gqlRequestsMutationFailure gqlMetrics)
- G.OperationTypeSubscription ->
- -- We do not collect metrics for subscriptions at the request level.
- pure ()
+ recordGraphqlOperationMetric
+ granularPrometheusMetricsState
+ mOpType
+ Failed
+ mOpName
+ mQHash
+ (Prometheus.CounterVector.inc $ gqlRequests gqlMetrics)
throwError err
-- Tally and record execution times for successful GraphQL requests.
recordGQLQuerySuccess ::
- GraphQLRequestMetrics -> DiffTime -> G.OperationType -> IO ()
- recordGQLQuerySuccess gqlMetrics totalTime = \case
- G.OperationTypeQuery -> liftIO $ do
- Prometheus.Counter.inc (gqlRequestsQuerySuccess gqlMetrics)
- Prometheus.Histogram.observe (gqlExecutionTimeSecondsQuery gqlMetrics) (realToFrac totalTime)
- G.OperationTypeMutation -> liftIO $ do
- Prometheus.Counter.inc (gqlRequestsMutationSuccess gqlMetrics)
- Prometheus.Histogram.observe (gqlExecutionTimeSecondsMutation gqlMetrics) (realToFrac totalTime)
- G.OperationTypeSubscription ->
- -- We do not collect metrics for subscriptions at the request level.
- -- Furthermore, we do not serve GraphQL subscriptions over HTTP.
- pure ()
+ IO GranularPrometheusMetricsState -> GraphQLRequestMetrics -> DiffTime -> Maybe OperationName -> ParameterizedQueryHash -> G.OperationType -> IO ()
+ recordGQLQuerySuccess granularPrometheusMetricsState gqlMetrics totalTime opName qHash opType = do
+ recordGraphqlOperationMetric
+ granularPrometheusMetricsState
+ (Just opType)
+ Success
+ opName
+ (Just qHash)
+ (Prometheus.CounterVector.inc $ gqlRequests gqlMetrics)
+ case opType of
+ G.OperationTypeQuery -> liftIO $ Prometheus.Histogram.observe (gqlExecutionTimeSecondsQuery gqlMetrics) (realToFrac totalTime)
+ G.OperationTypeMutation -> liftIO $ Prometheus.Histogram.observe (gqlExecutionTimeSecondsMutation gqlMetrics) (realToFrac totalTime)
+ G.OperationTypeSubscription ->
+ -- We do not collect metrics for subscriptions at the request level.
+ -- Furthermore, we do not serve GraphQL subscriptions over HTTP.
+ pure ()
coalescePostgresMutations ::
EB.ExecutionPlan ->
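(With this change the HTTP transport no longer pattern-matches on the operation type to pick one of seven counters; both the success and failure paths funnel through recordGraphqlOperationMetric against a single labelled counter family. A sketch of the new increment shape, derived from the partial application visible in this diff — CounterVector.inc takes the vector and then a label value:

  incRequest :: GraphQLRequestMetrics -> GraphQLRequestsLabels -> IO ()
  incRequest metrics = Prometheus.CounterVector.inc (gqlRequests metrics)

recordGraphqlOperationMetric then decides whether the label value carries the granular DynamicGraphqlOperationLabel or the coarse Nothing variant.)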
diff --git a/server/src-lib/Hasura/GraphQL/Transport/WebSocket.hs b/server/src-lib/Hasura/GraphQL/Transport/WebSocket.hs
index c68bd268e98..c5ac0300107 100644
--- a/server/src-lib/Hasura/GraphQL/Transport/WebSocket.hs
+++ b/server/src-lib/Hasura/GraphQL/Transport/WebSocket.hs
@@ -101,6 +101,8 @@ import Hasura.Server.Metrics (ServerMetrics (..))
import Hasura.Server.Prometheus
( GraphQLRequestMetrics (..),
PrometheusMetrics (..),
+ ResponseStatus (..),
+ recordGraphqlOperationMetric,
)
import Hasura.Server.Telemetry.Counters qualified as Telem
import Hasura.Server.Types (GranularPrometheusMetricsState (..), HeaderPrecedence, ModelInfoLogState (..), MonadGetPolicies (..), RemoteSchemaResponsePriority, RequestId, getRequestId)
@@ -115,7 +117,7 @@ import Network.HTTP.Types qualified as HTTP
import Network.WebSockets qualified as WS
import Refined (unrefine)
import StmContainers.Map qualified as STMMap
-import System.Metrics.Prometheus.Counter qualified as Prometheus.Counter
+import System.Metrics.Prometheus.CounterVector qualified as Prometheus.CounterVector
import System.Metrics.Prometheus.Histogram qualified as Prometheus.Histogram
-- | 'ES.SubscriberDetails' comes from 'Hasura.GraphQL.Execute.LiveQuery.State.addLiveQuery'. We use
@@ -451,6 +453,7 @@ onStart ::
onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables (StartMsg opId q) onMessageActions responseErrorsConfig headerPrecedence = catchAndIgnore $ do
modelInfoLogStatus' <- runGetModelInfoLogStatus
modelInfoLogStatus <- liftIO modelInfoLogStatus'
+ granularPrometheusMetricsState <- runGetPrometheusMetricsGranularity
timerTot <- startTimer
op <- liftIO $ STM.atomically $ STMMap.lookup opId opMap
@@ -458,7 +461,7 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
-- we process all operations on a websocket connection serially:
when (isJust op)
$ withComplete
- $ sendStartErr
+ $ sendStartErr granularPrometheusMetricsState (snd =<< op)
$ "an operation already exists with this id: "
<> unOperationId opId
@@ -467,10 +470,10 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
CSInitialised WsClientState {..} -> return (wscsUserInfo, wscsReqHeaders, wscsIpAddress)
CSInitError initErr -> do
let e = "cannot start as connection_init failed with: " <> initErr
- withComplete $ sendStartErr e
+ withComplete $ sendStartErr granularPrometheusMetricsState (_grOperationName q) e
CSNotInitialised _ _ -> do
let e = "start received before the connection is initialised"
- withComplete $ sendStartErr e
+ withComplete $ sendStartErr granularPrometheusMetricsState (_grOperationName q) e
(requestId, reqHdrs) <- liftIO $ getRequestId origReqHdrs
sc <- liftIO $ getSchemaCacheWithVersion appStateRef
@@ -488,9 +491,9 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
(reqParsed, queryParts) <- Tracing.newSpan "Parse GraphQL" $ do
reqParsedE <- lift $ E.checkGQLExecution userInfo (reqHdrs, ipAddress) enableAL sc q requestId
- reqParsed <- onLeft reqParsedE (withComplete . preExecErr requestId Nothing)
+ reqParsed <- onLeft reqParsedE (withComplete . preExecErr granularPrometheusMetricsState requestId Nothing (_grOperationName q) Nothing)
queryPartsE <- runExceptT $ getSingleOperation reqParsed
- queryParts <- onLeft queryPartsE (withComplete . preExecErr requestId Nothing)
+ queryParts <- onLeft queryPartsE (withComplete . preExecErr granularPrometheusMetricsState requestId Nothing (getOpNameFromParsedReq reqParsed) Nothing)
pure (reqParsed, queryParts)
let gqlOpType = G._todType queryParts
@@ -519,7 +522,7 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
responseErrorsConfig
headerPrecedence
- (parameterizedQueryHash, execPlan, modelInfoList) <- onLeft execPlanE (withComplete . preExecErr requestId (Just gqlOpType))
+ (parameterizedQueryHash, execPlan, modelInfoList) <- onLeft execPlanE (withComplete . preExecErr granularPrometheusMetricsState requestId (Just gqlOpType) opName Nothing)
case execPlan of
E.QueryExecutionPlan queryPlan asts dirMap -> do
@@ -535,7 +538,7 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
ResponseCached cachedResponseData -> do
logQueryLog logger $ QueryLog q Nothing requestId QueryLogKindCached
let reportedExecutionTime = 0
- liftIO $ recordGQLQuerySuccess reportedExecutionTime gqlOpType
+ liftIO $ recordGQLQuerySuccess granularPrometheusMetricsState reportedExecutionTime opName parameterizedQueryHash gqlOpType
modelInfoLogging modelInfoList True modelInfoLogStatus
sendSuccResp cachedResponseData opName parameterizedQueryHash $ ES.SubscriptionMetadata reportedExecutionTime
ResponseUncached storeResponseM -> do
@@ -582,7 +585,7 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
let (allResponses', allModelInfo) = unzip allResponses
pure $ (AnnotatedResponsePart 0 Telem.Local (encJFromList (map arpResponse allResponses')) [], concat allModelInfo)
in getResponse
- sendResultFromFragments Telem.Query timerTot requestId conclusion opName parameterizedQueryHash gqlOpType modelInfoList modelInfoLogStatus
+ sendResultFromFragments granularPrometheusMetricsState Telem.Query timerTot requestId conclusion opName parameterizedQueryHash gqlOpType modelInfoList modelInfoLogStatus
case (storeResponseM, conclusion) of
(Just ResponseCacher {..}, Right results) -> do
let (key, (compositeValue')) = unzip $ InsOrdHashMap.toList results
@@ -608,7 +611,7 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
$ doQErr
$ runPGMutationTransaction requestId q userInfo logger sourceConfig resolvedConnectionTemplate pgMutations
-- we do not construct result fragments since we have only one result
- handleResult requestId gqlOpType resp \(telemTimeIO_DT, results) -> do
+ handleResult granularPrometheusMetricsState requestId gqlOpType opName parameterizedQueryHash resp \(telemTimeIO_DT, results) -> do
let telemQueryType = Telem.Query
telemLocality = Telem.Local
telemTimeIO = convertDuration telemTimeIO_DT
@@ -618,7 +621,7 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
$ ES.SubscriptionMetadata telemTimeIO_DT
-- Telemetry. NOTE: don't time network IO:
Telem.recordTimingMetric Telem.RequestDimensions {..} Telem.RequestTimings {..}
- liftIO $ recordGQLQuerySuccess totalTime gqlOpType
+ liftIO $ recordGQLQuerySuccess granularPrometheusMetricsState totalTime opName parameterizedQueryHash gqlOpType
-- we are not in the transaction case; proceeding normally
Nothing -> do
@@ -666,7 +669,7 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
let (allResponses', allModelInfo) = unzip allResponses
pure $ (AnnotatedResponsePart 0 Telem.Local (encJFromList (map arpResponse allResponses')) [], concat allModelInfo)
in getResponse
- sendResultFromFragments Telem.Query timerTot requestId conclusion opName parameterizedQueryHash gqlOpType modelInfoList modelInfoLogStatus
+ sendResultFromFragments granularPrometheusMetricsState Telem.Query timerTot requestId conclusion opName parameterizedQueryHash gqlOpType modelInfoList modelInfoLogStatus
liftIO $ sendCompleted (Just requestId) (Just parameterizedQueryHash)
E.SubscriptionExecutionPlan (subExec, modifier) -> do
case subExec of
@@ -718,11 +721,10 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
asyncActionQueryLive
E.SEOnSourceDB (E.SSLivequery actionIds liveQueryBuilder) -> do
actionLogMapE <- fmap fst <$> runExceptT (EA.fetchActionLogResponses actionIds)
- actionLogMap <- onLeft actionLogMapE (withComplete . preExecErr requestId (Just gqlOpType))
- granularPrometheusMetricsState <- runGetPrometheusMetricsGranularity
+ actionLogMap <- onLeft actionLogMapE (withComplete . preExecErr granularPrometheusMetricsState requestId (Just gqlOpType) opName (Just parameterizedQueryHash))
modelInfoLogStatus'' <- runGetModelInfoLogStatus
opMetadataE <- liftIO $ startLiveQuery opName liveQueryBuilder parameterizedQueryHash requestId actionLogMap granularPrometheusMetricsState modifier modelInfoLogStatus''
- lqId <- onLeft opMetadataE (withComplete . preExecErr requestId (Just gqlOpType))
+ lqId <- onLeft opMetadataE (withComplete . preExecErr granularPrometheusMetricsState requestId (Just gqlOpType) opName (Just parameterizedQueryHash))
-- Update async action query subscription state
case NE.nonEmpty (toList actionIds) of
Nothing -> do
@@ -747,11 +749,16 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
onUnexpectedException
asyncActionQueryLive
E.SEOnSourceDB (E.SSStreaming rootFieldName streamQueryBuilder) -> do
- granularPrometheusMetricsState <- runGetPrometheusMetricsGranularity
modelInfoLogStatus'' <- runGetModelInfoLogStatus
liftIO $ startStreamingQuery rootFieldName streamQueryBuilder parameterizedQueryHash requestId granularPrometheusMetricsState modifier modelInfoLogStatus''
- liftIO $ Prometheus.Counter.inc (gqlRequestsSubscriptionSuccess gqlMetrics)
+ recordGraphqlOperationMetric
+ granularPrometheusMetricsState
+ (Just G.OperationTypeSubscription)
+ Success
+ opName
+ (Just parameterizedQueryHash)
+ (Prometheus.CounterVector.inc $ gqlRequests gqlMetrics)
liftIO $ logOpEv ODStarted (Just requestId) (Just parameterizedQueryHash)
where
sendDataMsg = WS._wsaGetDataMessageType onMessageActions
@@ -787,18 +794,21 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
handleResult ::
forall a.
+ IO GranularPrometheusMetricsState ->
RequestId ->
G.OperationType ->
+ Maybe OperationName ->
+ ParameterizedQueryHash ->
Either (Either GQExecError QErr) a ->
(a -> ExceptT () m ()) ->
ExceptT () m ()
- handleResult requestId gqlOpType r f = case r of
- Left (Left err) -> postExecErr' gqlOpType err
- Left (Right err) -> postExecErr requestId gqlOpType err
+ handleResult granularPrometheusMetricsState requestId gqlOpType mOpName pqh r f = case r of
+ Left (Left err) -> postExecErr' granularPrometheusMetricsState gqlOpType mOpName pqh err
+ Left (Right err) -> postExecErr granularPrometheusMetricsState requestId gqlOpType mOpName pqh err
Right results -> f results
- sendResultFromFragments telemQueryType timerTot requestId r opName pqh gqlOpType modelInfoList getModelInfoLogStatus =
- handleResult requestId gqlOpType r \results -> do
+ sendResultFromFragments granularPrometheusMetricsState telemQueryType timerTot requestId r opName pqh gqlOpType modelInfoList getModelInfoLogStatus =
+ handleResult granularPrometheusMetricsState requestId gqlOpType opName pqh r \results -> do
let (key, (compositeValue')) = unzip $ InsOrdHashMap.toList results
(annotatedResp, model) = unzip compositeValue'
results' = InsOrdHashMap.fromList $ zip key annotatedResp
@@ -814,7 +824,7 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
-- Telemetry. NOTE: don't time network IO:
Telem.recordTimingMetric Telem.RequestDimensions {..} Telem.RequestTimings {..}
modelInfoLogging (modelInfoList <> modelInfoList') False getModelInfoLogStatus
- liftIO $ (recordGQLQuerySuccess totalTime gqlOpType)
+ liftIO $ (recordGQLQuerySuccess granularPrometheusMetricsState totalTime opName pqh gqlOpType)
runRemoteGQ ::
RequestId ->
@@ -885,7 +895,7 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
getErrFn ERTLegacy = encodeQErr
getErrFn ERTGraphqlCompliant = encodeGQLErr
- sendStartErr e = do
+ sendStartErr granularPrometheusMetricsState mOpName e = do
let errFn = getErrFn errRespTy
sendMsg wsConn
$ SMErr
@@ -893,7 +903,7 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
$ errFn False
$ err400 StartFailed e
liftIO $ logOpEv (ODProtoErr e) Nothing Nothing
- liftIO $ reportGQLQueryError Nothing
+ liftIO $ reportGQLQueryError granularPrometheusMetricsState mOpName Nothing Nothing
liftIO $ closeConnAction wsConn opId (T.unpack e)
sendCompleted reqId paramQueryHash = do
@@ -901,24 +911,27 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
logOpEv ODCompleted reqId paramQueryHash
postExecErr ::
+ IO GranularPrometheusMetricsState ->
RequestId ->
G.OperationType ->
+ Maybe OperationName ->
+ ParameterizedQueryHash ->
QErr ->
ExceptT () m ()
- postExecErr reqId gqlOpType qErr = do
+ postExecErr granularPrometheusMetricsState reqId gqlOpType mOpName pqh qErr = do
let errFn = getErrFn errRespTy False
liftIO $ logOpEv (ODQueryErr qErr) (Just reqId) Nothing
- postExecErr' gqlOpType $ GQExecError $ pure $ errFn qErr
+ postExecErr' granularPrometheusMetricsState gqlOpType mOpName pqh $ GQExecError $ pure $ errFn qErr
- postExecErr' :: G.OperationType -> GQExecError -> ExceptT () m ()
- postExecErr' gqlOpType qErr =
+ postExecErr' :: IO GranularPrometheusMetricsState -> G.OperationType -> Maybe OperationName -> ParameterizedQueryHash -> GQExecError -> ExceptT () m ()
+ postExecErr' granularPrometheusMetricsState gqlOpType mOpName pqh qErr =
liftIO $ do
- reportGQLQueryError (Just gqlOpType)
+ reportGQLQueryError granularPrometheusMetricsState mOpName (Just pqh) (Just gqlOpType)
postExecErrAction wsConn opId qErr
-- why wouldn't pre exec error use graphql response?
- preExecErr reqId mGqlOpType qErr = do
- liftIO $ reportGQLQueryError mGqlOpType
+ preExecErr granularPrometheusMetricsState reqId mGqlOpType mOpName pqh qErr = do
+ liftIO $ reportGQLQueryError granularPrometheusMetricsState mOpName pqh mGqlOpType
liftIO $ sendError reqId qErr
sendError reqId qErr = do
@@ -1049,30 +1062,32 @@ onStart enabledLogTypes agentLicenseKey serverEnv wsConn shouldCaptureVariables
catchAndIgnore :: ExceptT () m () -> m ()
catchAndIgnore m = void $ runExceptT m
- reportGQLQueryError :: Maybe G.OperationType -> IO ()
- reportGQLQueryError = \case
- Nothing ->
- liftIO $ Prometheus.Counter.inc (gqlRequestsUnknownFailure gqlMetrics)
- Just opType -> case opType of
- G.OperationTypeQuery ->
- liftIO $ Prometheus.Counter.inc (gqlRequestsQueryFailure gqlMetrics)
- G.OperationTypeMutation ->
- liftIO $ Prometheus.Counter.inc (gqlRequestsMutationFailure gqlMetrics)
- G.OperationTypeSubscription ->
- liftIO $ Prometheus.Counter.inc (gqlRequestsSubscriptionFailure gqlMetrics)
+ reportGQLQueryError :: IO GranularPrometheusMetricsState -> Maybe OperationName -> Maybe ParameterizedQueryHash -> Maybe G.OperationType -> IO ()
+ reportGQLQueryError granularPrometheusMetricsState mOpName mQHash mOpType =
+ recordGraphqlOperationMetric
+ granularPrometheusMetricsState
+ mOpType
+ Failed
+ mOpName
+ mQHash
+ (Prometheus.CounterVector.inc $ gqlRequests gqlMetrics)
-- Tally and record execution times for successful GraphQL requests.
- recordGQLQuerySuccess :: DiffTime -> G.OperationType -> IO ()
- recordGQLQuerySuccess totalTime = \case
- G.OperationTypeQuery -> liftIO $ do
- Prometheus.Counter.inc (gqlRequestsQuerySuccess gqlMetrics)
- Prometheus.Histogram.observe (gqlExecutionTimeSecondsQuery gqlMetrics) (realToFrac totalTime)
- G.OperationTypeMutation -> liftIO $ do
- Prometheus.Counter.inc (gqlRequestsMutationSuccess gqlMetrics)
- Prometheus.Histogram.observe (gqlExecutionTimeSecondsMutation gqlMetrics) (realToFrac totalTime)
- G.OperationTypeSubscription ->
- -- We do not collect metrics for subscriptions at the request level.
- pure ()
+ recordGQLQuerySuccess :: IO GranularPrometheusMetricsState -> DiffTime -> Maybe OperationName -> ParameterizedQueryHash -> G.OperationType -> IO ()
+ recordGQLQuerySuccess granularPrometheusMetricsState totalTime mOpName qHash opType = do
+ recordGraphqlOperationMetric
+ granularPrometheusMetricsState
+ (Just opType)
+ Success
+ mOpName
+ (Just qHash)
+ (Prometheus.CounterVector.inc $ gqlRequests gqlMetrics)
+ case opType of
+ G.OperationTypeQuery -> liftIO $ Prometheus.Histogram.observe (gqlExecutionTimeSecondsQuery gqlMetrics) (realToFrac totalTime)
+ G.OperationTypeMutation -> liftIO $ Prometheus.Histogram.observe (gqlExecutionTimeSecondsMutation gqlMetrics) (realToFrac totalTime)
+ G.OperationTypeSubscription ->
+ -- We do not collect metrics for subscriptions at the request level.
+ pure ()
onMessage ::
( MonadIO m,
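(Note the corresponding restructuring in onStart: runGetPrometheusMetricsGranularity is now read once near the top of the handler instead of separately inside the live-query and streaming branches, so the same IO action can also be threaded into every error-reporting helper — sendStartErr, preExecErr, and postExecErr — which previously had no access to it.)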
diff --git a/server/src-lib/Hasura/GraphQL/Transport/WebSocket/Server.hs b/server/src-lib/Hasura/GraphQL/Transport/WebSocket/Server.hs
index 4e5b02409e8..f121b5bda8a 100644
--- a/server/src-lib/Hasura/GraphQL/Transport/WebSocket/Server.hs
+++ b/server/src-lib/Hasura/GraphQL/Transport/WebSocket/Server.hs
@@ -72,7 +72,7 @@ import Hasura.Server.Auth (AuthMode, compareAuthMode)
import Hasura.Server.Cors (CorsPolicy)
import Hasura.Server.Init.Config (AllowListStatus (..), WSConnectionInitTimeout (..))
import Hasura.Server.Prometheus
- ( DynamicSubscriptionLabel (..),
+ ( DynamicGraphqlOperationLabel (..),
PrometheusMetrics (..),
recordMetricWithLabel,
)
@@ -639,8 +639,8 @@ createServerApp getMetricsConfig wsConnInitTimeout (WSServer logger@(L.Logger wr
messageDetails = MessageDetails (SB.fromLBS msg) messageLength
parameterizedQueryHash = wsInfo >>= _wseiParameterizedQueryHash
operationName = wsInfo >>= _wseiOperationName
- promMetricGranularLabel = DynamicSubscriptionLabel parameterizedQueryHash operationName
- promMetricLabel = DynamicSubscriptionLabel Nothing Nothing
+ promMetricGranularLabel = DynamicGraphqlOperationLabel parameterizedQueryHash operationName
+ promMetricLabel = DynamicGraphqlOperationLabel Nothing Nothing
websocketBytesSentMetric = pmWebSocketBytesSent prometheusMetrics
granularPrometheusMetricsState <- runGetPrometheusMetricsGranularity
liftIO $ do
diff --git a/server/src-lib/Hasura/Logging.hs b/server/src-lib/Hasura/Logging.hs
index 8493e141e0b..f7dc1aff2ba 100644
--- a/server/src-lib/Hasura/Logging.hs
+++ b/server/src-lib/Hasura/Logging.hs
@@ -60,7 +60,6 @@ module Hasura.Logging
)
where
-import Control.AutoUpdate qualified as Auto
import Control.Exception (ErrorCall (ErrorCallWithLocation), catch)
import Control.FoldDebounce qualified as FDebounce
import Control.Monad.Trans.Control
@@ -71,6 +70,7 @@ import Data.ByteString qualified as B
import Data.ByteString.Lazy qualified as BL
import Data.ByteString.Lazy.Char8 qualified as BLC
import Data.HashSet qualified as Set
+import Data.IORef
import Data.Map.Strict (Map)
import Data.Map.Strict qualified as Map
import Data.SerializableBlob qualified as SB
@@ -81,6 +81,7 @@ import Data.Time.Clock.POSIX qualified as Time
import Data.Time.Format qualified as Format
import Data.Time.LocalTime qualified as Time
import Hasura.Base.Error (QErr)
+import Hasura.CachedTime
import Hasura.Prelude
import Hasura.Tracing.Class qualified as Tracing
import Hasura.Tracing.Context
@@ -358,7 +359,8 @@ instance ToEngineLog UnhandledInternalErrorLog Hasura where
-- * LoggerSettings
data LoggerSettings = LoggerSettings
- { -- | should current time be cached (refreshed every sec)
+ { -- | should current time be cached (refreshed every sec)? For performance
+ -- impact, see benchmarks in: https://github.com/hasura/graphql-engine-mono/pull/10631
_lsCachedTimestamp :: !Bool,
_lsTimeZone :: !(Maybe Time.TimeZone),
_lsLevel :: !LogLevel
@@ -376,6 +378,12 @@ getFormattedTime tzM = do
t <- Time.getCurrentTime
return $ FormattedTime t tz
+-- | Like 'getFormattedTime', but served from the cached, roughly
+-- second-granularity timestamp instead of a fresh clock read
+getCachedFormattedTime :: Maybe Time.TimeZone -> IO FormattedTime
+getCachedFormattedTime tzM = do
+ (t, tz, _) <- readIORef cachedRecentFormattedTimeAndZone
+ pure $ maybe (FormattedTime t tz) (FormattedTime t) tzM
+
-- | Creates a new 'LoggerCtx', optionally fanning out to an OTLP endpoint
-- (while enabled) as well.
--
@@ -392,21 +400,20 @@ mkLoggerCtxOTLP ::
LoggerSettings ->
Set.HashSet (EngineLogType impl) ->
ManagedT io (LoggerCtx impl)
-mkLoggerCtxOTLP logsExporter (LoggerSettings cacheTime tzM logLevel) enabledLogs = do
+mkLoggerCtxOTLP logsExporter (LoggerSettings shouldCacheTime tzM logLevel) enabledLogs = do
loggerSet <- allocate acquire release
- timeGetter <- liftIO $ bool (pure $ getFormattedTime tzM) cachedTimeGetter cacheTime
- pure $ LoggerCtx loggerSet logLevel timeGetter enabledLogs logsExporter
+ pure $ LoggerCtx loggerSet logLevel (timeGetter tzM) enabledLogs logsExporter
where
acquire = liftIO do
FL.newStdoutLoggerSet FL.defaultBufSize
release loggerSet = liftIO do
FL.flushLogStr loggerSet
FL.rmLoggerSet loggerSet
- cachedTimeGetter =
- Auto.mkAutoUpdate
- Auto.defaultUpdateSettings
- { Auto.updateAction = getFormattedTime tzM
- }
+ -- use either a slower clock lookup per log line, or a cheap read of a
+ -- coarse (roughly once-per-second) cached timestamp
+ timeGetter
+ | shouldCacheTime = getCachedFormattedTime
+ | otherwise = getFormattedTime
-- | 'mkLoggerCtxOTLP' but with no otlp log shipping, for compatibility
mkLoggerCtx ::
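(The Control.AutoUpdate-based cached clock is replaced by a plain IORef read from Hasura.CachedTime. A minimal, self-contained sketch of that pattern, assuming — as the import suggests — a module-level IORef that a background thread refreshes roughly once per second; this is an illustration, not the engine's actual module:

  import Control.Concurrent (forkIO, threadDelay)
  import Control.Monad (forever)
  import Data.IORef
  import Data.Time.Clock (UTCTime, getCurrentTime)

  newCachedClock :: IO (IORef UTCTime)
  newCachedClock = do
    ref <- newIORef =<< getCurrentTime
    _ <- forkIO . forever $ do
      threadDelay 1000000  -- refresh roughly once per second
      writeIORef ref =<< getCurrentTime
    pure ref
  -- Readers pay one readIORef instead of a system clock call per log line.
)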
diff --git a/server/src-lib/Hasura/RQL/DDL/Schema/Cache.hs b/server/src-lib/Hasura/RQL/DDL/Schema/Cache.hs
index 1c811b8a340..cb49346a4d3 100644
--- a/server/src-lib/Hasura/RQL/DDL/Schema/Cache.hs
+++ b/server/src-lib/Hasura/RQL/DDL/Schema/Cache.hs
@@ -1593,7 +1593,7 @@ buildSchemaCacheRule logger env mSchemaRegistryContext = proc (MetadataWithResou
then do
recreateTriggerIfNeeded
-<
- ( dynamicConfig,
+ ( (_cdcSQLGenCtx dynamicConfig),
table,
tableColumns,
triggerName,
@@ -1629,7 +1629,7 @@ buildSchemaCacheRule logger env mSchemaRegistryContext = proc (MetadataWithResou
-- computation will not be done again.
Inc.cache
proc
- ( dynamicConfig,
+ ( sqlGenCtx,
tableName,
tableColumns,
triggerName,
@@ -1643,7 +1643,7 @@ buildSchemaCacheRule logger env mSchemaRegistryContext = proc (MetadataWithResou
-< do
liftEitherM
$ createTableEventTrigger @b
- (_cdcSQLGenCtx dynamicConfig)
+ sqlGenCtx
sourceConfig
tableName
tableColumns
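(Passing only the SQLGenCtx, rather than the whole CacheDynamicConfig, into the Inc.cache'd arrow narrows its cache key: the cached event-trigger recreation is now invalidated only when SQL-generation settings actually change, not on every unrelated dynamic-config update.)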
diff --git a/server/src-lib/Hasura/Server/App.hs b/server/src-lib/Hasura/Server/App.hs
index b2c28247ec3..a5b7437bcb5 100644
--- a/server/src-lib/Hasura/Server/App.hs
+++ b/server/src-lib/Hasura/Server/App.hs
@@ -102,7 +102,7 @@ import Hasura.Server.Compression
import Hasura.Server.Init
import Hasura.Server.Limits
import Hasura.Server.Logging
-import Hasura.Server.Middleware (corsMiddleware)
+import Hasura.Server.Middleware
import Hasura.Server.OpenAPI (buildOpenAPI)
import Hasura.Server.Rest
import Hasura.Server.Types
@@ -345,7 +345,7 @@ mkSpockAction appStateRef qErrEncoder qErrModifier apiHandler = do
let getInfo parsedRequest = do
authenticationResp <- lift (resolveUserInfo (_lsLogger appEnvLoggers) appEnvManager headers acAuthMode parsedRequest)
- authInfo <- onLeft authenticationResp (logErrorAndResp Nothing requestId req (reqBody, Nothing) False origHeaders (ExtraUserInfo Nothing) . qErrModifier)
+ authInfo <- authenticationResp `onLeft` (logErrorAndResp Nothing requestId req (reqBody, Nothing) False Nothing origHeaders (ExtraUserInfo Nothing) . qErrModifier)
let (userInfo, _, authHeaders, extraUserInfo) = authInfo
appContext <- liftIO $ getAppContext appStateRef
schemaCache <- liftIO $ getRebuildableSchemaCacheWithVersion appStateRef
@@ -372,7 +372,7 @@ mkSpockAction appStateRef qErrEncoder qErrModifier apiHandler = do
(userInfo, authHeaders, handlerState, includeInternal, extraUserInfo) <- getInfo Nothing
(queryJSON, parsedReq) <-
runExcept (parseBody reqBody) `onLeft` \e -> do
- logErrorAndResp (Just userInfo) requestId req (reqBody, Nothing) includeInternal origHeaders extraUserInfo (qErrModifier e)
+ logErrorAndResp (Just userInfo) requestId req (reqBody, Nothing) includeInternal Nothing origHeaders extraUserInfo (qErrModifier e)
res <- lift $ runHandler (_lsLogger appEnvLoggers) handlerState $ handler parsedReq
pure (res, userInfo, authHeaders, includeInternal, Just queryJSON, extraUserInfo)
-- in this case we parse the request _first_ and then send the request to the webhook for auth
@@ -382,7 +382,7 @@ mkSpockAction appStateRef qErrEncoder qErrModifier apiHandler = do
-- if the request fails to parse, call the webhook without a request body
-- TODO should we signal this to the webhook somehow?
(userInfo, _, _, _, extraUserInfo) <- getInfo Nothing
- logErrorAndResp (Just userInfo) requestId req (reqBody, Nothing) False origHeaders extraUserInfo (qErrModifier e)
+ logErrorAndResp (Just userInfo) requestId req (reqBody, Nothing) False Nothing origHeaders extraUserInfo (qErrModifier e)
(userInfo, authHeaders, handlerState, includeInternal, extraUserInfo) <- getInfo (Just parsedReq)
res <- lift $ runHandler (_lsLogger appEnvLoggers) handlerState $ handler parsedReq
@@ -393,7 +393,7 @@ mkSpockAction appStateRef qErrEncoder qErrModifier apiHandler = do
-- if the request fails to parse, call the webhook without a request body
-- TODO should we signal this to the webhook somehow?
(userInfo, _, _, _, extraUserInfo) <- getInfo Nothing
- logErrorAndResp (Just userInfo) requestId req (reqBody, Nothing) False origHeaders extraUserInfo (qErrModifier e)
+ logErrorAndResp (Just userInfo) requestId req (reqBody, Nothing) False Nothing origHeaders extraUserInfo (qErrModifier e)
let newReq = case parsedReq of
EqrGQLReq reqText -> Just reqText
-- Note: We send only `ReqsText` to the webhook in case of `ExtPersistedQueryRequest` (persisted queries),
@@ -406,6 +406,8 @@ mkSpockAction appStateRef qErrEncoder qErrModifier apiHandler = do
res <- lift $ runHandler (_lsLogger appEnvLoggers) handlerState $ handler parsedReq
pure (res, userInfo, authHeaders, includeInternal, Just queryJSON, extraUserInfo)
+ let queryTime = Just (ioWaitTime, serviceTime)
+
-- https://opentelemetry.io/docs/reference/specification/trace/semantic_conventions/span-general/#general-identity-attributes
lift $ Tracing.attachMetadata [("enduser.role", roleNameToTxt $ _uiRole userInfo)]
@@ -415,10 +417,10 @@ mkSpockAction appStateRef qErrEncoder qErrModifier apiHandler = do
-- log and return result
case modResult of
Left err ->
- logErrorAndResp (Just userInfo) requestId req (reqBody, queryJSON) includeInternal headers extraUserInfo err
+ logErrorAndResp (Just userInfo) requestId req (reqBody, queryJSON) includeInternal queryTime headers extraUserInfo err
Right (httpLogGraphQLInfo, res) -> do
let httpLogMetadata = buildHttpLogMetadata @m httpLogGraphQLInfo extraUserInfo
- logSuccessAndResp (Just userInfo) requestId req (reqBody, queryJSON) res (Just (ioWaitTime, serviceTime)) origHeaders authHeaders httpLogMetadata
+ logSuccessAndResp (Just userInfo) requestId req (reqBody, queryJSON) res queryTime origHeaders authHeaders httpLogMetadata
where
logErrorAndResp ::
forall any ctx.
@@ -427,11 +429,12 @@ mkSpockAction appStateRef qErrEncoder qErrModifier apiHandler = do
Wai.Request ->
(BL.ByteString, Maybe Value) ->
Bool ->
+ Maybe (DiffTime, DiffTime) ->
[HTTP.Header] ->
ExtraUserInfo ->
QErr ->
Spock.ActionCtxT ctx m any
- logErrorAndResp userInfo reqId waiReq req includeInternal headers extraUserInfo qErr = do
+ logErrorAndResp userInfo reqId waiReq req includeInternal qTime headers extraUserInfo qErr = do
AppEnv {..} <- lift askAppEnv
let httpLogMetadata = buildHttpLogMetadata @m emptyHttpLogGraphQLInfo extraUserInfo
jsonResponse = J.encodingToLazyByteString $ qErrEncoder includeInternal qErr
@@ -439,7 +442,7 @@ mkSpockAction appStateRef qErrEncoder qErrModifier apiHandler = do
allHeaders = [contentLength, jsonHeader]
-- https://opentelemetry.io/docs/reference/specification/trace/semantic_conventions/http/#common-attributes
lift $ Tracing.attachMetadata [("http.response_content_length", bsToTxt $ snd contentLength)]
- lift $ logHttpError (_lsLogger appEnvLoggers) appEnvLoggingSettings userInfo reqId waiReq req qErr headers httpLogMetadata True
+ lift $ logHttpError (_lsLogger appEnvLoggers) appEnvLoggingSettings userInfo reqId waiReq req qErr qTime Nothing headers httpLogMetadata True
mapM_ setHeader allHeaders
Spock.setStatus $ qeStatus qErr
Spock.lazyBytes jsonResponse
@@ -880,6 +883,9 @@ httpApp setupHook appStateRef AppEnv {..} consoleType ekgStore closeWebsocketsOn
Spock.middleware
$ corsMiddleware (acCorsPolicy <$> getAppContext appStateRef)
+ -- bypass warp's use of 'auto-update'. See #10662
+ Spock.middleware dateHeaderMiddleware
+
-- API Console and Root Dir
serveApiConsole
@@ -1140,7 +1146,7 @@ httpApp setupHook appStateRef AppEnv {..} consoleType ekgStore closeWebsocketsOn
(reqId, _newHeaders) <- getRequestId headers
-- setting the bool flag countDataTransferBytes to False here since we don't want to count the data
-- transfer bytes for requests to `/healthz` and `/v1/version` endpoints
- lift $ logHttpError logger appEnvLoggingSettings Nothing reqId req (reqBody, Nothing) err headers (emptyHttpLogMetadata @m) False
+ lift $ logHttpError logger appEnvLoggingSettings Nothing reqId req (reqBody, Nothing) err Nothing Nothing headers (emptyHttpLogMetadata @m) False
spockAction ::
forall a.
@@ -1212,7 +1218,7 @@ raiseGenericApiError logger loggingSetting headers qErr = do
(reqId, _newHeaders) <- getRequestId $ Wai.requestHeaders req
-- setting the bool flag countDataTransferBytes to False here since we don't want to count the data
-- transfer bytes for requests to undefined resources
- lift $ logHttpError logger loggingSetting Nothing reqId req (reqBody, Nothing) qErr headers (emptyHttpLogMetadata @m) False
+ lift $ logHttpError logger loggingSetting Nothing reqId req (reqBody, Nothing) qErr Nothing Nothing headers (emptyHttpLogMetadata @m) False
setHeader jsonHeader
Spock.setStatus $ qeStatus qErr
Spock.lazyBytes $ encode qErr
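(These App.hs changes thread the (ioWaitTime, serviceTime) pair — previously logged only for successful requests — into logHttpError as well, passing Nothing at call sites such as health checks and body-parse failures where no timings exist yet; the new Maybe CompressionType parameter is likewise Nothing on the error path.)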
diff --git a/server/src-lib/Hasura/Server/Logging.hs b/server/src-lib/Hasura/Server/Logging.hs
index 16835f4d45d..6432b2935b4 100644
--- a/server/src-lib/Hasura/Server/Logging.hs
+++ b/server/src-lib/Hasura/Server/Logging.hs
@@ -306,6 +306,10 @@ class (Monad m) => HttpLog m where
(BL.ByteString, Maybe J.Value) ->
-- | the error
QErr ->
+ -- | IO/network wait time and service time (respectively) for this request, if available.
+ Maybe (DiffTime, DiffTime) ->
+ -- | possible compression type
+ Maybe CompressionType ->
-- | list of request headers
[HTTP.Header] ->
HttpLogMetadata m ->
@@ -348,7 +352,7 @@ instance (HttpLog m) => HttpLog (TraceT m) where
buildExtraHttpLogMetadata a = buildExtraHttpLogMetadata @m a
emptyExtraHttpLogMetadata = emptyExtraHttpLogMetadata @m
- logHttpError a b c d e f g h i j = lift $ logHttpError a b c d e f g h i j
+ logHttpError a b c d e f g h i j k l = lift $ logHttpError a b c d e f g h i j k l
logHttpSuccess a b c d e f g h i j k l m = lift $ logHttpSuccess a b c d e f g h i j k l m
@@ -358,7 +362,7 @@ instance (HttpLog m) => HttpLog (ReaderT r m) where
buildExtraHttpLogMetadata a = buildExtraHttpLogMetadata @m a
emptyExtraHttpLogMetadata = emptyExtraHttpLogMetadata @m
- logHttpError a b c d e f g h i j = lift $ logHttpError a b c d e f g h i j
+ logHttpError a b c d e f g h i j k l = lift $ logHttpError a b c d e f g h i j k l
logHttpSuccess a b c d e f g h i j k l m = lift $ logHttpSuccess a b c d e f g h i j k l m
@@ -368,49 +372,49 @@ instance (HttpLog m) => HttpLog (ExceptT e m) where
buildExtraHttpLogMetadata a = buildExtraHttpLogMetadata @m a
emptyExtraHttpLogMetadata = emptyExtraHttpLogMetadata @m
- logHttpError a b c d e f g h i j = lift $ logHttpError a b c d e f g h i j
+ logHttpError a b c d e f g h i j k l = lift $ logHttpError a b c d e f g h i j k l
logHttpSuccess a b c d e f g h i j k l m = lift $ logHttpSuccess a b c d e f g h i j k l m
-- | Log information about the HTTP request
data HttpInfoLog = HttpInfoLog
- { hlStatus :: !HTTP.Status,
- hlMethod :: !Text,
- hlSource :: !Wai.IpAddress,
- hlPath :: !Text,
- hlHttpVersion :: !HTTP.HttpVersion,
- hlCompression :: !(Maybe CompressionType),
+ { hlStatus :: HTTP.Status,
+ hlMethod :: Text,
+ hlSource :: Wai.IpAddress,
+ hlPath :: Text,
+ hlHttpVersion :: HTTP.HttpVersion,
+ hlCompression :: Maybe CompressionType,
-- | all the request headers
- hlHeaders :: ![HTTP.Header]
+ hlHeaders :: [HTTP.Header]
}
deriving (Eq)
instance J.ToJSON HttpInfoLog where
- toJSON (HttpInfoLog st met src path hv compressTypeM _) =
+ toJSON (HttpInfoLog st met src path hv compressType _) =
J.object
[ "status" J..= HTTP.statusCode st,
"method" J..= met,
"ip" J..= Wai.showIPAddress src,
"url" J..= path,
"http_version" J..= show hv,
- "content_encoding" J..= (compressionTypeToTxt <$> compressTypeM)
+ "content_encoding" J..= (compressionTypeToTxt <$> compressType)
]
-- | Information about a GraphQL/Hasura metadata operation over HTTP
data OperationLog = OperationLog
- { olRequestId :: !RequestId,
- olUserVars :: !(Maybe SessionVariables),
- olResponseSize :: !(Maybe Int64),
+ { olRequestId :: RequestId,
+ olUserVars :: Maybe SessionVariables,
+ olResponseSize :: Maybe Int64,
-- | Response size before compression
- olUncompressedResponseSize :: !Int64,
+ olUncompressedResponseSize :: Int64,
-- | Request IO wait time, i.e. time spent reading the full request from the socket.
- olRequestReadTime :: !(Maybe Seconds),
+ olRequestReadTime :: Maybe Seconds,
-- | Service time, not including request IO wait time.
- olQueryExecutionTime :: !(Maybe Seconds),
- olQuery :: !(Maybe J.Value),
- olRawQuery :: !(Maybe Text),
- olError :: !(Maybe QErr),
- olRequestMode :: !RequestMode
+ olQueryExecutionTime :: Maybe Seconds,
+ olQuery :: Maybe J.Value,
+ olRawQuery :: Maybe Text,
+ olError :: Maybe QErr,
+ olRequestMode :: RequestMode
}
deriving (Eq, Generic)
@@ -421,9 +425,9 @@ instance J.ToJSON OperationLog where
-- | @BatchOperationSuccessLog@ contains the information required for a single
-- successful operation in a batch request for OSS. This type is a subset of the @GQLQueryOperationSuccessLog@
data BatchOperationSuccessLog = BatchOperationSuccessLog
- { _bolQuery :: !(Maybe J.Value),
- _bolResponseSize :: !Int64,
- _bolQueryExecutionTime :: !Seconds
+ { _bolQuery :: Maybe J.Value,
+ _bolResponseSize :: Int64,
+ _bolQueryExecutionTime :: Seconds
}
deriving (Eq, Generic)
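(Dropping the explicit bang annotations on these record fields is behaviour-preserving assuming the package-wide StrictData default extension that the server build enables; with it, record fields are strict by default and the bangs were redundant.)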
diff --git a/server/src-lib/Hasura/Server/Middleware.hs b/server/src-lib/Hasura/Server/Middleware.hs
index 107c3ab17f6..5fafd89c2f2 100644
--- a/server/src-lib/Hasura/Server/Middleware.hs
+++ b/server/src-lib/Hasura/Server/Middleware.hs
@@ -1,12 +1,15 @@
module Hasura.Server.Middleware
( corsMiddleware,
+ dateHeaderMiddleware,
)
where
import Control.Applicative
import Data.ByteString qualified as B
import Data.CaseInsensitive qualified as CI
+import Data.IORef
import Data.Text.Encoding qualified as TE
+import Hasura.CachedTime
import Hasura.Prelude
import Hasura.Server.Cors
import Hasura.Server.Utils
@@ -73,3 +76,9 @@ corsMiddleware getPolicy app req sendResp = do
cacheExposedHeaders = ["X-Hasura-Query-Cache-Key", "X-Hasura-Query-Family-Cache-Key", "Warning"]
setHeaders hdrs = mapResponseHeaders (\h -> mkRespHdrs hdrs ++ h)
mkRespHdrs = map (\(k, v) -> (CI.mk k, v))
+
+-- bypass warp's use of 'auto-update'. See #10662
+dateHeaderMiddleware :: Middleware
+dateHeaderMiddleware app req respond = do
+ (_, _, nowRFC7231) <- liftIO $ readIORef cachedRecentFormattedTimeAndZone
+ app req $ respond . mapResponseHeaders (("Date", nowRFC7231) :)
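(Because warp synthesizes its own Date header — via auto-update — only when the application response lacks one, prepending a Date rendered from the shared cached clock should make warp skip that machinery entirely; the same IORef now serves both log timestamps and response dates.)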
diff --git a/server/src-lib/Hasura/Server/Prometheus.hs b/server/src-lib/Hasura/Server/Prometheus.hs
index dabd998b6b9..967f1540e5c 100644
--- a/server/src-lib/Hasura/Server/Prometheus.hs
+++ b/server/src-lib/Hasura/Server/Prometheus.hs
@@ -31,11 +31,13 @@ module Hasura.Server.Prometheus
observeHistogramWithLabel,
SubscriptionKindLabel (..),
SubscriptionLabel (..),
- DynamicSubscriptionLabel (..),
+ DynamicGraphqlOperationLabel (..),
streamingSubscriptionLabel,
liveQuerySubscriptionLabel,
recordMetricWithLabel,
- recordSubcriptionMetric,
+ recordSubscriptionMetric,
+ GraphQLRequestsLabels,
+ recordGraphqlOperationMetric,
)
where
@@ -71,7 +73,7 @@ data PrometheusMetrics = PrometheusMetrics
pmGraphQLRequestMetrics :: GraphQLRequestMetrics,
pmEventTriggerMetrics :: EventTriggerMetrics,
pmWebSocketBytesReceived :: Counter,
- pmWebSocketBytesSent :: CounterVector DynamicSubscriptionLabel,
+ pmWebSocketBytesSent :: CounterVector DynamicGraphqlOperationLabel,
pmActionBytesReceived :: Counter,
pmActionBytesSent :: Counter,
pmScheduledTriggerMetrics :: ScheduledTriggerMetrics,
@@ -83,13 +85,7 @@ data PrometheusMetrics = PrometheusMetrics
}
data GraphQLRequestMetrics = GraphQLRequestMetrics
- { gqlRequestsQuerySuccess :: Counter,
- gqlRequestsQueryFailure :: Counter,
- gqlRequestsMutationSuccess :: Counter,
- gqlRequestsMutationFailure :: Counter,
- gqlRequestsSubscriptionSuccess :: Counter,
- gqlRequestsSubscriptionFailure :: Counter,
- gqlRequestsUnknownFailure :: Counter,
+ { gqlRequests :: CounterVector GraphQLRequestsLabels,
gqlExecutionTimeSecondsQuery :: Histogram,
gqlExecutionTimeSecondsMutation :: Histogram
}
@@ -170,13 +166,7 @@ makeDummyPrometheusMetrics = do
makeDummyGraphQLRequestMetrics :: IO GraphQLRequestMetrics
makeDummyGraphQLRequestMetrics = do
- gqlRequestsQuerySuccess <- Counter.new
- gqlRequestsQueryFailure <- Counter.new
- gqlRequestsMutationSuccess <- Counter.new
- gqlRequestsMutationFailure <- Counter.new
- gqlRequestsSubscriptionSuccess <- Counter.new
- gqlRequestsSubscriptionFailure <- Counter.new
- gqlRequestsUnknownFailure <- Counter.new
+ gqlRequests <- CounterVector.new
gqlExecutionTimeSecondsQuery <- Histogram.new []
gqlExecutionTimeSecondsMutation <- Histogram.new []
pure GraphQLRequestMetrics {..}
@@ -295,6 +285,7 @@ instance ToLabels (Maybe DynamicEventTriggerLabel) where
toLabels (Just (DynamicEventTriggerLabel triggerName sourceName)) = Map.fromList $ [("trigger_name", triggerNameToTxt triggerName), ("source_name", sourceNameToText sourceName)]
data ResponseStatus = Success | Failed
+ deriving stock (Generic, Ord, Eq)
-- TODO: Make this a method of a new typeclass of the metrics library
responseStatusToLabelValue :: ResponseStatus -> Text
@@ -335,21 +326,21 @@ streamingSubscriptionLabel = SubscriptionKindLabel "streaming"
liveQuerySubscriptionLabel :: SubscriptionKindLabel
liveQuerySubscriptionLabel = SubscriptionKindLabel "live-query"
-data DynamicSubscriptionLabel = DynamicSubscriptionLabel
+data DynamicGraphqlOperationLabel = DynamicGraphqlOperationLabel
{ _dslParamQueryHash :: Maybe ParameterizedQueryHash,
_dslOperationName :: Maybe OperationName
}
deriving stock (Generic, Ord, Eq)
-instance ToLabels DynamicSubscriptionLabel where
- toLabels (DynamicSubscriptionLabel hash opName) =
+instance ToLabels DynamicGraphqlOperationLabel where
+ toLabels (DynamicGraphqlOperationLabel hash opName) =
Map.fromList
$ maybe [] (\pqh -> [("parameterized_query_hash", bsToTxt $ unParamQueryHash pqh)]) hash
<> maybe [] (\op -> [("operation_name", G.unName $ _unOperationName op)]) opName
data SubscriptionLabel = SubscriptionLabel
{ _slKind :: SubscriptionKindLabel,
- _slDynamicLabels :: Maybe DynamicSubscriptionLabel
+ _slDynamicLabels :: Maybe DynamicGraphqlOperationLabel
}
deriving stock (Generic, Ord, Eq)
@@ -357,6 +348,25 @@ instance ToLabels SubscriptionLabel where
toLabels (SubscriptionLabel kind Nothing) = Map.fromList $ [("subscription_kind", subscription_kind kind)]
toLabels (SubscriptionLabel kind (Just dl)) = (Map.fromList $ [("subscription_kind", subscription_kind kind)]) <> toLabels dl
+-- TODO: Make this a method of a new typeclass of the metrics library
+opTypeToLabelValue :: Maybe G.OperationType -> Text
+opTypeToLabelValue = \case
+ (Just G.OperationTypeQuery) -> "query"
+ (Just G.OperationTypeMutation) -> "mutation"
+ (Just G.OperationTypeSubscription) -> "subscription"
+ Nothing -> "unknown"
+
+data GraphQLRequestsLabels = GraphQLRequestsLabels
+ { operation_type :: Maybe G.OperationType,
+ response_status :: ResponseStatus,
+ dynamic_label :: Maybe DynamicGraphqlOperationLabel
+ }
+ deriving stock (Generic, Ord, Eq)
+
+instance ToLabels (GraphQLRequestsLabels) where
+ toLabels (GraphQLRequestsLabels op_type res_status dynamic_labels) =
+ (HashMap.fromList $ [("operation_type", opTypeToLabelValue op_type), ("response_status", responseStatusToLabelValue res_status)]) <> (fromMaybe mempty (toLabels <$> dynamic_labels))
+
-- | Record metrics with dynamic label
recordMetricWithLabel ::
(MonadIO m) =>
@@ -401,7 +411,7 @@ observeHistogramWithLabel getMetricState alwaysObserve histogramVector label val
-- | Record a subscription metric for all the operation names present in the subscription.
-- Use this when you want to update the same value of the metric for all the operation names.
-recordSubcriptionMetric ::
+recordSubscriptionMetric ::
(MonadIO m) =>
(IO GranularPrometheusMetricsState) ->
-- should the metric be observed without a label when granularMetricsState is OFF
@@ -412,11 +422,11 @@ recordSubcriptionMetric ::
-- the metric action to perform
(SubscriptionLabel -> IO ()) ->
m ()
-recordSubcriptionMetric getMetricState alwaysObserve operationNamesMap parameterizedQueryHash subscriptionKind metricAction = do
+recordSubscriptionMetric getMetricState alwaysObserve operationNamesMap parameterizedQueryHash subscriptionKind metricAction = do
-- if no operation names are present, then emit metric with only param query hash as dynamic label
if (null operationNamesMap)
then do
- let promMetricGranularLabel = SubscriptionLabel subscriptionKind (Just $ DynamicSubscriptionLabel (Just parameterizedQueryHash) Nothing)
+ let promMetricGranularLabel = SubscriptionLabel subscriptionKind (Just $ DynamicGraphqlOperationLabel (Just parameterizedQueryHash) Nothing)
promMetricLabel = SubscriptionLabel subscriptionKind Nothing
recordMetricWithLabel
getMetricState
@@ -427,10 +437,29 @@ recordSubcriptionMetric getMetricState alwaysObserve operationNamesMap parameter
do
let operationNames = HashMap.keys operationNamesMap
for_ operationNames $ \opName -> do
- let promMetricGranularLabel = SubscriptionLabel subscriptionKind (Just $ DynamicSubscriptionLabel (Just parameterizedQueryHash) opName)
+ let promMetricGranularLabel = SubscriptionLabel subscriptionKind (Just $ DynamicGraphqlOperationLabel (Just parameterizedQueryHash) opName)
promMetricLabel = SubscriptionLabel subscriptionKind Nothing
recordMetricWithLabel
getMetricState
alwaysObserve
(metricAction promMetricGranularLabel)
(metricAction promMetricLabel)
+
+recordGraphqlOperationMetric ::
+ (MonadIO m) =>
+ (IO GranularPrometheusMetricsState) ->
+ Maybe G.OperationType ->
+ ResponseStatus ->
+ Maybe OperationName ->
+ Maybe ParameterizedQueryHash ->
+ (GraphQLRequestsLabels -> IO ()) ->
+ m ()
+recordGraphqlOperationMetric getMetricState operationType responseStatus operationName parameterizedQueryHash metricAction = do
+ let dynamicLabel = DynamicGraphqlOperationLabel parameterizedQueryHash operationName
+ promMetricGranularLabel = GraphQLRequestsLabels operationType responseStatus (Just dynamicLabel)
+ promMetricLabel = GraphQLRequestsLabels operationType responseStatus Nothing
+ recordMetricWithLabel
+ getMetricState
+ True
+ (metricAction promMetricGranularLabel)
+ (metricAction promMetricLabel)
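(With this helper in place, one counter family replaces the seven per-type counters. Assuming the family is registered as hasura_graphql_requests_total — the name used in Hasura's metrics documentation — the exposed series should look roughly like:

  hasura_graphql_requests_total{operation_type="query",response_status="success"} 42
  hasura_graphql_requests_total{operation_type="query",response_status="failed",operation_name="getUsers",parameterized_query_hash="<hash>"} 3

where the operation_name and parameterized_query_hash labels appear only when granular metrics are enabled.)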
diff --git a/server/src-rsr/catalog_versions.txt b/server/src-rsr/catalog_versions.txt
index 226e22ec4af..fd3007ef52b 100644
--- a/server/src-rsr/catalog_versions.txt
+++ b/server/src-rsr/catalog_versions.txt
@@ -198,3 +198,6 @@ v2.36.2 48
v2.37.0-beta.1 48
v2.36.3 48
v2.37.0 48
+v2.37.1 48
+v2.38.0-beta.1 48
+v2.38.0 48
diff --git a/server/tests-py/run.sh b/server/tests-py/run.sh
index 47d394f68ee..1761a95def0 100755
--- a/server/tests-py/run.sh
+++ b/server/tests-py/run.sh
@@ -41,6 +41,7 @@ export HASURA_GRAPHQL_PG_SOURCE_URL_1 HASURA_GRAPHQL_PG_SOURCE_URL_2 HASURA_GRAP
echo
echo '*** Running tests ***'
+export SQLALCHEMY_SILENCE_UBER_WARNING=1 # disable warnings about upgrading to SQLAlchemy 2.0
pytest \
--dist=loadscope \
-n auto \
diff --git a/server/tests-py/test_logging.py b/server/tests-py/test_logging.py
index 74bc1de1795..6b14ad9f5a0 100644
--- a/server/tests-py/test_logging.py
+++ b/server/tests-py/test_logging.py
@@ -45,8 +45,7 @@ class TestLogging:
headers = {'x-request-id': 'successful-query-log-test'}
if hge_ctx.hge_key:
headers['x-hasura-admin-secret'] = hge_ctx.hge_key
- resp = hge_ctx.http.post(hge_ctx.hge_url + '/v1/graphql', json=q,
- headers=headers)
+ resp = hge_ctx.http.post(hge_ctx.hge_url + '/v1/graphql', json=q, headers=headers)
assert resp.status_code == 200 and 'data' in resp.json()
# make a query where JSON body parsing fails
@@ -54,17 +53,31 @@ class TestLogging:
headers = {'x-request-id': 'json-parse-fail-log-test'}
if hge_ctx.hge_key:
headers['x-hasura-admin-secret'] = hge_ctx.hge_key
- resp = hge_ctx.http.post(hge_ctx.hge_url + '/v1/graphql', json=q,
- headers=headers)
+ resp = hge_ctx.http.post(hge_ctx.hge_url + '/v1/graphql', json=q, headers=headers)
assert resp.status_code == 200 and 'errors' in resp.json()
# make an unauthorized query where admin secret/access token is empty
q = {'query': 'query { hello {code name} }'}
- headers = {'x-request-id': 'unauthorized-query-test'}
- resp = hge_ctx.http.post(hge_ctx.hge_url + '/v1/graphql', json=q,
- headers=headers)
+ headers = {'x-request-id': 'unauthorized-query-log-test'}
+ resp = hge_ctx.http.post(hge_ctx.hge_url + '/v1/graphql', json=q, headers=headers)
assert resp.status_code == 200 and 'errors' in resp.json()
+ # make a successful "run SQL" query
+ q = {'type': 'run_sql', 'args': {'source': 'default', 'sql': 'SELECT 1 AS one'}}
+ headers = {'x-request-id': 'successful-run-sql-log-test'}
+ if hge_ctx.hge_key:
+ headers['x-hasura-admin-secret'] = hge_ctx.hge_key
+ resp = hge_ctx.http.post(hge_ctx.hge_url + '/v2/query', json=q, headers=headers)
+ assert resp.status_code == 200 and 'result' in resp.json()
+
+ # make a failed "run SQL" query
+ q = {'type': 'run_sql', 'args': {'source': 'default', 'sql': 'SELECT x FROM non_existent_table'}}
+ headers = {'x-request-id': 'failed-run-sql-log-test'}
+ if hge_ctx.hge_key:
+ headers['x-hasura-admin-secret'] = hge_ctx.hge_key
+ resp = hge_ctx.http.post(hge_ctx.hge_url + '/v2/query', json=q, headers=headers)
+ assert resp.status_code == 400
+
# make an unauthorized metadata request where admin secret/access token is empty
q = {
'query': {
@@ -79,7 +92,7 @@ class TestLogging:
}
}
}
- headers = {'x-request-id': 'unauthorized-metadata-test'}
+ headers = {'x-request-id': 'unauthorized-metadata-log-test'}
resp = hge_ctx.http.post(hge_ctx.hge_url + '/v1/query', json=q,
headers=headers)
assert resp.status_code == 401 and 'error' in resp.json()
@@ -94,7 +107,7 @@ class TestLogging:
'kind' in x['detail'] and \
x['detail']['kind'] == 'server_configuration'
- config_logs = list(filter(_get_server_config, logs_from_requests))
+ config_logs = [log for log in logs_from_requests if _get_server_config(log)]
print(config_logs)
assert len(config_logs) == 1
config_log = config_logs[0]
@@ -130,7 +143,7 @@ class TestLogging:
return x['type'] == 'http-log'
print('all logs gathered', logs_from_requests)
- http_logs = list(filter(_get_http_logs, logs_from_requests))
+ http_logs = [log for log in logs_from_requests if _get_http_logs(log)]
print('http logs', http_logs)
assert len(http_logs) > 0
for http_log in http_logs:
@@ -143,8 +156,9 @@ class TestLogging:
operation = http_log['detail']['operation']
assert 'request_id' in operation
- if operation['request_id'] == 'successful-query-log-test':
+ if operation['request_id'] in ['successful-query-log-test', 'successful-run-sql-log-test', 'failed-run-sql-log-test']:
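+ # query_execution_time is reported for run_sql requests as well as GraphQL queries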
assert 'query_execution_time' in operation
+ if operation['request_id'] == 'successful-query-log-test':
assert 'user_vars' in operation
# we should see the `query` field in successful operations
assert 'query' in operation
@@ -156,7 +170,7 @@ class TestLogging:
def _get_query_logs(x):
return x['type'] == 'query-log'
- query_logs = list(filter(_get_query_logs, logs_from_requests))
+ query_logs = [log for log in logs_from_requests if _get_query_logs(log)]
assert len(query_logs) > 0
onelog = query_logs[0]['detail']
assert 'request_id' in onelog
@@ -165,11 +179,11 @@ class TestLogging:
assert 'generated_sql' in onelog
def test_http_parse_failed_log(self, logs_from_requests):
- def _get_parse_failed_logs(x):
+ def _get_logs(x):
return x['type'] == 'http-log' and \
x['detail']['operation']['request_id'] == 'json-parse-fail-log-test'
- http_logs = list(filter(_get_parse_failed_logs, logs_from_requests))
+ http_logs = [log for log in logs_from_requests if _get_logs(log)]
print('parse failed logs', http_logs)
assert len(http_logs) > 0
print(http_logs[0])
@@ -177,11 +191,11 @@ class TestLogging:
assert http_logs[0]['detail']['operation']['error']['code'] == 'parse-failed'
def test_http_unauthorized_query(self, logs_from_requests):
- def _get_failed_logs(x):
+ def _get_logs(x):
return x['type'] == 'http-log' and \
- x['detail']['operation']['request_id'] == 'unauthorized-query-test'
+ x['detail']['operation']['request_id'] == 'unauthorized-query-log-test'
- http_logs = list(filter(_get_failed_logs, logs_from_requests))
+ http_logs = [log for log in logs_from_requests if _get_logs(log)]
print('unauthorized failed logs', http_logs)
assert len(http_logs) > 0
print(http_logs[0])
@@ -190,12 +204,35 @@ class TestLogging:
assert http_logs[0]['detail']['operation'].get('query') is None
assert http_logs[0]['detail']['operation']['raw_query'] is not None
- def test_http_unauthorized_metadata(self, logs_from_requests):
- def _get_failed_logs(x):
+ def test_successful_run_sql(self, logs_from_requests):
+ def _get_logs(x):
return x['type'] == 'http-log' and \
- x['detail']['operation']['request_id'] == 'unauthorized-metadata-test'
+ x['detail']['operation']['request_id'] == 'successful-run-sql-log-test'
- http_logs = list(filter(_get_failed_logs, logs_from_requests))
+ http_logs = [log for log in logs_from_requests if _get_logs(log)]
+ print('successful run SQL logs', http_logs)
+ assert len(http_logs) > 0
+ print(http_logs[0])
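+ # the http log for /v2/query requests should include the request body under 'query'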
+ assert http_logs[0]['detail']['operation']['query']['type'] == 'run_sql'
+
+ def test_failed_run_sql(self, logs_from_requests):
+ def _get_logs(x):
+ return x['type'] == 'http-log' and \
+ x['detail']['operation']['request_id'] == 'failed-run-sql-log-test'
+
+ http_logs = [log for log in logs_from_requests if _get_logs(log)]
+ print('failed run SQL logs', http_logs)
+ assert len(http_logs) > 0
+ print(http_logs[0])
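+ # the invalid SQL should surface as a postgres-error, with the request body still logged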
+ assert http_logs[0]['detail']['operation']['error']['code'] == 'postgres-error'
+ assert http_logs[0]['detail']['operation']['query']['type'] == 'run_sql'
+
+ def test_http_unauthorized_metadata(self, logs_from_requests):
+ def _get_logs(x):
+ return x['type'] == 'http-log' and \
+ x['detail']['operation']['request_id'] == 'unauthorized-metadata-log-test'
+
+ http_logs = [log for log in logs_from_requests if _get_logs(log)]
print('unauthorized failed logs', http_logs)
assert len(http_logs) > 0
print(http_logs[0])