diff --git a/cli/README.md b/cli/README.md index b881ff68a37..dd3b740558e 100644 --- a/cli/README.md +++ b/cli/README.md @@ -19,7 +19,7 @@ You can also install a specific version of the CLI by providing the `VERSION` variable: ```bash - curl -L https://github.com/hasura/graphql-engine/raw/stable/cli/get.sh | VERSION=v2.37.0 bash + curl -L https://github.com/hasura/graphql-engine/raw/stable/cli/get.sh | VERSION=v2.38.0 bash ``` - Windows diff --git a/cli/get.sh b/cli/get.sh index 8a6ffb28344..e6900d98ebc 100755 --- a/cli/get.sh +++ b/cli/get.sh @@ -44,7 +44,7 @@ log "Selecting version..." # version=${VERSION:-`echo $(curl -s -f -H 'Content-Type: application/json' \ # https://releases.hasura.io/graphql-engine?agent=cli-get.sh) | sed -n -e "s/^.*\"$release\":\"\([^\",}]*\)\".*$/\1/p"`} -version=${VERSION:-v2.37.0} +version=${VERSION:-v2.38.0} if [ ! $version ]; then log "${YELLOW}" @@ -62,7 +62,7 @@ log "Selected version: $version" log "${YELLOW}" log NOTE: Install a specific version of the CLI by using VERSION variable -log 'curl -L https://github.com/hasura/graphql-engine/raw/stable/cli/get.sh | VERSION=v2.37.0 bash' +log 'curl -L https://github.com/hasura/graphql-engine/raw/stable/cli/get.sh | VERSION=v2.38.0 bash' log "${NC}" # check for existing hasura installation diff --git a/docs/.gitignore b/docs/.gitignore index 5616b5ce501..9f320b4b142 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -30,4 +30,6 @@ yarn-error.log* .tool-versions -spell_check_results.txt \ No newline at end of file +spell_check_results.txt + +.env* \ No newline at end of file diff --git a/docs/docs/auth/authorization/permissions/row-level-permissions.mdx b/docs/docs/auth/authorization/permissions/row-level-permissions.mdx index c3c8303a4ca..975b5c60f04 100644 --- a/docs/docs/auth/authorization/permissions/row-level-permissions.mdx +++ b/docs/docs/auth/authorization/permissions/row-level-permissions.mdx @@ -296,6 +296,21 @@ When you use array operators such as `_in` in the permissions builder in the Has an array for your values. If your session variable value is already an array, you can click the `[X-Hasura-Allowed-Ids]` suggestion to remove the brackets and set your session variable in its place. +Here is an example of an array-based session variable: + +```bash +X-Hasura-Allowed-Ids: {1,2,3} +``` + +And the related permission configuration: + +```yaml +permission: + filter: + user_id: + _in: X-Hasura-Allowed-Ids +``` + ::: ## Permissions with relationships or nested objects {#relationships-in-permissions} diff --git a/docs/docs/databases/athena/getting-started/index.mdx b/docs/docs/databases/athena/getting-started/index.mdx index 11c9cef7fa8..67ea1866dc6 100644 --- a/docs/docs/databases/athena/getting-started/index.mdx +++ b/docs/docs/databases/athena/getting-started/index.mdx @@ -33,3 +33,11 @@ Here are 2 ways you can get started with Hasura: service from Hasura Cloud. 2. [Docker](/databases/athena/getting-started/docker.mdx): Run Hasura with Docker and then connect your Amazon Athena service to Hasura. + +:::info Using Kubernetes? + +We have Helm charts available for deploying Hasura on Kubernetes. Check out +[more information here](/deployment/deployment-guides/kubernetes-helm.mdx) and see the +[`enterprise-stack` here](https://github.com/hasura/helm-charts/tree/main/charts/hasura-enterprise-stack). 
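For example, a minimal install with the Helm charts might look like the sketch below. The repository URL, chart name,
and release name are assumptions based on the `hasura/helm-charts` repository linked above; check the chart's README
for the authoritative values and configuration options.

```bash
# Add the Hasura Helm repository and refresh the local index
# (repository URL assumed from the hasura/helm-charts project)
helm repo add hasura https://hasura.github.io/helm-charts
helm repo update

# Install the GraphQL Engine chart into its own namespace
# (chart and release names are illustrative)
helm install hasura hasura/graphql-engine \
  --namespace hasura \
  --create-namespace
```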
+ +::: diff --git a/docs/docs/databases/bigquery/getting-started/index.mdx b/docs/docs/databases/bigquery/getting-started/index.mdx index 64973f2a8e2..745ca619d80 100644 --- a/docs/docs/databases/bigquery/getting-started/index.mdx +++ b/docs/docs/databases/bigquery/getting-started/index.mdx @@ -16,7 +16,15 @@ To try Hasura with BigQuery, you'll need your own new or existing BigQuery datab Here are two ways you can get started with Hasura: -1. [Hasura Cloud](/databases/bigquery/getting-started/cloud.mdx): Access and manage your BigQuery -database from Hasura Cloud. +1. [Hasura Cloud](/databases/bigquery/getting-started/cloud.mdx): Access and manage your BigQuery database from Hasura + Cloud. 2. [Docker](/databases/bigquery/getting-started/docker.mdx): Run Hasura with Docker and then connect your BigQuery -database to Hasura. + database to Hasura. + +:::info Using Kubernetes? + +We have Helm charts available for deploying Hasura on Kubernetes. Check out +[more information here](/deployment/deployment-guides/kubernetes-helm.mdx) and see the +[`enterprise-stack` here](https://github.com/hasura/helm-charts/tree/main/charts/hasura-enterprise-stack). + +::: diff --git a/docs/docs/databases/clickhouse/getting-started/index.mdx b/docs/docs/databases/clickhouse/getting-started/index.mdx index b1e211f7310..57b6cfce962 100644 --- a/docs/docs/databases/clickhouse/getting-started/index.mdx +++ b/docs/docs/databases/clickhouse/getting-started/index.mdx @@ -18,3 +18,11 @@ Here are 2 ways you can get started with Hasura and ClickHouse: service from Hasura Cloud. 2. [Docker](/databases/clickhouse/getting-started/docker.mdx): Run Hasura with Docker and then connect your ClickHouse service to Hasura. + +:::info Using Kubernetes? + +We have Helm charts available for deploying Hasura on Kubernetes. Check out +[more information here](/deployment/deployment-guides/kubernetes-helm.mdx) and see the +[`enterprise-stack` here](https://github.com/hasura/helm-charts/tree/main/charts/hasura-enterprise-stack). + +::: diff --git a/docs/docs/databases/database-config/index.mdx b/docs/docs/databases/database-config/index.mdx index 634b04501c9..359b6a24e5a 100644 --- a/docs/docs/databases/database-config/index.mdx +++ b/docs/docs/databases/database-config/index.mdx @@ -72,9 +72,6 @@ is recommended to use environment variables for better security _(as connection exposed as part of the Hasura Metadata)_ as well as to allow configuring different databases in different environments _(like staging or production)_ easily. -A database can be connected to using the `HASURA_GRAPHQL_DATABASE_URL` environment variable as well in which case it -gets added automatically as a database named `default`. - ### Allow connections from the Hasura Cloud IP {#cloud-projects-create-allow-nat-ip} When using Hasura Cloud, you may need to adjust your connection settings of your database provider to allow @@ -114,8 +111,6 @@ is recommended to use environment variables for better security _(as connection exposed as part of the Hasura Metadata)_ as well as to allow configuring different databases in different environments _(like staging or production)_ easily. -A database can be connected to using the `HASURA_GRAPHQL_DATABASE_URL` environment variable as well in which case it -gets added automatically as a database named default. @@ -127,8 +122,7 @@ gets added automatically as a database named default. In your `config v3` project, head to the `/metadata/databases/databases.yaml` file and add the database configuration as -below. 
If you're using the `HASURA_GRAPHQL_DATABASE_URL` environment variable then the database will get automatically -added and named default. +below. ```yaml - name: @@ -198,8 +192,7 @@ Engine instance. When using Hasura Cloud, Metadata is stored for you in separate data storage to your connected database(s). When using Docker, if you want to [store the Hasura Metadata on a separate database](/deployment/graphql-engine-flags/reference.mdx#metadata-database-url), -you can use the `HASURA_GRAPHQL_METADATA_DATABASE_URL` env var to specify which database to use. By default, the -Hasura Metadata is stored on the same database as specified in the `HASURA_GRAPHQL_DATABASE_URL` environment variable. +you can use the `HASURA_GRAPHQL_METADATA_DATABASE_URL` env var to specify which database to use. ## Connect different Hasura instances to the same database diff --git a/docs/docs/databases/mariadb/cloud.mdx b/docs/docs/databases/mariadb/cloud.mdx index 0ac85ebb9aa..d0d20b7ebfc 100644 --- a/docs/docs/databases/mariadb/cloud.mdx +++ b/docs/docs/databases/mariadb/cloud.mdx @@ -25,7 +25,7 @@ the easiest way to set up Hasura Engine and the MariaDB GraphQL Data Connector. :::tip Supported versions: 1. Hasura GraphQL Engine `v2.24.0` onwards -2. Hasura supports most databases with standard implementations of **MariaDB 10.5 and higher** including: Amazon RDS, +2. Hasura supports most databases with standard implementations of **MariaDB 10.6 and higher** including: Amazon RDS, Amazon Aurora, Digital Ocean and SkySQL. ::: diff --git a/docs/docs/databases/mariadb/docker.mdx b/docs/docs/databases/mariadb/docker.mdx index 5d9432ad134..e01884e25ae 100644 --- a/docs/docs/databases/mariadb/docker.mdx +++ b/docs/docs/databases/mariadb/docker.mdx @@ -28,7 +28,7 @@ MariaDB GraphQL Data Connector. :::tip Supported versions: 1. Hasura GraphQL Engine `v2.24.0` onwards -2. Hasura supports most databases with standard implementations of **MariaDB 10.5 and higher** including: Amazon RDS, +2. Hasura supports most databases with standard implementations of **MariaDB 10.6 and higher** including: Amazon RDS, Amazon Aurora, Digital Ocean and SkySQL. ::: diff --git a/docs/docs/databases/mariadb/index.mdx b/docs/docs/databases/mariadb/index.mdx index 855c5975d7a..30edb7e5fe2 100644 --- a/docs/docs/databases/mariadb/index.mdx +++ b/docs/docs/databases/mariadb/index.mdx @@ -28,10 +28,18 @@ To get started with MariaDB: - In Hasura Cloud, check out our [Getting Started with MariaDB in Hasura Cloud](/databases/mariadb/cloud.mdx) guide - In a Docker environment, check out our [Getting Started with Docker](/databases/mariadb/docker.mdx) guide +:::info Using Kubernetes? + +We have Helm charts available for deploying Hasura on Kubernetes. Check out +[more information here](/deployment/deployment-guides/kubernetes-helm.mdx) and see the +[`enterprise-stack` here](https://github.com/hasura/helm-charts/tree/main/charts/hasura-enterprise-stack). + +::: + :::tip Supported versions: 1. Hasura GraphQL Engine `v2.24.0` onwards -2. Hasura supports most databases with standard implementations of **MariaDB 10.5 and higher** including: Amazon RDS, +2. Hasura supports most databases with standard implementations of **MariaDB 10.6 and higher** including: Amazon RDS, Amazon Aurora, Digital Ocean and SkySQL. ::: @@ -216,8 +224,8 @@ in the `API` tab and interact with it using the GraphiQL interface. :::info Console support -We recommend using your preferred MariaDB client instead. 
The Hasura Console is designed to be a tool for managing -your GraphQL API, and not a full-fledged database management tool. +We recommend using your preferred MariaDB client instead. The Hasura Console is designed to be a tool for managing your +GraphQL API, and not a full-fledged database management tool. ::: diff --git a/docs/docs/databases/ms-sql-server/getting-started/index.mdx b/docs/docs/databases/ms-sql-server/getting-started/index.mdx index 58443dbd3c7..756c5b4cb3e 100644 --- a/docs/docs/databases/ms-sql-server/getting-started/index.mdx +++ b/docs/docs/databases/ms-sql-server/getting-started/index.mdx @@ -15,10 +15,15 @@ To try Hasura with SQL Server, you'll need your own new or existing SQL Server d Here are 2 ways you can get started with Hasura: -1. [Hasura Cloud](/databases/ms-sql-server/getting-started/cloud.mdx) : You'll need to be able to access your SQL Server database from Hasura Cloud. -2. [Docker](/databases/ms-sql-server/getting-started/docker.mdx): Run Hasura with Docker and then connect your SQL Server database to Hasura. +1. [Hasura Cloud](/databases/ms-sql-server/getting-started/cloud.mdx): You'll need to be able to access your SQL Server + database from Hasura Cloud. +2. [Docker](/databases/ms-sql-server/getting-started/docker.mdx): Run Hasura with Docker and then connect your SQL + Server database to Hasura. - \ No newline at end of file +:::info Using Kubernetes? + +We have Helm charts available for deploying Hasura on Kubernetes. Check out +[more information here](/deployment/deployment-guides/kubernetes-helm.mdx) and see the +[`enterprise-stack` here](https://github.com/hasura/helm-charts/tree/main/charts/hasura-enterprise-stack). + +::: diff --git a/docs/docs/databases/mysql/index.mdx b/docs/docs/databases/mysql/index.mdx index 93697c594d7..6363e4c0c14 100644 --- a/docs/docs/databases/mysql/index.mdx +++ b/docs/docs/databases/mysql/index.mdx @@ -30,6 +30,14 @@ To get started with MySQL: - In Hasura Cloud, check out our [Getting Started with MySQL in Hasura Cloud](/databases/mysql/cloud.mdx) guide - In a Docker environment, check out our [Getting Started with Docker](/databases/mysql/docker.mdx) guide +:::info Using Kubernetes? + +We have Helm charts available for deploying Hasura on Kubernetes. Check out +[more information here](/deployment/deployment-guides/kubernetes-helm.mdx) and see the +[`enterprise-stack` here](https://github.com/hasura/helm-charts/tree/main/charts/hasura-enterprise-stack). + +::: + :::tip Supported versions: 1. Hasura GraphQL Engine `v2.24.0` onwards @@ -219,8 +227,8 @@ in the `API` tab and interact with it using the GraphiQL interface. :::info Console support -We recommend using your preferred MySQL client instead. The Hasura Console is designed to be a tool for managing -your GraphQL API, and not a full-fledged database management tool. +We recommend using your preferred MySQL client instead. The Hasura Console is designed to be a tool for managing your +GraphQL API, and not a full-fledged database management tool. ::: diff --git a/docs/docs/databases/oracle/index.mdx b/docs/docs/databases/oracle/index.mdx index ec1f2f99f13..6e91a30f425 100644 --- a/docs/docs/databases/oracle/index.mdx +++ b/docs/docs/databases/oracle/index.mdx @@ -29,6 +29,14 @@ To get started with Oracle: - In Hasura Cloud, check out our [Getting Started with Oracle in Hasura Cloud](/databases/oracle/cloud.mdx) guide - In a Docker environment, check out our [Getting Started with Docker](/databases/oracle/docker.mdx) guide +:::info Using Kubernetes? 
+ +We have Helm charts available for deploying Hasura on Kubernetes. Check out +[more information here](/deployment/deployment-guides/kubernetes-helm.mdx) and see the +[`enterprise-stack` here](https://github.com/hasura/helm-charts/tree/main/charts/hasura-enterprise-stack). + +::: + :::tip Supported versions 1. Hasura GraphQL Engine `v2.24.0` onwards @@ -216,7 +224,7 @@ in the `API` tab and interact with it using the GraphiQL interface. :::info Console support -We recommend using your preferred Oracle client instead. The Hasura Console is designed to be a tool for managing -your GraphQL API, and not a full-fledged database management tool. +We recommend using your preferred Oracle client instead. The Hasura Console is designed to be a tool for managing your +GraphQL API, and not a full-fledged database management tool. -::: \ No newline at end of file +::: diff --git a/docs/docs/databases/quickstart.mdx b/docs/docs/databases/quickstart.mdx index 7c1837b4caa..d64e410fd27 100644 --- a/docs/docs/databases/quickstart.mdx +++ b/docs/docs/databases/quickstart.mdx @@ -85,8 +85,7 @@ required to ensure connectivity to your database from Hasura Cloud if needed. In your `config v3` project, head to the `/metadata/databases/databases.yaml` file and add the database configuration as -below. If you're using the `HASURA_GRAPHQL_DATABASE_URL` environment variable then the database will get automatically -added and named default. +below. ```yaml - name: @@ -254,8 +253,7 @@ X-Hasura-Role: admin When using Hasura Cloud, Metadata is stored for you in separate data storage to your connected database(s). When using Docker, if you want to [store the Hasura Metadata on a separate database](/deployment/graphql-engine-flags/reference.mdx#metadata-database-url), -you can use the `HASURA_GRAPHQL_METADATA_DATABASE_URL` env var to specify which database to use. By default, the Hasura -Metadata is stored on the same database as specified in the `HASURA_GRAPHQL_DATABASE_URL` environment variable. +you can use the `HASURA_GRAPHQL_METADATA_DATABASE_URL` env var to specify which database to use. ## Connect different Hasura instances to the same database diff --git a/docs/docs/databases/redshift/getting-started/index.mdx b/docs/docs/databases/redshift/getting-started/index.mdx index 848cbfbb104..1f0580198a2 100644 --- a/docs/docs/databases/redshift/getting-started/index.mdx +++ b/docs/docs/databases/redshift/getting-started/index.mdx @@ -1,5 +1,6 @@ --- slug: index +keywords: - hasura - docs - databases @@ -27,7 +28,15 @@ To try Hasura with Amazon Redshift, you'll need your own new or existing Amazon Here are 2 ways you can get started with Hasura: -1. [Hasura Cloud](/databases/redshift/getting-started/cloud.mdx) : You'll need to be able to access your Amazon Redshift - service from Hasura Cloud. -2. [Docker](/databases/redshift/getting-started/docker.mdx): Run Hasura with Docker and then connect your Amazon Redshift - service to Hasura. +1. [Hasura Cloud](/databases/redshift/getting-started/cloud.mdx) : You'll need to be able to access your Amazon + Redshift service from Hasura Cloud. +2. [Docker](/databases/redshift/getting-started/docker.mdx): Run Hasura with Docker and then connect your Amazon + Redshift service to Hasura. + +:::info Using Kubernetes? + +We have Helm charts available for deploying Hasura on Kubernetes. Check out +[more information here](/deployment/deployment-guides/kubernetes-helm.mdx) and see the +[`enterprise-stack` here](https://github.com/hasura/helm-charts/tree/main/charts/hasura-enterprise-stack). 
+ +::: diff --git a/docs/docs/databases/snowflake/getting-started/index.mdx b/docs/docs/databases/snowflake/getting-started/index.mdx index 5c7d32edf6c..1950d1b1655 100644 --- a/docs/docs/databases/snowflake/getting-started/index.mdx +++ b/docs/docs/databases/snowflake/getting-started/index.mdx @@ -18,3 +18,11 @@ Here are 2 ways you can get started with Hasura and Snowflake: service from Hasura Cloud. 2. [Docker](/databases/snowflake/getting-started/docker.mdx): Run Hasura with Docker and then connect your Snowflake service to Hasura. + +:::info Using Kubernetes? + +We have Helm charts available for deploying Hasura on Kubernetes. Check out +[more information here](/deployment/deployment-guides/kubernetes-helm.mdx) and see the +[`enterprise-stack` here](https://github.com/hasura/helm-charts/tree/main/charts/hasura-enterprise-stack). + +::: diff --git a/docs/docs/databases/vector-databases/weaviate.mdx b/docs/docs/databases/vector-databases/weaviate.mdx index 22ff0d0c3ca..d1ffeeb38c9 100644 --- a/docs/docs/databases/vector-databases/weaviate.mdx +++ b/docs/docs/databases/vector-databases/weaviate.mdx @@ -103,7 +103,7 @@ At this point, we'll need to configure a few parameters: | Database Name | The name of your Weaviate database. | | `apiKey` | The API key for your Weaviate database. | | `host` | The URL of your Weaviate database. | -| `openAPIKey` | The OpenAI key for use with your Weaviate database. | +| `openAIKey` | The OpenAI key for use with your Weaviate database. | | `scheme` | The URL scheme for your Weaviate database (http/https). | :::info Where can I find these parameters? diff --git a/docs/docs/deployment/deployment-guides/azure-container-instances-postgres.mdx b/docs/docs/deployment/deployment-guides/azure-container-instances-postgres.mdx index 3a4d8f401cc..a033955bd43 100644 --- a/docs/docs/deployment/deployment-guides/azure-container-instances-postgres.mdx +++ b/docs/docs/deployment/deployment-guides/azure-container-instances-postgres.mdx @@ -150,7 +150,7 @@ az container create --resource-group hasura \ --dns-name-label "" \ --ports 80 \ --environment-variables "HASURA_GRAPHQL_SERVER_PORT"="80" "HASURA_GRAPHQL_ENABLE_CONSOLE"="true" "HASURA_GRAPHQL_ADMIN_SECRET"=""\ - --secure-environment-variables "HASURA_GRAPHQL_DATABASE_URL"="" + --secure-environment-variables "HASURA_METADATA_DATABASE_URL"="" "PG_DATABASE_URL"="" ``` `` should be replaced by the following format: @@ -159,7 +159,9 @@ az container create --resource-group hasura \ postgres://hasura%40:@:5432/hasura ``` -If you'd like to connect to an existing database, use that server's database url. +If you'd like to connect to an existing database, use that server's database url. Hasura requires a Postgres database +to store its metadata. You can use the same database for both Hasura and the application data, or you can use a separate +database for Hasura's metadata. 
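As a sketch, the two secure environment variables from the command above could be populated as follows. The server
name, password, and database names are placeholders for illustration only; both variables may point at the same
database if you prefer a single-database setup.

```bash
# Illustrative values only -- substitute your own server, password, and database names
METADATA_DB_URL="postgres://hasura%40myserver:mypassword@myserver.postgres.database.azure.com:5432/hasura_metadata"
APP_DB_URL="postgres://hasura%40myserver:mypassword@myserver.postgres.database.azure.com:5432/hasura"

# These would then be passed to the container as:
# --secure-environment-variables "HASURA_METADATA_DATABASE_URL"="$METADATA_DB_URL" "PG_DATABASE_URL"="$APP_DB_URL"
```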
:::info Note @@ -196,9 +198,14 @@ az container create --resource-group hasura \ "HASURA_GRAPHQL_ENABLE_CONSOLE"="true" \ "HASURA_GRAPHQL_ADMIN_SECRET"="" \ "HASURA_GRAPHQL_JWT_SECRET"= \ "{\"type\": \"RS512\",\"key\": \"-----BEGIN CERTIFICATE-----\\nMIIDBzCCAe+gAwIBAgIJTpEEoUJ/bOElMA0GCSqGSIb3DQEBCwUAMCExHzAdBgNV\\nBAMTFnRyYWNrLWZyOC51cy5hdXRoMC5jb20wHhcNMjAwNzE3MDYxMjE4WhcNMzQw\\nMzI2MDYxMjE4WjAhMR8wHQYDVQQDExZ0cmFjay1mcjgudXMuYXV0aDAuY29tMIIB\\nIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuK9N9FWK1hEPtwQ8ltYjlcjF\\nX03jhGgUKkLCLxe8q4x84eGJPmeHpyK+iZZ8TWaPpyD3fk+s8BC3Dqa/Sd9QeOBh\\nZH/YnzoB3yKqF/FruFNAY+F3LUt2P2t72tcnuFg4Vr8N9u8f4ESz7OHazn+XJ7u+\\ncuqKulaxMI4mVT/fGinCiT4uGVr0VVaF8KeWsF/EJYeZTiWZyubMwJsaZ2uW2U52\\n+VDE0RE0kz0fzYiCCMfuNNPg5V94lY3ImcmSI1qSjUpJsodqACqk4srmnwMZhICO\\n14F/WUknqmIBgFdHacluC6pqgHdKLMuPnp37bf7ACnQ/L2Pw77ZwrKRymUrzlQID\\nAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSOG3E+4lHiI+l0i91u\\nxG2Rca2NATAOBgNVHQ8BAf8EBAMCAoQwDQYJKoZIhvcNAQELBQADggEBAKgmxr6c\\nYmSNJOTPtjMFFDZHHX/7iwr+vqzC3nalr6ku8E3Zs0/IpwAtzqXp0eVVdPCWUY3A\\nQCUTt63GrqshBHYAxTbT0rlXFkqL8UkJvdZQ3XoQuNsqcp22zlQWGHxsk3YP97rn\\nltPI56smyHqPj+SBqyN/Vs7Vga9G8fHCfltJOdeisbmVHaC9WquZ9S5eyT7JzPAC\\n5dI5ZUunm0cgKFVbLfPr7ykClTPy36WdHS1VWhiCyS+rKeN7KYUvoaQN2U3hXesL\\nr2M+8qaPOSQdcNmg1eMNgxZ9Dh7SXtLQB2DAOuHe/BesJj8eRyENJCSdZsUOgeZl\\nMinkSy2d927Vts8=\\n-----END CERTIFICATE-----\"}" - --secure-environment-variables "HASURA_GRAPHQL_DATABASE_URL"="" + --secure-environment-variables "HASURA_METADATA_DATABASE_URL"="" "PG_DATABASE_URL"="" ``` +Above, we're using the `--secure-environment-variables` flag to pass two environment variables that contain sensitive +information. The `--secure-environment-variables` flag ensures that the values of these variables are encrypted at rest +and in transit. Hasura uses the `HASURA_METADATA_DATABASE_URL` variable to store its metadata and the `PG_DATABASE_URL` +variable to connect to the database. These can be the same database or different databases. + :::info Note Check out the [Running with JWT](/auth/authentication/jwt.mdx#running-with-jwt) section for the usage of diff --git a/docs/docs/deployment/deployment-guides/digital-ocean-one-click.mdx b/docs/docs/deployment/deployment-guides/digital-ocean-one-click.mdx index a3bc2d1600c..ddf0c30e270 100644 --- a/docs/docs/deployment/deployment-guides/digital-ocean-one-click.mdx +++ b/docs/docs/deployment/deployment-guides/digital-ocean-one-click.mdx @@ -284,8 +284,10 @@ cd /etc/hasura vim docker-compose.yaml ... -# change the url to use a different database -HASURA_GRAPHQL_DATABASE_URL: +# change the url to use a different database for your metadata +HASURA_METADATA_DATABASE_URL: +# and here for your data using the same or different database as above +PG_DATABASE_URL: ... 
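# (illustrative values only), e.g.:
#   HASURA_METADATA_DATABASE_URL: postgres://hasura:password@my-db-host:5432/hasura_metadata
#   PG_DATABASE_URL: postgres://hasura:password@my-db-host:5432/hasura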
# type ESC followed by :wq to save and quit diff --git a/docs/docs/deployment/deployment-guides/flightcontrol.mdx b/docs/docs/deployment/deployment-guides/flightcontrol.mdx index b41c70fdaf8..ea8c5b45cb5 100644 --- a/docs/docs/deployment/deployment-guides/flightcontrol.mdx +++ b/docs/docs/deployment/deployment-guides/flightcontrol.mdx @@ -56,7 +56,7 @@ Completed, it will look like this: ### Step 3: Configure your database In the database section, Set the `Env Variable Name for Connection String` in Database settings to be -`HASURA_GRAPHQL_DATABASE_URL` and choose a region: +`HASURA_METADATA_DATABASE_URL` and choose a region: diff --git a/docs/docs/deployment/deployment-guides/google-cloud-run-cloud-sql.mdx b/docs/docs/deployment/deployment-guides/google-cloud-run-cloud-sql.mdx new file mode 100644 index 00000000000..523faf1d95e --- /dev/null +++ b/docs/docs/deployment/deployment-guides/google-cloud-run-cloud-sql.mdx @@ -0,0 +1,286 @@ +--- +description: Step-by-step guide to deploy Hasura GraphQL Engine on Google Cloud Run with Cloud SQL for Postgres +title: 'Deploy Hasura GraphQL Engine on Google Cloud Run' +keywords: + - hasura + - google cloud run + - cloud sql + - deployment + - graphql +sidebar_position: 13 +sidebar_label: Using Google Cloud Run & Cloud SQL +--- + +# Deploying Hasura GraphQL Engine on Cloud Run + +To deploy Hasura GraphQL Engine on Google Cloud Run with a Cloud SQL (Postgres) instance and ensure secure communication +via private IP, follow this detailed guide. + +:::info Prerequisites + +This guide assumes you have a [Google Cloud](https://cloud.google.com/?hl=en) account and `gcloud` [installed](https://cloud.google.com/sdk/docs/install). Additionally, you should be working within a Google Cloud Project, whether it's one you've newly created or an existing project you have access to. +::: + + +## Step 1: Setup Your Environment + +1. **Authenticate with Google Cloud:** + +```bash +gcloud auth login +``` + +2. **Set your project ID:** + +Replace `` with your actual Google Cloud project ID. + +```bash +gcloud config set project +``` + +## Step 2: Enable Required Google Cloud Services + +Enable Cloud Run, Cloud SQL, Cloud SQL Admin, Secret Manager, and the Service Networking APIs: + + +```bash +gcloud services enable run.googleapis.com sqladmin.googleapis.com servicenetworking.googleapis.com secretmanager.googleapis.com +``` + +:::caution Requires IAM permissions + +To execute the above command, your Google Cloud account needs to have the Service Usage Admin role (roles/serviceusage.serviceUsageAdmin) or an equivalent custom role with permissions to enable services. This role allows you to view, enable, and disable services in your GCP project. + +If you encounter permissions errors, contact your GCP administrator to ensure your account has the appropriate roles assigned, or to request the services be enabled on the project you are working with. + +::: + +## Step 3: Create a Cloud SQL (Postgres) Instance + +1. **Create the database instance:** + +```bash +gcloud sql instances create hasura-postgres --database-version=POSTGRES_15 --cpu=2 --memory=7680MiB --region=us-central1 +``` + +2. **Set the password** for the default postgres user: + +Replace `` with your desired password. + +```bash +gcloud sql users set-password postgres --instance=hasura-postgres --password= +``` + +3. **Create a database** + +Replace `` with your database name: + +```bash +gcloud sql databases create --instance=hasura-postgres +``` + +:::info Don't have a `default` network? 
+ +The `default` network is normally created inside a Google Cloud Platform Project, however in some cases the `default` network might have been deleted or the project may have been set up with a specific network configuration without a default network. + +To see the networks you have available you can run: + +```bash +gcloud compute networks list +``` + +If you find you do not have an appropriate network for your deployment, you can create a new VPC network by running the following command to create a network named `default`: + +```bash +gcloud compute networks create default --subnet-mode=auto +``` + +::: + + +## Step 4: Configure Service Networking for Private Connectivity + +1. **Allocate an IP range** for Google services in your VPC: + +```bash +gcloud compute addresses create google-managed-services-default \ + --global \ + --purpose=VPC_PEERING \ + --prefix-length=24 \ + --network=default +``` + +2. **Connect your VPC to the Service Networking API:** + +Replace `` with your actual Google Cloud project ID. + +```bash +gcloud services vpc-peerings connect \ + --service=servicenetworking.googleapis.com \ + --ranges=google-managed-services-default \ + --network=default \ + --project= +``` + +3. **Enable a private IP** for your CloudSQL instance: + +```bash +gcloud sql instances patch hasura-postgres --network=default +``` + +## Step 5: Create your connection string + +1. **Find your Cloud SQL instance's connection name:** + +```bash +gcloud sql instances describe hasura-postgres +``` + +:::info Note + +Take note of the `connectionName` field in the output of the above `describe` command. You will use the `connectionName` to deploy the GraphQL Engine to Cloud Run. + +::: + +2. **Construct your connection string** + +You can create the connection string by filling in the following template string. Replace ``, ``, and `` with your actual connectionName, database password, and +database name. + +``` +postgres://postgres:@/?host=/cloudsql/ +``` + +## Step 6: Store your connection string in the Secret Manager + +While you can put the connection string directly into the environment variables, it is recommended that you store it and any secrets or credentials inside of [Google's Secret Manager](https://cloud.google.com/security/products/secret-manager) for maximum security. This prevents secrets from being visible to administrators and from being accessible in other parts of the control/operations plane. + +1. **Store the constructed connection string as a secret** replacing `` with your actual connection string. + +```bash +echo -n "" | gcloud secrets create hasura-db-connection-string --data-file=- +``` + +:::info Not using the `default` service account? + +The following steps assume that you are running the `gcloud deploy` command via the default service account used by compute engine. If you are not using the default service account, you will need to grant the service account you are using the `roles/secretmanager.secretAccessor` role. + +::: + + +2. **To get the `` associated with the default service account:** + +```bash +echo "$(gcloud projects describe $(gcloud config get-value project) --format='value(projectNumber)')" +``` + +3. 
**Run the following command to grant the default service account access to the secrets**, replacing `` with your project number from the previous command:

```bash
gcloud projects add-iam-policy-binding \
  --member='serviceAccount:-compute@developer.gserviceaccount.com' \
  --role='roles/secretmanager.secretAccessor'
```

## Step 7: Deploy Hasura to Cloud Run

1. **Run the following command**, replacing `` with your actual connectionName.

For additional information on configuring the Hasura GraphQL Engine, please see the [Server configuration reference](https://hasura.io/docs/latest/deployment/graphql-engine-flags/reference/).

```bash
gcloud run deploy hasura-graphql-engine \
  --image=hasura/graphql-engine:latest \
  --add-cloudsql-instances= \
  --update-env-vars='HASURA_GRAPHQL_ENABLE_CONSOLE=true' \
  --update-secrets=HASURA_GRAPHQL_DATABASE_URL=hasura-db-connection-string:latest \
  --region=us-central1 \
  --cpu=1 \
  --min-instances=1 \
  --memory=2048Mi \
  --port=8080 \
  --allow-unauthenticated
```

## Step 8: Adding a VPC Connector (Optional)

To further enhance the connectivity and security of your Hasura GraphQL Engine deployment on Google Cloud Run,
especially when connecting to other services within your Virtual Private Cloud (VPC), you might consider adding a
Serverless VPC Access connector. This optional step is particularly useful when your architecture requires direct
access from your serverless Cloud Run service to resources within your VPC, such as VMs, other databases, or private
services that are not exposed to the public internet. For more information, please see
[Google's official documentation for Serverless VPC Access](https://cloud.google.com/vpc/docs/serverless-vpc-access).

1. **Enable the Serverless VPC Access API**

First, ensure that the Serverless VPC Access API is enabled:

```bash
gcloud services enable vpcaccess.googleapis.com
```

2. **Create a Serverless VPC Access Connector**

Choose an IP range that does not overlap with existing ranges in your VPC. This range will be used by the connector to
route traffic from your serverless application to your VPC. **It's important to ensure that the IP range does not
overlap with other subnets to avoid routing conflicts.**

```bash
gcloud compute networks vpc-access connectors create hasura-connector \
  --region=us-central1 \
  --network=default \
  --range=10.8.0.0/28
```

3. **Update the Cloud Run Deployment to use the VPC Connector**

When deploying or updating your Hasura GraphQL Engine service, specify the VPC connector with the `--vpc-connector`
flag:

```bash
gcloud run deploy hasura-graphql-engine \
  --image=hasura/graphql-engine:latest \
  --add-cloudsql-instances= \
  --update-env-vars='HASURA_GRAPHQL_ENABLE_CONSOLE=true' \
  --update-secrets=HASURA_GRAPHQL_DATABASE_URL=hasura-db-connection-string:latest \
  --vpc-connector=hasura-connector \
  --region=us-central1 \
  --cpu=1 \
  --min-instances=1 \
  --memory=2048Mi \
  --port=8080 \
  --allow-unauthenticated
```

### When and Why to Use a VPC Connector

* **Enhanced Security:** Utilize a VPC Connector when you need to ensure that traffic between your Cloud Run service
  and internal Google Cloud resources does not traverse the public internet, enhancing security.
* **Access to Internal Resources:** Use it when your serverless application needs access to resources within your VPC,
  such as internal APIs, databases, or services that are not publicly accessible.
+* **Compliance Requirements:** If your application is subject to compliance requirements that mandate data and network + traffic must remain within a private network, a VPC connector facilitates this by providing private access to your + cloud resources. +* **Network Peering:** It's beneficial when accessing services in a peered VPC, allowing your Cloud Run services to + communicate with resources across VPC networks. + +Adding a VPC Connector to your Cloud Run deployment ensures that your Hasura GraphQL Engine can securely and privately +access the necessary Google Cloud resources within your VPC, providing a robust and secure environment for your +applications. + +## Tearing Down + +To avoid incurring charges, delete the resources once you're done: + +```bash +gcloud sql instances delete hasura-postgres +gcloud run services delete hasura-graphql-engine +gcloud compute addresses delete google-managed-services-default --global +gcloud secrets delete hasura-db-connection-string +``` + +If you performed the optional Step 8, you should also delete the VPC-connector resource: + +```bash +gcloud compute networks vpc-access connectors delete hasura-connector --region=us-central1 +``` \ No newline at end of file diff --git a/docs/docs/deployment/deployment-guides/index.mdx b/docs/docs/deployment/deployment-guides/index.mdx index 89f88dc4d2d..a6def868ccc 100644 --- a/docs/docs/deployment/deployment-guides/index.mdx +++ b/docs/docs/deployment/deployment-guides/index.mdx @@ -43,3 +43,4 @@ Choose from the full list of deployment guides: - [Deploy using Nhost One-click Deploy with Managed PostgreSQL, Storage, and Auth](/deployment/deployment-guides/nhost-one-click.mdx) - [Deploy using Koyeb Serverless Platform](/deployment/deployment-guides/koyeb.mdx) - [Deploy using Flightcontrol on AWS Fargate](/deployment/deployment-guides/flightcontrol.mdx) +- [Deploy using Google Cloud Run with Cloud SQL](/deployment/deployment-guides/google-cloud-run-cloud-sql.mdx) diff --git a/docs/docs/deployment/deployment-guides/koyeb.mdx b/docs/docs/deployment/deployment-guides/koyeb.mdx index 259b85140a0..0ac96a66395 100644 --- a/docs/docs/deployment/deployment-guides/koyeb.mdx +++ b/docs/docs/deployment/deployment-guides/koyeb.mdx @@ -32,7 +32,7 @@ To deploy Hasura to Koyeb quickly, click the button below: [![Deploy to Koyeb](https://www.koyeb.com/static/images/deploy/button.svg)](https://app.koyeb.com/deploy?name=hasura-demo&type=docker&image=hasura/graphql-engine&env[HASURA_GRAPHQL_DATABASE_URL]=CHANGE_ME&env[HASURA_GRAPHQL_ENABLE_CONSOLE]=true&env[HASURA_GRAPHQL_ADMIN_SECRET]=CHANGE_ME&ports=8080;http;/) -On the configuration screen, set the `HASURA_GRAPHQL_DATABASE_URL` environment variable to the connection string for your database and the `HASURA_GRAPHQL_ADMIN_SECRET` environment variable to a secret value to access the Hasura Console. +On the configuration screen, set the `HASURA_METADATA_DATABASE_URL` (depicted as `HASURA_GRAPHQL_ENGINE_DATABASE_URL` in this screenshot) environment variable to the connection string for your database and the `HASURA_GRAPHQL_ADMIN_SECRET` environment variable to a secret value to access the Hasura Console. Click the **Deploy** button when you are finished. When the deployment completes, you can [access the Hasura Console](#access-the-hasura-console). @@ -52,9 +52,10 @@ On the [Koyeb control panel](https://app.koyeb.com/), click the **Create App** b 4. 
In the **Environment variables** section, configure the environment variables required to properly run the Hasura GraphQL Engine: - - `HASURA_GRAPHQL_DATABASE_URL`: The environment variable containing the PostgreSQL URL, i.e. `postgres://:@:/`. Since this value contains sensitive information, select the "Secret" type. Secrets are encrypted at rest and are ideal for storing sensitive data like API keys, OAuth tokens, etc. Choose "Create secret" in the "Value" drop-down menu and enter the secret value in the "Create secret" form. + - `HASURA_METADATA_DATABASE_URL`: Hasura requires a PostgreSQL database to store its metadata. This can be the same database as `PG_DATABASE_URL` or a different one. We strongly recommend using a secret to store this value. + - `PG_DATABASE_URL`: The environment variable containing the PostgreSQL URL, i.e. `postgres://:@:/`. Since this value contains sensitive information, select the "Secret" type. Secrets are encrypted at rest and are ideal for storing sensitive data like API keys, OAuth tokens, etc. Choose "Create secret" in the "Value" drop-down menu and enter the secret value in the "Create secret" form. - `HASURA_GRAPHQL_ENABLE_CONSOLE`: Set to `true`. This will expose and allow you to access the Hasura Console. - - `HASURA_GRAPHQL_ADMIN_SECRET`: The secret to access the Hasura Console. As with the `HASURA_GRAPHQL_DATABASE_URL`, we strongly recommend using a secret to store this value. + - `HASURA_GRAPHQL_ADMIN_SECRET`: The secret to access the Hasura Console. As with the other environment variables, we strongly recommend using a secret to store this value. 5. In the **Exposing your service** section, change the `Port` from `80` to `8080` to match the port that the `hasura/graphql-engine` Docker image app listens on. Koyeb uses this setting to perform application health checks and to properly route incoming HTTP requests. If you want the Hasura GraphQL Engine to be available on a specific path, you can change the default one (`/`) to the path of your choice. diff --git a/docs/docs/deployment/deployment-guides/kubernetes.mdx b/docs/docs/deployment/deployment-guides/kubernetes.mdx index 6fd06cb6f42..e6e8fd56005 100644 --- a/docs/docs/deployment/deployment-guides/kubernetes.mdx +++ b/docs/docs/deployment/deployment-guides/kubernetes.mdx @@ -37,11 +37,11 @@ Edit `deployment.yaml` and set the right database url: ```yaml {2} env: - - name: HASURA_GRAPHQL_DATABASE_URL + - name: HASURA_METADATA_DATABASE_URL value: postgres://:@hostname:/ ``` -Examples of `HASURA_GRAPHQL_DATABASE_URL`: +Examples of `HASURA_METADATA_DATABASE_URL`: - `postgres://admin:password@localhost:5432/my-db` - `postgres://admin:@localhost:5432/my-db` _(if there is no password)_ @@ -49,7 +49,7 @@ Examples of `HASURA_GRAPHQL_DATABASE_URL`: :::info Note - If your **password contains special characters** (e.g. #, %, $, @, etc.), you need to URL encode them in the - `HASURA_GRAPHQL_DATABASE_URL` env var (e.g. %40 for @). + `HASURA_METADATA_DATABASE_URL` env var (e.g. %40 for @). You can check the [logs](#kubernetes-logs) to see if the database credentials are proper and if Hasura is able to connect to the database. 
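For example, a quick way to URL encode a password before placing it in the connection string is to use Python's
standard library; this is only a sketch and the password shown is made up:

```bash
# Prints "p%40ss%23word" for the made-up password "p@ss#word"
python3 -c "import urllib.parse; print(urllib.parse.quote('p@ss#word', safe=''))"
```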
@@ -104,7 +104,7 @@ spec: command: ["graphql-engine"] args: ["serve", "--enable-console"] env: - - name: HASURA_GRAPHQL_DATABASE_URL + - name: HASURA_METADATA_DATABASE_URL value: postgres://:@hostname:/ - name: HASURA_GRAPHQL_ADMIN_SECRET value: mysecretkey diff --git a/docs/docs/deployment/downgrading.mdx b/docs/docs/deployment/downgrading.mdx index c11c3a8ba04..635bac71d89 100644 --- a/docs/docs/deployment/downgrading.mdx +++ b/docs/docs/deployment/downgrading.mdx @@ -49,7 +49,7 @@ command on `graphql-engine` itself. The way to execute this command is to run: ```bash -docker run -e HASURA_GRAPHQL_DATABASE_URL=$DATABASE_URL hasura/graphql-engine: graphql-engine downgrade --to- +docker run -e HASURA_METADATA_DATABASE_URL=$DATABASE_URL hasura/graphql-engine: graphql-engine downgrade --to- ``` You need to use a newer version of `graphql-engine` to downgrade to an diff --git a/docs/docs/deployment/graphql-engine-flags/config-examples.mdx b/docs/docs/deployment/graphql-engine-flags/config-examples.mdx index e8046f5d2cf..badd39291c7 100644 --- a/docs/docs/deployment/graphql-engine-flags/config-examples.mdx +++ b/docs/docs/deployment/graphql-engine-flags/config-examples.mdx @@ -259,7 +259,7 @@ provided to the server** ```bash # env var HASURA_GRAPHQL_METADATA_DATABASE_URL=postgres://:@:/ -HASURA_GRAPHQL_DATABASE_URL=postgres://:@:/ +PG_DATABASE_URL=postgres://:@:/ # flag --metadata-database-url=postgres://:@:/ @@ -269,7 +269,7 @@ HASURA_GRAPHQL_DATABASE_URL=postgres://:@:/ In this case, Hasura GraphQL Engine will use the `HASURA_GRAPHQL_METADATA_DATABASE_URL` to store the `metadata catalogue` and starts the server with the database provided in the -`HASURA_GRAPHQL_DATABASE_URL`. +`PG_DATABASE_URL`. **2. Only** `metadata database` **is provided to the server** @@ -285,27 +285,4 @@ In this case, Hasura GraphQL Engine will use the `HASURA_GRAPHQL_METADATA_DATABASE_URL` to store the `metadata catalogue` and starts the server without tracking/managing any database. _i.e_ a Hasura GraphQL server will be started with no database. The user could -then manually track/manage databases at a later time. - -**3. Only** `primary database` **is provided to the server** - -```bash -# env var -HASURA_GRAPHQL_DATABASE_URL=postgres://:@:/ - -# flag ---database-url=postgres://:@:/ -``` - -In this case, Hasura GraphQL Engine server will start with the database -provided in the `HASURA_GRAPHQL_DATABASE_URL` and will also use the -_same database_ to store the `metadata catalogue`. - -**4. Neither** `primary database` **nor** `metadata database` **is -provided to the server** - -Hasura GraphQL Engine will fail to startup and will throw an error - -```bash -Fatal Error: Either of --metadata-database-url or --database-url option expected -``` +then manually track/manage databases at a later time. \ No newline at end of file diff --git a/docs/docs/deployment/graphql-engine-flags/reference.mdx b/docs/docs/deployment/graphql-engine-flags/reference.mdx index f313895582f..544e2fa44cb 100644 --- a/docs/docs/deployment/graphql-engine-flags/reference.mdx +++ b/docs/docs/deployment/graphql-engine-flags/reference.mdx @@ -44,15 +44,16 @@ the list of connected data sources. :::info Note -This config option is supported to maintain backwards compatibility with `v1.x` Hasura instances. In versions `v2.0` and -above, databases can be connected using any custom environment variables of your choice. +This config option is supported to maintain backwards compatibility with `v1.x` Hasura instances. 
**In versions `v2.0` +and above, databases can be connected using any custom environment variables of your choice. Our `docker-compose.yaml` +files in the install manifests reference `PG_DATABASE_URL` as the environment variable to use for connecting to a +database, but this can be any plaintext value which does not start with `HASURA_`.** ::: ### Metadata Database URL -This Postgres database URL is used to store Hasura's Metadata. By default, the database configured using -`HASURA_GRAPHQL_DATABASE_URL` / `--database_url` will be used to store the Metadata. This can also be a URI of the form +This Postgres database URL is used to store Hasura's Metadata. This can also be a URI of the form `dynamic-from-file:///path/to/file`, where the referenced file contains a postgres connection string, which will be read dynamically every time a new connection is established. This allows the server to be used in an environment where secrets are rotated frequently. @@ -68,7 +69,7 @@ secrets are rotated frequently. :::info Note -Either one of the Metadata Database URL or the Database URL needs to be provided for Hasura to start. +THe metadata database URL needs to be set for Hasura to start. ::: @@ -386,17 +387,31 @@ subgraph in an Apollo supergraph. | **Default** | `false` | | **Supported in** | CE, Enterprise Edition, Cloud | -### Header Size Limit -Sets the maximum cumulative length of all headers in bytes. +### Enable Automated Persisted Queries + +Enables the [Automated Persisted Queries](https://www.apollographql.com/docs/apollo-server/performance/apq/) feature. + +| | | +| ------------------- | ------------------------------------------------ | +| **Flag** | `--enable-persisted-queries` | +| **Env var** | `HASURA_GRAPHQL_ENABLE_PERSISTED_QUERIES` | +| **Accepted values** | Boolean | +| **Default** | `false` | +| **Supported in** | Enterprise Edition | + +### Set Automated Persisted Queries TTL + +Sets the query TTL in the cache store for Automated Persisted Queries. + +| | | +| ------------------- | ------------------------------------------------ | +| **Flag** | `--persisted-queries-ttl` | +| **Env var** | `HASURA_GRAPHQL_PERSISTED_QUERIES_TTL` | +| **Accepted values** | Integer | +| **Default** | `5` (seconds) | +| **Supported in** | Enterprise Edition | -| | | -| ------------------- | ---------------------------------------- | -| **Flag** | `--max-total-header-length` | -| **Env var** | `HASURA_GRAPHQL_MAX_TOTAL_HEADER_LENGTH` | -| **Accepted values** | Integer | -| **Default** | `1024*1024` (1MB) | -| **Supported in** | CE, Enterprise Edition | ### Enable Error Log Level for Trigger Errors @@ -410,6 +425,7 @@ Sets the log-level as `error` for Trigger type error logs (Event Triggers, Sched | **Default** | `false` | | **Supported in** | CE, Enterprise Edition | + ### Enable Console Enable the Hasura Console (served by the server on `/` and `/console`). @@ -423,6 +439,19 @@ Enable the Hasura Console (served by the server on `/` and `/console`). | **Default** | **CE**, **Enterprise Edition**: `false`
**Cloud**: Console is always enabled | | **Supported in** | CE, Enterprise Edition | +### Header Size Limit + +Sets the maximum cumulative length of all headers in bytes. + +| | | +| ------------------- | ---------------------------------------- | +| **Flag** | `--max-total-header-length` | +| **Env var** | `HASURA_GRAPHQL_MAX_TOTAL_HEADER_LENGTH` | +| **Accepted values** | Integer | +| **Default** | `1024*1024` (1MB) | +| **Supported in** | CE, Enterprise Edition | + + ### Enable High-cardinality Labels for Metrics Enable high-cardinality labels for [Prometheus Metrics](/observability/enterprise-edition/prometheus/metrics.mdx). @@ -528,7 +557,7 @@ log types — can be found [here](/deployment/logging.mdx#log-types). | **Env var** | `HASURA_GRAPHQL_ENABLED_LOG_TYPES` | | **Accepted values** | String (Comma-separated) | | **Options** | `startup`, `http-log`, `webhook-log`, `websocket-log`, `query-log`, `execution-log`, `livequery-poller-log`, `action-handler-log`, `data-connector-log`, `jwk-refresh-log`, `validate-input-log` | -| **Default** | `startup, http-log, webhook-log, websocket-log`, `jwk-refresh` | +| **Default** | `startup, http-log, webhook-log, websocket-log`, `jwk-refresh-log` | | **Supported in** | CE, Enterprise Edition | ### Events HTTP Pool Size diff --git a/docs/docs/enterprise/getting-started/quickstart-google-cloud-run.mdx b/docs/docs/enterprise/getting-started/quickstart-google-cloud-run.mdx index 23e7ceec3bf..eea0b4b0c0f 100644 --- a/docs/docs/enterprise/getting-started/quickstart-google-cloud-run.mdx +++ b/docs/docs/enterprise/getting-started/quickstart-google-cloud-run.mdx @@ -131,7 +131,7 @@ gcloud run deploy hasura \ --env-vars-file=env.yaml \ --vpc-connector= \ --allow-unauthenticated \ - --max-instances=1 \ + --min-instances=1 \ --cpu=1 \ --memory=2048Mi \ --port=8080 diff --git a/docs/docs/enterprise/sso/adfs.mdx b/docs/docs/enterprise/sso/adfs.mdx index c3b55e84072..201609d7e15 100644 --- a/docs/docs/enterprise/sso/adfs.mdx +++ b/docs/docs/enterprise/sso/adfs.mdx @@ -306,7 +306,8 @@ services: environment: HASURA_GRAPHQL_EE_LICENSE_KEY: HASURA_GRAPHQL_ADMIN_SECRET: - HASURA_GRAPHQL_DATABASE_URL: postgres://postgres:postgrespassword@postgres:5432/postgres?sslmode=disable + HASURA_METADATA_DATABASE_URL: postgres://postgres:postgrespassword@postgres:5432/postgres?sslmode=disable + PG_DATABASE_URL: postgres://postgres:postgrespassword@postgres:5432/postgres?sslmode=disable HASURA_GRAPHQL_ENABLE_CONSOLE: 'true' HASURA_GRAPHQL_DEV_MODE: 'true' HASURA_GRAPHQL_ENABLED_LOG_TYPES: startup,http-log,webhook-log,websocket-log,query-log diff --git a/docs/docs/enterprise/sso/auth0.mdx b/docs/docs/enterprise/sso/auth0.mdx index b077ddd8c5d..235823c240f 100644 --- a/docs/docs/enterprise/sso/auth0.mdx +++ b/docs/docs/enterprise/sso/auth0.mdx @@ -427,7 +427,8 @@ services: environment: HASURA_GRAPHQL_EE_LICENSE_KEY: HASURA_GRAPHQL_ADMIN_SECRET: - HASURA_GRAPHQL_DATABASE_URL: postgres://postgres:postgrespassword@postgres:5432/postgres?sslmode=disable + HASURA_METADATA_DATABASE_URL: postgres://postgres:postgrespassword@postgres:5432/postgres?sslmode=disable + PG_DATABASE_URL: postgres://postgres:postgrespassword@postgres:5432/postgres?sslmode=disable HASURA_GRAPHQL_ENABLE_CONSOLE: 'true' HASURA_GRAPHQL_DEV_MODE: 'true' HASURA_GRAPHQL_ENABLED_LOG_TYPES: startup,http-log,webhook-log,websocket-log,query-log diff --git a/docs/docs/enterprise/sso/google-workspace.mdx b/docs/docs/enterprise/sso/google-workspace.mdx index 83726a50ec3..b8e26cc0051 100644 --- 
a/docs/docs/enterprise/sso/google-workspace.mdx +++ b/docs/docs/enterprise/sso/google-workspace.mdx @@ -281,7 +281,8 @@ services: environment: HASURA_GRAPHQL_EE_LICENSE_KEY: HASURA_GRAPHQL_ADMIN_SECRET: - HASURA_GRAPHQL_DATABASE_URL: postgres://postgres:postgrespassword@postgres:5432/postgres?sslmode=disable + HASURA_METADATA_DATABASE_URL: postgres://postgres:postgrespassword@postgres:5432/postgres?sslmode=disable + PG_DATABASE_URL: postgres://postgres:postgrespassword@postgres:5432/postgres?sslmode=disable HASURA_GRAPHQL_ENABLE_CONSOLE: 'true' HASURA_GRAPHQL_DEV_MODE: 'true' HASURA_GRAPHQL_ENABLED_LOG_TYPES: startup,http-log,webhook-log,websocket-log,query-log diff --git a/docs/docs/enterprise/sso/ldap.mdx b/docs/docs/enterprise/sso/ldap.mdx index 085a51800a4..b9bad39039b 100644 --- a/docs/docs/enterprise/sso/ldap.mdx +++ b/docs/docs/enterprise/sso/ldap.mdx @@ -403,7 +403,8 @@ services: environment: HASURA_GRAPHQL_EE_LICENSE_KEY: HASURA_GRAPHQL_ADMIN_SECRET: - HASURA_GRAPHQL_DATABASE_URL: postgres://postgres:postgrespassword@postgres:5432/postgres?sslmode=disable + HASURA_METADATA_DATABASE_URL: postgres://postgres:postgrespassword@postgres:5432/postgres?sslmode=disable + PG_DATABASE_URL: postgres://postgres:postgrespassword@postgres:5432/postgres?sslmode=disable HASURA_GRAPHQL_ENABLE_CONSOLE: 'true' HASURA_GRAPHQL_DEV_MODE: 'true' HASURA_GRAPHQL_ENABLED_LOG_TYPES: startup,http-log,webhook-log,websocket-log,query-log diff --git a/docs/docs/event-triggers/observability-and-performance.mdx b/docs/docs/event-triggers/observability-and-performance.mdx index 643b8491fa4..daab3a04a73 100644 --- a/docs/docs/event-triggers/observability-and-performance.mdx +++ b/docs/docs/event-triggers/observability-and-performance.mdx @@ -50,7 +50,7 @@ After receiving response from the webhook, the event's state is updated in the H ## Observability - + Hasura EE exposes a set of [Prometheus metrics](/observability/enterprise-edition/prometheus/metrics.mdx/#hasura-event-triggers-metrics) that can be used to monitor the Event Trigger system and help diagnose performance issues. diff --git a/docs/docs/hasura-cli/install-hasura-cli.mdx b/docs/docs/hasura-cli/install-hasura-cli.mdx index b42ddef3fe8..1af55b1a69e 100644 --- a/docs/docs/hasura-cli/install-hasura-cli.mdx +++ b/docs/docs/hasura-cli/install-hasura-cli.mdx @@ -46,7 +46,7 @@ curl -L https://github.com/hasura/graphql-engine/raw/stable/cli/get.sh | INSTALL You can also install a specific version of the CLI by providing the `VERSION` variable: ```bash -curl -L https://github.com/hasura/graphql-engine/raw/stable/cli/get.sh | VERSION=v2.37.0 bash +curl -L https://github.com/hasura/graphql-engine/raw/stable/cli/get.sh | VERSION=v2.38.0 bash ```
@@ -71,7 +71,7 @@ curl -L https://github.com/hasura/graphql-engine/raw/stable/cli/get.sh | INSTALL You can also install a specific version of the CLI by providing the `VERSION` variable: ```bash -curl -L https://github.com/hasura/graphql-engine/raw/stable/cli/get.sh | VERSION=v2.37.0 bash +curl -L https://github.com/hasura/graphql-engine/raw/stable/cli/get.sh | VERSION=v2.38.0 bash ```
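Once the installation completes, you can verify that the CLI is available on your `PATH` and confirm which version was
installed:

```bash
hasura version
```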
diff --git a/docs/docs/hasura-cli/quickstart.mdx b/docs/docs/hasura-cli/quickstart.mdx index c9fe82085a8..e03e67c1e0c 100644 --- a/docs/docs/hasura-cli/quickstart.mdx +++ b/docs/docs/hasura-cli/quickstart.mdx @@ -117,8 +117,7 @@ PG_DATABASE_URL: postgres://postgres:postgres@postgres:5432/postgres ``` We'll enter the name `default` for the ` Database Display Name` field. This name is used to identify the data source in -Hasura's Metadata and is not your database's name. Should you choose to use the `HASURA_GRAPHQL_DATABASE_URL` -environment variable instead, `default` is the default name assigned to your data source by Hasura. +Hasura's Metadata and is not your database's name. Next, we'll choose `Environment Variable` from the `Connect Database Via` options; enter `PG_DATABASE_URL` as the name: diff --git a/docs/docs/migrations-metadata-seeds/legacy-configs/config-v2/advanced/auto-apply-migrations.mdx b/docs/docs/migrations-metadata-seeds/legacy-configs/config-v2/advanced/auto-apply-migrations.mdx index 9b91c171096..31fdc8d79a4 100644 --- a/docs/docs/migrations-metadata-seeds/legacy-configs/config-v2/advanced/auto-apply-migrations.mdx +++ b/docs/docs/migrations-metadata-seeds/legacy-configs/config-v2/advanced/auto-apply-migrations.mdx @@ -58,7 +58,7 @@ Example: docker run -p 8080:8080 \ -v /home/me/my-project/migrations:/hasura-migrations \ -v /home/me/my-project/metadata:/hasura-metadata \ - -e HASURA_GRAPHQL_DATABASE_URL=postgres://postgres:@postgres:5432/postgres \ + -e HASURA_METADATA_DATABASE_URL=postgres://postgres:@postgres:5432/postgres \ hasura/graphql-engine:v1.2.0.cli-migrations-v2 ``` diff --git a/docs/docs/migrations-metadata-seeds/legacy-configs/upgrade-v3.mdx b/docs/docs/migrations-metadata-seeds/legacy-configs/upgrade-v3.mdx index c10edeaf49f..bd0916be543 100644 --- a/docs/docs/migrations-metadata-seeds/legacy-configs/upgrade-v3.mdx +++ b/docs/docs/migrations-metadata-seeds/legacy-configs/upgrade-v3.mdx @@ -241,9 +241,7 @@ hasura scripts update-project-v3 Your project directory and `config.yaml` should be updated to v3. The update script will ask for the name of database the current -Migrations and seeds correspond to. If you are starting Hasura with a -`HASURA_GRAPHQL_DATABASE_URL` then the name of the database should be -`default`. +Migrations and seeds correspond to. ## Continue using config v2 diff --git a/docs/docs/observability/cloud/newrelic.mdx b/docs/docs/observability/cloud/newrelic.mdx index f17226bf6d2..97661f55a97 100644 --- a/docs/docs/observability/cloud/newrelic.mdx +++ b/docs/docs/observability/cloud/newrelic.mdx @@ -58,6 +58,12 @@ host, service name and custom attributes to associate with exported logs and met | Custom Attributes | Custom Attributes associated with your logs and metrics. A default source tag `hasura-cloud-metrics` is added to all exported logs and metrics. Attributes `project_id` and `project_name` are added to all exported metrics. | | Service Name | The name of the application or service generating the log events. | +:::info API Key type + +Your API key must be of type `License` in order to export logs and metrics to New Relic. + +::: + After adding appropriate values, click `Save`. 
diff --git a/docs/docs/observability/enterprise-edition/prometheus/metrics.mdx b/docs/docs/observability/enterprise-edition/prometheus/metrics.mdx index 1c5f156237e..7d966a57a8f 100644 --- a/docs/docs/observability/enterprise-edition/prometheus/metrics.mdx +++ b/docs/docs/observability/enterprise-edition/prometheus/metrics.mdx @@ -44,11 +44,11 @@ buckets, you should consider [tuning the performance](/deployment/performance-tu Number of GraphQL requests received, representing the GraphQL query/mutation traffic on the server. -| | | -| ------ | -------------------------------------------------------------- | -| Name | `hasura_graphql_requests_total` | -| Type | Counter | -| Labels | `operation_type`: query \| mutation \| subscription \| unknown | +| | | +| ------ | -------------------------------------------------------------------------------------------------------------------------------------------------- | +| Name | `hasura_graphql_requests_total` | +| Type | Counter | +| Labels | `operation_type`: query \| mutation \| subscription \| unknown, `response_status`: success \| failed, `operation_name`, `parameterized_query_hash` | The `unknown` operation type will be returned for queries that fail authorization, parsing, or certain validations. The `response_status` label will be `success` for successful requests and `failed` for failed requests. diff --git a/docs/docs/observability/opentelemetry.mdx b/docs/docs/observability/opentelemetry.mdx index f55eb2b514e..eb0946852fa 100644 --- a/docs/docs/observability/opentelemetry.mdx +++ b/docs/docs/observability/opentelemetry.mdx @@ -38,6 +38,11 @@ subscriptions with the [OpenTelemetry](https://opentelemetry.io/docs/concepts/si be exported directly from your Hasura instances to your observability tool that supports OpenTelemetry traces. This can be configured in the `Settings` section of the Hasura Console. +## Available Metrics + +The available OpenTelemetry metrics are the same as those available via +[Prometheus](/observability/enterprise-edition/prometheus/metrics.mdx). + ## Configure the OpenTelemetry receiver :::info Supported from @@ -56,8 +61,8 @@ All users are encouraged to migrate to this new integration. :::info Traces on Hasura Cloud -Hasura Cloud implements sampling on traces. That means only one in every `n` traces will be sampled and exported -(`n` will be automatically configured based on various parameters during runtime. This can't be manually adjusted). +Hasura Cloud implements sampling on traces. That means only one in every `n` traces will be sampled and exported (`n` +will be automatically configured based on various parameters during runtime. This can't be manually adjusted). ::: @@ -99,7 +104,7 @@ exporter_otlp: otlp_metrics_endpoint: http://host.docker.internal:4318/v1/metrics otlp_logs_endpoint: http://host.docker.internal:4318/v1/logs protocol: http/protobuf - traces_propagators: + traces_propagators: - tracecontext batch_span_processor: max_export_batch_size: 512 @@ -304,8 +309,8 @@ be found in the [OpenTelemetry Collector repository](https://github.com/open-tel Trace and Span ID are included in the root of the log body. GraphQL Engine follows [OpenTelemetry's data model](https://opentelemetry.io/docs/specs/otel/logs/data-model/#log-and-event-record-definition) -so that OpenTelemetry-compliant services can automatically correlate logs with Traces. However, some services need -extra configurations. +so that OpenTelemetry-compliant services can automatically correlate logs with Traces. 
However, some services need extra
+configurations.
 ### Jaeger
@@ -334,7 +339,7 @@ datasources:
 filterByTraceID: false
 filterBySpanID: false
 customQuery: true
- query: "{exporter=\"OTLP\"} | json | traceid=`$${__span.traceId}`"
+ query: '{exporter="OTLP"} | json | traceid=`$${__span.traceId}`'
 traceQuery:
 timeShiftEnabled: true
 spanStartTimeShift: '1h'
@@ -351,8 +356,9 @@ You will see the `Logs for this span` button enabled when exploring the trace de
 ### Datadog
-If Datadog can't correlate between traces and logs, you should verify the Trace ID attributes mapping.
-Read more at [the troubleshooting section](https://docs.datadoghq.com/tracing/troubleshooting/correlated-logs-not-showing-up-in-the-trace-id-panel/?tab=jsonlogs#trace-id-option) on Datadog.
+If Datadog can't correlate traces and logs, you should verify the Trace ID attributes mapping. Read more at
+[the troubleshooting section](https://docs.datadoghq.com/tracing/troubleshooting/correlated-logs-not-showing-up-in-the-trace-id-panel/?tab=jsonlogs#trace-id-option)
+on Datadog.
diff --git a/docs/docs/queries/ms-sql-server/variables-aliases-fragments-directives.mdx b/docs/docs/queries/ms-sql-server/variables-aliases-fragments-directives.mdx index 26d46ceac3d..72faf956c6b 100644 --- a/docs/docs/queries/ms-sql-server/variables-aliases-fragments-directives.mdx +++ b/docs/docs/queries/ms-sql-server/variables-aliases-fragments-directives.mdx @@ -24,16 +24,13 @@ variables.
 **Example:** Fetch an author by their `author_id`:
diff --git a/docs/docs/queries/postgres/variables-aliases-fragments-directives.mdx b/docs/docs/queries/postgres/variables-aliases-fragments-directives.mdx index e85241f43e0..6e735d89719 100644 --- a/docs/docs/queries/postgres/variables-aliases-fragments-directives.mdx +++ b/docs/docs/queries/postgres/variables-aliases-fragments-directives.mdx @@ -25,9 +25,9 @@ variables.
 **Example:** Fetch an author by their `author_id`:
diff --git a/docs/docs/resources/upgrade-hasura-v2.mdx b/docs/docs/resources/upgrade-hasura-v2.mdx index 6fda698517e..1b68db72095 100644 --- a/docs/docs/resources/upgrade-hasura-v2.mdx +++ b/docs/docs/resources/upgrade-hasura-v2.mdx @@ -173,26 +173,20 @@ A detailed changelog with all the new features introduced in Hasura v2 is availa
 with Hasura v2 instances. Hasura v2 will assume the `v2` Metadata and Migrations belong to a database connected with the name `default`.
-- A new optional env var `HASURA_GRAPHQL_METADATA_DATABASE_URL` is now introduced. When set, this Postgres database is
- used to store the Hasura Metadata. If not set, the database set using `HASURA_GRAPHQL_DATABASE_URL` is used to store
- the Hasura Metadata.
-
- Either one of `HASURA_GRAPHQL_METADATA_DATABASE_URL` or `HASURA_GRAPHQL_DATABASE_URL` needs to be set with a Postgres
- database to start a Hasura v2 instance as Hasura always needs a Postgres database to store its metadata.
-
-- The database set using the `HASURA_GRAPHQL_DATABASE_URL` env var is connected automatically with the name `default` in
- Hasura v2 while updating an existing instance or while starting a fresh instance.
-
- Setting this env var post initial setup/update will have no effect as the Hasura Metadata for data sources would
- already have been initialized and the env var will be treated as any other custom env var.
-
- It is now not mandatory to set this env var if a dedicated `HASURA_GRAPHQL_METADATA_DATABASE_URL` is set.
+- A new env var `HASURA_GRAPHQL_METADATA_DATABASE_URL` has been introduced and is mandatory for storing Hasura
+ Metadata.
- Custom env vars can now be used to connect databases dynamically at runtime.
- With support for multiple databases, older database specific env vars have been deprecated. [See details](#hasura-v2-env-changes)
+:::info Existing Metadata
+
+`HASURA_GRAPHQL_METADATA_DATABASE_URL` must be the connection string of the database where your Hasura Metadata was previously stored.
+
+:::
+
 ## Moving from Hasura v1 to Hasura v2 {#moving-from-hasura-v1-to-v2}
 ### Hasura v1 and Hasura v2 compatibility {#hasura-v1-v2-compatibility}
@@ -206,14 +200,12 @@ instance**.
 Post adding a database named `default`, the Hasura v2 instance should behave equivalently to the Hasura v1 instance and all previous workflows will continue working as they were.
-Refer to [connecting databases](/databases/overview.mdx) to add a database to Hasura v2.
+Refer to [connecting databases](/databases/quickstart.mdx) to add a database to Hasura v2.
 ### Migrate Hasura v1 instance to Hasura v2
 Hasura v2 is backwards compatible with Hasura v1. Hence simply updating the Hasura docker image version number and
-restarting your Hasura instance should work seamlessly. The database connected using the `HASURA_GRAPHQL_DATABASE_URL`
-env var will be added as a database with the name `default` automatically and all existing Metadata and Migrations will
-be assumed to belong to it.
+restarting your Hasura instance should work seamlessly.
 :::info Note
@@ -282,7 +274,7 @@ by reverting the Hasura docker image version and using the [downgrade command](/ the Hasura Metadata catalogue changes:
 ```bash
-docker run -e HASURA_GRAPHQL_DATABASE_URL=$POSTGRES_URL hasura/graphql-engine:v2.0.0 graphql-engine downgrade --to-v1.3.3
+docker run -e HASURA_GRAPHQL_METADATA_DATABASE_URL=$POSTGRES_URL hasura/graphql-engine:v2.0.0 graphql-engine downgrade --to-v1.3.3
 ```
 :::info Note
diff --git a/docs/docs/restified/quickstart.mdx b/docs/docs/restified/quickstart.mdx index 114e69dc5a8..73717b0a14b 100644 --- a/docs/docs/restified/quickstart.mdx +++ b/docs/docs/restified/quickstart.mdx @@ -22,62 +22,40 @@ will walk you through the process of creating a REST endpoint from a table.
 To see an alternative method of creating a REST endpoint from a query in the GraphiQL IDE, check out the
 [Create RESTified endpoints](/restified/create.mdx#create-from-graphiql) page.
-:::info Data source availability
-
-Available for **Postgres, MS SQL Server, Citus, AlloyDB and CockroachDB** databases.
-
-:::
-
 ### Step 1: Navigate to the products table.
 Navigate to `Data > default > public > products` and click the "Create REST Endpoints" button.
-
-
+
 ### Step 2: Choose operations
-After clicking on the "Create REST endpoints" button, you will see a modal list of all REST operations (`READ`, `READ
- ALL`, `CREATE`, `UPDATE`, `DELETE`) available on the table. Select `READ` and `CREATE` for this demo. Click the
+After clicking on the "Create REST endpoints" button, you will see a modal list of all REST operations (`READ`,
+`READ ALL`, `CREATE`, `UPDATE`, `DELETE`) available on the table. Select `READ` and `CREATE` for this demo. Click the
 "Create" button.
-
+
 ### Step 3: View all REST endpoints
 You will be able to see the newly created REST endpoints listed in the `API > REST` tab.
-
+
 ### Step 4: Test the REST endpoint
-Click on the `products_by_pk` title to get to the details page for that RESTified endpoint. In the "Request
-Variables" section for `id` enter the value `7992fdfa-65b5-11ed-8612-6a8b11ef7372`, the UUID for one of the products
-already in the `products` table of the docs sample app.
Click "Run Request". +Click on the `products_by_pk` title to get to the details page for that RESTified endpoint. In the "Request Variables" +section for `id` enter the value `7992fdfa-65b5-11ed-8612-6a8b11ef7372`, the UUID for one of the products already in the +`products` table of the docs sample app. Click "Run Request". - + You will see the result returned next to the query. -You can test the other `insert_products_one` endpoint that we created in the same way by providing a new product -object as the request variable. +You can test the other `insert_products_one` endpoint that we created in the same way by providing a new product object +as the request variable. You can also use your favourite REST client to test the endpoint. For example, using `curl`: @@ -89,11 +67,11 @@ curl --location --request GET 'https://.hasura.app/api/rest ## Recap -What just happened? Well, you just created two REST endpoints for reading a single product and inserting a product, +What just happened? Well, you just created two REST endpoints for reading a single product and inserting a product, super fast, and without writing a single line of code 🎉 -This saves you significant time and effort, as you easily enable REST endpoints on your tables or [convert any query -or mutation into a REST endpoint](/restified/create.mdx) with just a few clicks. +This saves you significant time and effort, as you easily enable REST endpoints on your tables or +[convert any query or mutation into a REST endpoint](/restified/create.mdx) with just a few clicks. By using RESTified endpoints, you can take advantage of the benefits of both REST and GraphQL, making your Hasura project even more versatile and powerful. For more details, check out the diff --git a/docs/docs/schema/postgres/custom-functions.mdx b/docs/docs/schema/postgres/custom-functions.mdx index 5af7f96c134..6d6f7e2f494 100644 --- a/docs/docs/schema/postgres/custom-functions.mdx +++ b/docs/docs/schema/postgres/custom-functions.mdx @@ -8,7 +8,6 @@ keywords: - postgres - schema - sql functions - - stored procedures --- import GraphiQLIDE from '@site/src/components/GraphiQLIDE'; @@ -21,8 +20,7 @@ import TabItem from '@theme/TabItem'; ## What are Custom functions? Postgres [user-defined SQL functions](https://www.postgresql.org/docs/current/sql-createfunction.html) can be used to -either encapsulate some custom business logic or extend the built-in SQL functions and operators. SQL functions are also -referred to as **stored procedures**. +either encapsulate some custom business logic or extend the built-in SQL functions and operators. Hasura GraphQL Engine lets you expose certain types of user-defined functions as top level fields in the GraphQL API to allow querying them with either `queries` or `subscriptions`, or for `VOLATILE` functions as `mutations`. These are diff --git a/docs/docs/schema/snowflake/native-queries.mdx b/docs/docs/schema/snowflake/native-queries.mdx index 8bad3559af2..229c6cfc639 100644 --- a/docs/docs/schema/snowflake/native-queries.mdx +++ b/docs/docs/schema/snowflake/native-queries.mdx @@ -308,7 +308,7 @@ In order to represent the structure of the data returned by the query, we first :::info Permissions and Logical Models Note that this Logical Model has no attached permissions and therefore will only be available to the admin role. See the -[Logical Model documentation](/schema/ms-sql-server/logical-models.mdx) for information on attaching permissions. 
+[Logical Model documentation](/schema/snowflake/logical-models.mdx) for information on attaching permissions. ::: @@ -497,6 +497,14 @@ use an argument to specify the name of the table in a `FROM` clause. When making a query, the arguments are specified using the `args` parameter of the query root field. +##### Example: `LIKE` operator + +A commonly used operator is the `LIKE`. When used in a `WHERE` condition, it's usually written with this syntax +`WHERE Title LIKE '%word%'`. + +In order to use it with Native Query arguments, you need to use this syntax `LIKE ('%' || {{searchTitle}} || '%')`, +where `searchTitle` is the Native Query parameter. + ## Query functionality Just like tables, Native Queries generate GraphQL types with the ability to further break down the data. You can find @@ -515,8 +523,7 @@ A future release will allow mutations to be specified using Native Queries. ## Permissions Native queries will inherit the permissions of the Logical Model that they return. See the -[documentation on Logical Models](/schema/ms-sql-server/logical-models.mdx) for an explanation of how to add -permissions. +[documentation on Logical Models](/schema/snowflake/logical-models.mdx) for an explanation of how to add permissions. ## Relationships @@ -530,7 +537,7 @@ Model in order to be tracked successfully. Currently relationships are only supported between Native Queries residing in the same source. As an example, consider the following Native Queries which implement the data model of articles and authors given in the -section on [Logical Model references](/schema/ms-sql-server/logical-models.mdx#referencing-other-logical-models): +section on [Logical Model references](/schema/snowflake/logical-models.mdx#referencing-other-logical-models): diff --git a/docs/docs/security/dynamic-secrets.mdx b/docs/docs/security/dynamic-secrets.mdx index ca0f44a3976..9d8d211755b 100644 --- a/docs/docs/security/dynamic-secrets.mdx +++ b/docs/docs/security/dynamic-secrets.mdx @@ -92,3 +92,16 @@ reference. Dynamic secrets can be used in template variables for data connectors. See [Template variables](/databases/database-config/data-connector-config.mdx/#template) for reference. + +## Forcing secret refresh + +If the environment variable `HASURA_SECRETS_BLOCKING_FORCE_REFRESH_URL=` +is set, on each connection failure the server will POST to the specified URL the payload: + +``` +{"filename": } +``` + +It is expected that the responding server will return only after refreshing the +secret at the given filepath. [hasura-secret-refresh](https://github.com/hasura/hasura-secret-refresh) +follows this spec. diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js index 3683d778418..e506437833f 100644 --- a/docs/docusaurus.config.js +++ b/docs/docusaurus.config.js @@ -19,10 +19,19 @@ const config = { projectName: 'graphql-engine', staticDirectories: ['static', 'public'], customFields: { - docsBotEndpointURL: - process.env.NODE_ENV === 'development' - ? 
'ws://localhost:8000/hasura-docs-ai' - : 'wss://website-api.hasura.io/chat-bot/hasura-docs-ai', + docsBotEndpointURL: (() => { + console.log('process.env.release_mode docs-bot', process.env.release_mode); + switch (process.env.release_mode) { + case 'development': + return 'ws://localhost:8000/hasura-docs-ai'; + case 'production': + return 'wss://website-api.hasura.io/chat-bot/hasura-docs-ai'; + case 'staging': + return 'wss://website-api.stage.hasura.io/chat-bot/hasura-docs-ai'; + default: + return 'ws://localhost:8000/hasura-docs-ai'; // default to development if no match + } + })(), hasuraVersion: 2, DEV_TOKEN: process.env.DEV_TOKEN, }, diff --git a/docs/src/components/AiChatBot/AiChatBot.tsx b/docs/src/components/AiChatBot/AiChatBot.tsx index b450a0dce5a..f1fbc14cf4b 100644 --- a/docs/src/components/AiChatBot/AiChatBot.tsx +++ b/docs/src/components/AiChatBot/AiChatBot.tsx @@ -4,8 +4,8 @@ import './styles.css'; import useDocusaurusContext from '@docusaurus/useDocusaurusContext'; import { CloseIcon, RespondingIconGray, SparklesIcon } from '@site/src/components/AiChatBot/icons'; import { useLocalStorage } from 'usehooks-ts' -import profilePic from '@site/static/img/hasura-ai-profile-pic.png'; - +import profilePic from '@site/static/img/docs-bot-profile-pic.webp'; +import { v4 as uuidv4 } from 'uuid'; interface Message { userMessage: string; @@ -26,7 +26,7 @@ interface Query { const initialMessages: Message[] = [ { userMessage: '', - botResponse: "Hi! I'm HasuraAI, the docs chatbot.", + botResponse: "Hi! I'm DocsBot, the Hasura docs AI chatbot.", }, { userMessage: '', @@ -50,6 +50,8 @@ export function AiChatBot() { const [isResponding, setIsResponding] = useState(false) // Manage the text input const [input, setInput] = useState(''); + // Manage the message thread ID + const [messageThreadId, setMessageThreadId] = useLocalStorage(`hasuraV${customFields.hasuraVersion}ThreadId`, uuidv4()) // Manage the historical messages const [messages, setMessages] = useLocalStorage(`hasuraV${customFields.hasuraVersion}BotMessages`, initialMessages); // Manage the current message @@ -185,7 +187,7 @@ export function AiChatBot() { } if (ws) { - const toSend = JSON.stringify({ previousMessages: messages, currentUserInput: input }); + const toSend = JSON.stringify({ previousMessages: messages, currentUserInput: input, messageThreadId }); setCurrentMessage({ userMessage: input, botResponse: '' }); setInput(''); ws.send(toSend); @@ -194,6 +196,8 @@ export function AiChatBot() { }; + const baseUrl = useDocusaurusContext().siteConfig.baseUrl; + return (
{isOpen ? ( @@ -209,12 +213,13 @@ export function AiChatBot() {
-            HasuraAI
+            DocsBot
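To make the `messageThreadId` change to `AiChatBot.tsx` above easier to follow, here is a minimal standalone sketch of the same idea; the `storageKey` name and the `sendChatMessage` helper are illustrative assumptions, while the component itself derives its key from `hasuraVersion` via `useLocalStorage`.

```typescript
import { v4 as uuidv4 } from 'uuid';

// Create one thread ID per browser and persist it, so every message in a conversation
// can be correlated on the server side.
const storageKey = 'hasuraV2ThreadId'; // hypothetical key name
const messageThreadId = window.localStorage.getItem(storageKey) ?? uuidv4();
window.localStorage.setItem(storageKey, messageThreadId);

// Each WebSocket payload now carries the thread ID alongside the chat history.
function sendChatMessage(ws: WebSocket, previousMessages: object[], currentUserInput: string): void {
  ws.send(JSON.stringify({ previousMessages, currentUserInput, messageThreadId }));
}
```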
diff --git a/docs/src/components/BannerDismissable/DDNBanner.tsx b/docs/src/components/BannerDismissable/DDNBanner.tsx index 83eda711a06..8eae7da3af8 100644 --- a/docs/src/components/BannerDismissable/DDNBanner.tsx +++ b/docs/src/components/BannerDismissable/DDNBanner.tsx @@ -9,7 +9,7 @@ export const DDNBanner = () => { return (
- Hasura DDN is the future of data delivery. Click here for the Hasura DDN docs. + Hasura DDN is the future of data delivery. Click here for the Hasura DDN docs.
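Tying back to the `Forcing secret refresh` section added in `docs/docs/security/dynamic-secrets.mdx` above: the contract is an HTTP endpoint that receives `{"filename": ...}` and responds only once the secret file has been rewritten. Below is a minimal sketch of such a responder; `refreshSecret` is a hypothetical stand-in for fetching the new credential, and the linked `hasura-secret-refresh` project is the real implementation.

```typescript
import { createServer } from 'node:http';
import { writeFile } from 'node:fs/promises';

// Hypothetical helper: obtain a fresh credential for the secret stored at `filename`.
async function refreshSecret(filename: string): Promise<string> {
  return `refreshed-secret-for-${filename}`; // placeholder logic
}

// Respond only after the secret file has been rewritten, as the spec above requires.
createServer((req, res) => {
  let body = '';
  req.on('data', (chunk: Buffer) => (body += chunk));
  req.on('end', async () => {
    const { filename } = JSON.parse(body) as { filename: string };
    await writeFile(filename, await refreshSecret(filename));
    res.writeHead(200);
    res.end('refreshed');
  });
}).listen(8080);
```

With something like this running, `HASURA_SECRETS_BLOCKING_FORCE_REFRESH_URL` would point at wherever the responder is reachable from the Hasura container, for example `http://host.docker.internal:8080`.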