# Test with graphql-default naming convention

- description: PG add source
  url: /v1/metadata
  status: 200
  response:
    message: success
  query:
    type: pg_add_source
    args:
      name: pg1
      configuration:
        connection_info:
          database_url:
            from_env: HASURA_GRAPHQL_PG_SOURCE_URL_1
          pool_settings:
            max_connections: 50
            idle_timeout: 180
            retries:
        customization:
          naming_convention: graphql-default

- description: create table 1
  url: /v1/query
  status: 200
  response:
    result_type: CommandOk
    result:
  query:
    type: run_sql
    args:
      source: pg1
      sql: |
        create table author_local(
            id serial primary key,
            name text unique
        );
        INSERT INTO author_local (name)
        VALUES ('Author 1'), ('Author 2');

        create table article_local(
            id serial primary key,
            author_id serial,
            title text
        );
        INSERT INTO article_local (author_id, title)
        VALUES (1, 'Article 1'), (1, 'Article 2'), (2, 'New Article 1'), (2, 'New Article 2');

- description: track table
  url: /v1/metadata
  status: 200
  response:
    message: success
  query:
    type: pg_track_table
    args:
      table: author_local
      source: pg1
      configuration:
        custom_name: authorDetails
        custom_root_fields:
          select_by_pk: author_detail
          insert_one: add_author
          delete_by_pk: delete_one_author

- description: track table
  url: /v1/metadata
  status: 200
  response:
    message: success
  query:
    type: pg_track_table
    args:
      table: article_local
      source: pg1

- description: Simple GraphQL query to fetch items from the source table
  url: /v1/graphql
  status: 200
  response:
    data:
      authorDetails:
      - id: 1
        name: "Author 1"
        __typename: authorDetails
      - id: 2
        name: "Author 2"
        __typename: authorDetails
      articleLocal:
      - id: 1
        title: "Article 1"
        __typename: ArticleLocal
      - id: 2
        title: "Article 2"
        __typename: ArticleLocal
  query:
    query: |
      query {
        authorDetails {
          id
          name
          __typename
        }
        articleLocal (where: {authorId: {_eq: 1}}) {
          id
          title
          __typename
        }
      }

- description: Lookup by pk
  url: /v1/graphql
  status: 200
  response:
    data:
      author_detail:
        id: 1
        name: "Author 1"
        __typename: authorDetails
      articleLocalByPk:
        title: "New Article 1"
        __typename: ArticleLocal
  query:
    query: |
      query {
        author_detail(id: 1) {
          id
          name
          __typename
        }
        articleLocalByPk(id: 3){
          title
          __typename
        }
      }

- description: Aggregate
  url: /v1/graphql
  status: 200
  response:
    data:
      articleLocalAggregate:
        __typename: ArticleLocalAggregate
        aggregate:
          __typename: ArticleLocalAggregateFields
          count: 2
  query:
    query: |
      query MyQuery {
        articleLocalAggregate(where: {authorId: {_eq: 2}}) {
          __typename
          aggregate {
            __typename
            count
          }
        }
      }

- description: Insert
  url: /v1/graphql
  status: 200
  response:
    data:
      add_author:
        __typename: authorDetails
        id: 3
        name: Author 3
  query:
    query: |
      mutation MyMutation {
        add_author(object: {name: "Author 3", id: 3}) {
          __typename
          id
          name
        }
      }

- description: Delete by pk
  url: /v1/graphql
  status: 200
  response:
    data:
      delete_one_author:
        __typename: authorDetails
        id: 3
        name: Author 3
  query:
    query: |
      mutation MyMutation {
        delete_one_author(id: 3) {
          __typename
          id
          name
        }
      }

- description: untrack table
  url: /v1/metadata
  status: 200
  response:
    message: success
  query:
    type: pg_untrack_table
    args:
      table: author_local
      source: pg1

- description: drop table
  url: /v1/query
  status: 200
  response:
    result_type: CommandOk
    result:
  query:
    type: run_sql
    args:
      source: pg1
      sql: |
        drop table author_local;

- description: untrack table
  url: /v1/metadata
  status: 200
  response:
    message: success
  query:
    type: pg_untrack_table
    args:
      table: article_local
      source: pg1

- description: drop table
  url: /v1/query
  status: 200
  response:
    result_type: CommandOk
    result:
  query:
    type: run_sql
    args:
      source: pg1
      sql: |
        drop table article_local;

- description: PG Drop Source 1
  url: /v1/metadata
  status: 200
  response:
    message: success
  query:
    type: pg_drop_source
    args:
      name: pg1