# graphql-engine/server/tests-py/test_metadata.py


import os

import pytest

from validate import check_query_f

usefixtures = pytest.mark.usefixtures
use_mutation_fixtures = usefixtures(
    'per_class_db_schema_for_mutation_tests',
    'per_method_db_data_for_mutation_tests'
)
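
# The `use_mutation_fixtures` alias above bundles the class-level schema and
# per-method data fixtures, intended to be applied together by mutation test
# classes.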


@usefixtures('gql_server', 'per_method_tests_db_state')
class TestMetadata:

    def test_reload_metadata(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/reload_metadata.yaml')

    # FIXME:- Using export_metadata will dump
    # the source configuration dependent on --database-url
    # def test_export_metadata(self, hge_ctx):
    #     check_query_f(hge_ctx, self.dir() + '/export_metadata.yaml')

    def test_clear_metadata(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/clear_metadata.yaml')

    def test_clear_metadata_as_user(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/metadata_as_user_err.yaml')

    def test_replace_metadata(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/replace_metadata.yaml')

    def test_replace_metadata_no_tables(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/replace_metadata_no_tables.yaml')

    def test_replace_metadata_wo_remote_schemas(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/replace_metadata_wo_rs.yaml')

    def test_replace_metadata_v2(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/replace_metadata_v2.yaml')

    def test_replace_metadata_allow_inconsistent(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() +
                      '/replace_metadata_allow_inconsistent_inconsistent.yaml')
        check_query_f(hge_ctx, self.dir() +
                      '/replace_metadata_allow_inconsistent.yaml')
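
    # Replacing metadata that defines an inherited role whose member role
    # permissions cannot be combined should fail with a 400 and report the
    # inconsistency, since allow_inconsistent_metadata is not set.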
    def test_replace_metadata_disallow_inconsistent_metadata(self, hge_ctx):
        resp = hge_ctx.v1metadataq({"type": "export_metadata", "args": {}})
        default_source_config = {}
        default_source = list(filter(lambda source: (source["name"] == "default"), resp["sources"]))
        if default_source:
            default_source_config = default_source[0]["configuration"]
        else:
            assert False, "default source config not found"
        resp = hge_ctx.v1metadataq(
            {
                "type": "replace_metadata",
                "version": 2,
                "args": {
                    "metadata": {
                        "version": 3,
                        "sources": [
                            {
                                "name": "default",
                                "kind": "postgres",
                                "tables": [
                                    {
                                        "table": {
                                            "schema": "public",
                                            "name": "author"
                                        },
                                        "insert_permissions": [
                                            {
                                                "role": "user1",
                                                "permission": {
                                                    "check": {},
                                                    "columns": [
                                                        "id",
                                                        "name"
                                                    ],
                                                    "backend_only": False
                                                }
                                            },
                                            {
                                                "role": "user2",
                                                "permission": {
                                                    "check": {
                                                        "id": {
                                                            "_eq": "X-Hasura-User-Id"
                                                        }
                                                    },
                                                    "columns": [
                                                        "id",
                                                        "name"
                                                    ],
                                                    "backend_only": False
                                                }
                                            }
                                        ]
                                    }
                                ],
                                "configuration": default_source_config
                            }
                        ],
                        "inherited_roles": [
                            {
                                "role_name": "users",
                                "role_set": [
                                    "user2",
                                    "user1"
                                ]
                            }
                        ]
                    }
                }
            },
            expected_status_code=400
        )
        assert resp == {
            "internal": [
                {
                    "reason": "Could not inherit permission for the role 'users' for the entity: 'insert permission, table: author, source: 'default''",
                    "name": "users",
                    "type": "inherited role permission inconsistency",
                    "entity": {
                        "permission_type": "insert",
                        "source": "default",
                        "table": "author"
                    }
                }
            ],
            "path": "$.args",
            "error": "cannot continue due to inconsistent metadata",
            "code": "unexpected"
        }
"""Test that missing "kind" key in metadata source defaults to "postgres".
Regression test for https://github.com/hasura/graphql-engine-mono/issues/4501"""
def test_replace_metadata_default_kind(self, hge_ctx):
resp = hge_ctx.v1metadataq({"type": "export_metadata", "args": {}})
default_source_config = {}
default_source = list(filter(lambda source: (source["name"] == "default"), resp["sources"]))
if default_source:
default_source_config = default_source[0]["configuration"]
else:
assert False, "default source config not found"
hge_ctx.v1metadataq({
"type": "replace_metadata",
"version": 2,
"args": {
"metadata": {
"version": 3,
"sources": [
{
"name": "default",
"tables": [],
"configuration": default_source_config
}
]
}
}
})
resp = hge_ctx.v1metadataq({"type": "export_metadata", "args": {}})
assert resp["sources"][0]["kind"] == "postgres"

    def test_dump_internal_state(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/dump_internal_state.yaml')

    def test_pg_add_source(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/pg_add_source.yaml')
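
    # Re-adding an existing source with `replace_configuration: True` should
    # update its configuration and customization in place.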
    def test_pg_add_source_with_replace_config(self, hge_ctx):
        hge_ctx.v1metadataq({
            "type": "pg_add_source",
            "args": {
                "name": "pg1",
                "configuration": {
                    "connection_info": {
                        "database_url": {
                            "from_env": "HASURA_GRAPHQL_PG_SOURCE_URL_1"
                        }
                    }
                }
            }
        })
        hge_ctx.v1metadataq({
            "type": "pg_add_source",
            "args": {
                "name": "pg1",
                "configuration": {
                    "connection_info": {
                        "database_url": {
                            "from_env": "HASURA_GRAPHQL_PG_SOURCE_URL_1"
                        }
                    }
                },
                "customization": {
                    "root_fields": {
                        "namespace": "some_namespace"
                    }
                },
                "replace_configuration": True
            }
        })
        resp = hge_ctx.v1metadataq({"type": "export_metadata", "args": {}})
        assert resp["sources"][1]["customization"]["root_fields"]["namespace"] == "some_namespace"
        hge_ctx.v1metadataq({
            "type": "pg_drop_source",
            "args": {
                "name": "pg1"
            }
        })
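
    # Updating a source that was never added should be rejected with a 400.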
    def test_pg_update_unknown_source(self, hge_ctx):
        resp = hge_ctx.v1metadataq(
            {
                "type": "pg_update_source",
                "args": {
                    "name": "pg-not-previously-added",
                    "configuration": {
                        "connection_info": {
                            "database_url": {
                                "from_env": "HASURA_GRAPHQL_PG_SOURCE_URL_1"
                            }
                        }
                    }
                }
            },
            expected_status_code=400
        )
        assert resp["error"] == "source with name \"pg-not-previously-added\" does not exist"

    def test_pg_update_source(self, hge_ctx):
        hge_ctx.v1metadataq({
            "type": "pg_add_source",
            "args": {
                "name": "pg1",
                "configuration": {
                    "connection_info": {
                        "database_url": {
                            "from_env": "HASURA_GRAPHQL_PG_SOURCE_URL_1"
                        },
                        "pool_settings": {
                            "max_connections": 10
                        }
                    }
                }
            }
        })
        hge_ctx.v1metadataq({
            "type": "pg_update_source",
            "args": {
                "name": "pg1",
                "customization": {
                    "root_fields": {
                        "namespace": "some_namespace"
                    }
                }
            }
        })
        resp = hge_ctx.v1metadataq({"type": "export_metadata", "args": {}})
        assert resp["sources"][1]["customization"]["root_fields"]["namespace"] == "some_namespace"
        assert resp["sources"][1]["configuration"]["connection_info"]["pool_settings"]["max_connections"] == 10
        hge_ctx.v1metadataq({
            "type": "pg_update_source",
            "args": {
                "name": "pg1",
                "configuration": {
                    "connection_info": {
                        "database_url": {
                            "from_env": "HASURA_GRAPHQL_PG_SOURCE_URL_1"
                        },
                        "pool_settings": {
                            "max_connections": 50
                        }
                    }
                }
            }
        })
        resp = hge_ctx.v1metadataq({"type": "export_metadata", "args": {}})
        assert resp["sources"][1]["customization"]["root_fields"]["namespace"] == "some_namespace"
        assert resp["sources"][1]["configuration"]["connection_info"]["pool_settings"]["max_connections"] == 50
        hge_ctx.v1metadataq({
            "type": "pg_drop_source",
            "args": {
                "name": "pg1"
            }
        })
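
    # This test uses hardcoded connection parameters rather than environment
    # variables, so it only works when the postgres instance is exactly where
    # the test expects it (the CI setup); see hasura/graphql-engine-mono#2336.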
    @pytest.mark.skipif(
        os.getenv('HASURA_GRAPHQL_PG_SOURCE_URL_1') != 'postgresql://gql_test:gql_test@localhost:5432/pg_source_1',
reason="This test relies on hardcoded connection parameters that match Circle's setup.")
def test_pg_add_source_with_source_parameters(self, hge_ctx):
check_query_f(hge_ctx, self.dir() + '/pg_add_source_with_parameters.yaml')

    def test_pg_track_table_source(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/pg_track_table_source.yaml')

    def test_rename_source(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/rename_source.yaml')

    def test_pg_multisource_query(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/pg_multisource_query.yaml')

    def test_pg_remote_source_query(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/pg_remote_source_query.yaml')

    @pytest.mark.skipif(
        os.getenv('HASURA_GRAPHQL_PG_SOURCE_URL_1') == os.getenv('HASURA_GRAPHQL_PG_SOURCE_URL_2') or
        os.getenv('HASURA_GRAPHQL_PG_SOURCE_URL_1') is None or
        os.getenv('HASURA_GRAPHQL_PG_SOURCE_URL_2') is None,
        reason="We need two different and valid instances of postgres for this test.")
    def test_pg_remote_source_customized_query(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/pg_remote_source_customized_query.yaml')

    def test_pg_source_namespace_query(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/pg_source_namespace_query.yaml')

    def test_pg_source_prefix_query(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/pg_source_prefix_query.yaml')

    def test_pg_source_customization(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/pg_source_customization.yaml')

    def test_pg_source_cust_custom_name(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/pg_source_customization_custom_name.yaml')

    def test_pg_function_tracking_with_comment(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/pg_track_function_with_comment_setup.yaml')

        # make an introspection query to see if the description of the function has changed
        introspection_query = """{
            __schema {
                queryType {
                    fields {
                        name
                        description
                    }
                }
            }
        }"""
        url = "/v1/graphql"
        query = {
            "query": introspection_query,
            "variables": {}
        }
        headers = {}
        if hge_ctx.hge_key is not None:
            headers['x-hasura-admin-secret'] = hge_ctx.hge_key

        status_code, resp, _ = hge_ctx.anyq(url, query, headers)
        assert status_code == 200, f'Expected status code to be 200, got {status_code}. Response:\n{resp}'

        fn_name = 'search_authors_s1'
        fn_description = 'this function helps fetch articles based on the title'
        resp_fields = resp['data']['__schema']['queryType']['fields']
        if resp_fields is not None:
            comment_found = False
            for field_info in resp_fields:
                if field_info['name'] == fn_name and field_info['description'] == fn_description:
                    comment_found = True
                    break
            assert comment_found, resp

        check_query_f(hge_ctx, self.dir() + '/pg_track_function_with_comment_teardown.yaml')

    def test_webhook_transform_success(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/test_webhook_transform_success.yaml')

    def test_webhook_transform_success_remove_body(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/test_webhook_transform_success_remove_body.yaml')

    def test_webhook_transform_success_old_body_schema(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/test_webhook_transform_success_old_body_schema.yaml')

    def test_webhook_transform_success_form_urlencoded(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/test_webhook_transform_success_form_urlencoded.yaml')

    def test_webhook_transform_with_url_env_reference_success(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/test_webhook_transform_env_reference_success.yaml')

    def test_webhook_transform_bad_parse(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/test_webhook_transform_bad_parse.yaml')

    def test_webhook_transform_bad_eval(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/test_webhook_transform_bad_eval.yaml')

    def test_webhook_transform_custom_functions(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/test_webhook_transform_custom_functions.yaml')

    @pytest.mark.skipif(
        os.getenv('HASURA_GRAPHQL_PG_SOURCE_URL_1') == os.getenv('HASURA_GRAPHQL_PG_SOURCE_URL_2') or
        os.getenv('HASURA_GRAPHQL_PG_SOURCE_URL_1') is None or
        os.getenv('HASURA_GRAPHQL_PG_SOURCE_URL_2') is None,
        reason="We need two different and valid instances of postgres for this test.")
    def test_pg_multisource_table_name_conflict(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/pg_multisource_table_name_conflict.yaml')

    @classmethod
    def dir(cls):
        return "queries/v1/metadata"


# TODO: these look like dependent tests. Ideally we should be able to run tests independently.
@usefixtures('per_class_tests_db_state')
class TestMetadataOrder:

    @classmethod
    def dir(cls):
        return "queries/v1/metadata_order"

    # FIXME:- Using export_metadata will dump
    # the source configuration dependent on --database-url
    # def test_export_metadata(self, hge_ctx):
    #     check_query_f(hge_ctx, self.dir() + '/export_metadata.yaml')

    # def test_clear_export_metadata(self, hge_ctx):
    #     In 'clear_export_metadata.yaml' the metadata is added
    #     using the metadata APIs
    #     check_query_f(hge_ctx, self.dir() + '/clear_export_metadata.yaml')

    def test_export_replace(self, hge_ctx):
        url = '/v1/query'
        export_query = {
            'type': 'export_metadata',
            'args': {}
        }
        headers = {}
        if hge_ctx.hge_key is not None:
            headers['X-Hasura-Admin-Secret'] = hge_ctx.hge_key
        # we are exporting the metadata here after creating it through
        # the metadata APIs
        export_code, export_resp, _ = hge_ctx.anyq(url, export_query, headers)
        assert export_code == 200, export_resp
        replace_query = {
            'type': 'replace_metadata',
            'args': export_resp
        }
        # we are replacing the metadata with the exported metadata from the
        # `export_metadata` response.
        replace_code, replace_resp, _ = hge_ctx.anyq(
            url, replace_query, headers)
        assert replace_code == 200, replace_resp
        # This test catches incorrect key names (if any) in the export_metadata
        # serialization. For example, a new query collection is added to the
        # allow list using the add_collection_to_allowlist metadata API. When
        # the metadata is exported, it will contain the allowlist. Now, when
        # this metadata is imported, if the graphql-engine is expecting a
        # different key like allow_list (instead of allowlist), then the allow
        # list won't be imported. Exporting the metadata again won't contain
        # the allowlist key because it wasn't imported properly, and hence the
        # two exports will differ.
        export_code_1, export_resp_1, _ = hge_ctx.anyq(
            url, export_query, headers)
        assert export_code_1 == 200
        assert export_resp == export_resp_1

    def test_export_replace_v2(self, hge_ctx):
        url = '/v1/metadata'
        export_query = {
            'type': 'export_metadata',
            'version': 2,
            'args': {}
        }
        headers = {}
        if hge_ctx.hge_key is not None:
            headers['X-Hasura-Admin-Secret'] = hge_ctx.hge_key
        # we are exporting the metadata here after creating it through
        # the metadata APIs
        export_code, export_resp, _ = hge_ctx.anyq(url, export_query, headers)
        assert export_code == 200, export_resp
        replace_query = {
            'type': 'replace_metadata',
            'version': 2,
            'resource_version': export_resp['resource_version'],
            'args': {'metadata': export_resp['metadata']}
        }
        # we are replacing the metadata with the exported metadata from the
        # `export_metadata` response.
        replace_code, replace_resp, _ = hge_ctx.anyq(
            url, replace_query, headers)
        assert replace_code == 200, replace_resp
        export_code_1, export_resp_1, _ = hge_ctx.anyq(
            url, export_query, headers)
        assert export_code_1 == 200
        assert export_resp['metadata'] == export_resp_1['metadata']
        # `resource_version` should have been incremented
        assert export_resp['resource_version'] + 1 == export_resp_1['resource_version']

    def test_export_replace_v2_conflict(self, hge_ctx):
        url = '/v1/metadata'
        export_query = {
            'type': 'export_metadata',
            'version': 2,
            'args': {}
        }
        headers = {}
        if hge_ctx.hge_key is not None:
            headers['X-Hasura-Admin-Secret'] = hge_ctx.hge_key
        # we are exporting the metadata here after creating it through
        # the metadata APIs
        export_code, export_resp, _ = hge_ctx.anyq(url, export_query, headers)
        assert export_code == 200, export_resp
        replace_query = {
            'type': 'replace_metadata',
            'version': 2,
            'resource_version': export_resp['resource_version'] - 1,
            'args': {'metadata': export_resp['metadata']}
        }
        # we are replacing the metadata with the exported metadata from the
        # `export_metadata` response.
        # Using the wrong `resource_version` should result in a 409 conflict
        replace_code, replace_resp, _ = hge_ctx.anyq(
            url, replace_query, headers)
        assert replace_code == 409, replace_resp
        export_code_1, export_resp_1, _ = hge_ctx.anyq(
            url, export_query, headers)
        assert export_code_1 == 200
        assert export_resp['metadata'] == export_resp_1['metadata']
        # `resource_version` should be unchanged
        assert export_resp['resource_version'] == export_resp_1['resource_version']

    def test_reload_metadata(self, hge_ctx):
        url = '/v1/metadata'
        export_query = {
            'type': 'export_metadata',
            'version': 2,
            'args': {}
        }
        headers = {}
        if hge_ctx.hge_key is not None:
            headers['X-Hasura-Admin-Secret'] = hge_ctx.hge_key
        # we are exporting the metadata here after creating it through
        # the metadata APIs
        export_code, export_resp, _ = hge_ctx.anyq(url, export_query, headers)
        assert export_code == 200, export_resp
        reload_query = {
            'type': 'reload_metadata',
            'resource_version': export_resp['resource_version'],
            'args': {}
        }
        # we are reloading the metadata, passing the `resource_version`
        # from the `export_metadata` response.
        reload_code, reload_resp, _ = hge_ctx.anyq(
            url, reload_query, headers)
        assert reload_code == 200, reload_resp
        export_code_1, export_resp_1, _ = hge_ctx.anyq(
            url, export_query, headers)
        assert export_code_1 == 200
        assert export_resp['metadata'] == export_resp_1['metadata']
        # `resource_version` should have been incremented
        assert export_resp['resource_version'] + 1 == export_resp_1['resource_version']

    def test_reload_metadata_conflict(self, hge_ctx):
        url = '/v1/metadata'
        export_query = {
            'type': 'export_metadata',
            'version': 2,
            'args': {}
        }
        headers = {}
        if hge_ctx.hge_key is not None:
            headers['X-Hasura-Admin-Secret'] = hge_ctx.hge_key
        # we are exporting the metadata here after creating it through
        # the metadata APIs
        export_code, export_resp, _ = hge_ctx.anyq(url, export_query, headers)
        assert export_code == 200, export_resp
        reload_query = {
            'type': 'reload_metadata',
            'resource_version': export_resp['resource_version'] - 1,
            'args': {}
        }
        # we are reloading the metadata with a stale `resource_version`,
        # which should result in a 409 conflict.
        reload_code, reload_resp, _ = hge_ctx.anyq(
            url, reload_query, headers)
        assert reload_code == 409, reload_resp
        export_code_1, export_resp_1, _ = hge_ctx.anyq(
            url, export_query, headers)
        assert export_code_1 == 200
        assert export_resp['metadata'] == export_resp_1['metadata']
        # `resource_version` should be unchanged
        assert export_resp['resource_version'] == export_resp_1['resource_version']
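

# The `resource_version` checks above exercise optimistic locking: a write
# carrying a stale version is rejected with a 409 instead of clobbering newer
# metadata. Below is a minimal sketch of the resulting client-side pattern,
# illustrative only and not part of the original suite; `mutate` is a
# hypothetical callback that edits an exported metadata document.
def replace_metadata_optimistically(hge_ctx, mutate, headers, max_attempts=3):
    url = '/v1/metadata'
    export_query = {'type': 'export_metadata', 'version': 2, 'args': {}}
    for _ in range(max_attempts):
        # re-export on every attempt to pick up the latest resource_version
        code, exported, _ = hge_ctx.anyq(url, export_query, headers)
        assert code == 200, exported
        replace_query = {
            'type': 'replace_metadata',
            'version': 2,
            'resource_version': exported['resource_version'],
            'args': {'metadata': mutate(exported['metadata'])},
        }
        code, resp, _ = hge_ctx.anyq(url, replace_query, headers)
        if code == 200:
            return resp
        # a 409 means someone else wrote first: retry; anything else is fatal
        assert code == 409, resp
    assert False, "metadata replace kept conflicting"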


@pytest.mark.backend('citus', 'mssql', 'postgres', 'bigquery')
@usefixtures('per_class_tests_db_state')
class TestSetTableCustomizationPostgresMSSQLCitusBigquery:

    @classmethod
    def dir(cls):
        return "queries/v1/metadata"

    def test_set_table_customization(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + hge_ctx.backend_suffix('/set_table_customization') + '.yaml')


@pytest.mark.backend('bigquery')
@usefixtures('per_method_tests_db_state')
class TestMetadataBigquery:

    def test_replace_metadata_no_tables(self, hge_ctx):
        check_query_f(hge_ctx, self.dir() + '/replace_metadata_no_tables.yaml')

    @classmethod
    def dir(cls):
        return "queries/v1/metadata/bigquery"