#!/usr/bin/env python3
import time
import ruamel.yaml as yaml
from ruamel.yaml.compat import ordereddict, StringIO
from ruamel.yaml.comments import CommentedMap
import json
import copy
import graphql
import os
import base64
import jsondiff
import jwt
import queue
import random
import warnings
import pytest
import textwrap
from context import GQLWsClient, PytestConf

def check_keys(keys, obj):
    for k in keys:
        assert k in obj, obj

def check_ev_payload_shape(ev_payload):
    top_level_keys = ["created_at", "event", "id", "table", "trigger"]
    check_keys(top_level_keys, ev_payload)
    event_keys = ["data", "op"]
    check_keys(event_keys, ev_payload['event'])
    trigger_keys = ["name"]
    check_keys(trigger_keys, ev_payload['trigger'])

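# For reference, an event delivery payload checked by the shape assertions
# above looks roughly like this (field values are illustrative, not exact):
#
#   {
#     "created_at": "...",
#     "id": "<event uuid>",
#     "table": {"schema": "public", "name": "..."},
#     "trigger": {"name": "<trigger name>"},
#     "event": {"op": "INSERT", "data": {...}}
#   }
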
def validate_event_payload(ev_payload, trig_name, table):
    check_ev_payload_shape(ev_payload)
    assert ev_payload['table'] == table, ev_payload
    assert ev_payload['trigger']['name'] == trig_name, ev_payload

def validate_event_headers(ev_headers, headers):
    for key, value in headers.items():
        v = ev_headers.get(key)
        assert v == value, (key, v)

def validate_removed_event_headers(ev_headers, headers):
    for key in headers:
        v = ev_headers.get(key)
        assert (not v), (key, v)

def validate_event_webhook(ev_webhook_path, webhook_path):
    assert ev_webhook_path == webhook_path

def validate_session_variables(hge_ctx, ev, session_variables):
    # TODO (Naveen): the MSSQL event trigger payload does not yet contain
    # session variables in its body. Remove this guard once the payload
    # carries them.
    if hge_ctx.backend == "postgres":
        assert ev['session_variables'] == session_variables, ev
    elif hge_ctx.backend == "mssql":
        with pytest.raises(KeyError):
            assert ev['session_variables'] == session_variables, ev

def validate_event(hge_ctx,
                   trig_name,
                   table,
                   operation,
                   ev_full,
                   exp_ev_data,
                   headers = {},
                   webhook_path = '/',
                   session_variables = {'x-hasura-role': 'admin'},
                   retry = 0,
):
    ev = ev_full['body']['event']
    validate_event_webhook(ev_full['path'], webhook_path)
    validate_event_headers(ev_full['headers'], headers)
    validate_event_payload(ev_full['body'], trig_name, table)
    assert ev['op'] == operation, ev
    validate_session_variables(hge_ctx, ev, session_variables)
    assert ev['data'] == exp_ev_data, ev
    assert ev_full['body']['delivery_info']['current_retry'] == retry

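# Note: for insert/update/delete triggers, `exp_ev_data` is matched against
# the `old`/`new` shape of the event data, e.g. (illustrative):
#   {'old': None, 'new': {'c1': 1, 'c2': 'hello'}}
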
# Make some assertions on a single event recorded by the webhook. Waits up to
# 3 seconds by default for an event to appear.
def check_event(hge_ctx,
                evts_webhook,
                trig_name,
                table,
                operation,
                exp_ev_data,
                headers = {},
                webhook_path = '/',
                session_variables = {'x-hasura-role': 'admin'},
                retry = 0,
                get_timeout = 3
):
    ev_full = evts_webhook.get_event(get_timeout)
    validate_event(hge_ctx, trig_name, table, operation, ev_full, exp_ev_data, headers, webhook_path, session_variables, retry)

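# A typical call from an event trigger test, assuming an insert trigger named
# `t1_all` on table `hge_tests.test_t1` (the names here are illustrative):
#
#   check_event(hge_ctx, evts_webhook, 't1_all',
#               {'schema': 'hge_tests', 'name': 'test_t1'},
#               'INSERT', {'old': None, 'new': {'c1': 1, 'c2': 'hello'}})
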
# Compares the list of expected event data with the events received by the
# webhook. Waits up to 3 seconds by default for each event to appear.
# This is useful when the generated events have no fixed order, in which case
# 'check_event' cannot be used to check them one after another.
def check_events(hge_ctx,
                 evts_webhook,
                 trig_name,
                 table,
                 operation,
                 num_of_events,
                 exp_ev_datas,
                 headers = {},
                 webhook_path = '/',
                 session_variables = {'x-hasura-role': 'admin'},
                 retry = 0,
                 get_timeout = 3
):
    events_payloads_webhook = []
    # Collect the expected number of events
    for _ in range(num_of_events):
        ev_full = evts_webhook.get_event(get_timeout)
        ev_payload_data = ev_full['body']['event']['data']
        events_payloads_webhook.append((ev_payload_data, ev_full))

    # If events are still present in the queue after we have collected the
    # expected number, some stray events were generated.
    if not evts_webhook.is_queue_empty():
        assert False, "number of events received by the webhook exceeds the expected number of events"

    # Check that each payload received by the webhook is present in the
    # expected datas: if so, validate the event, otherwise fail.
    for (ev_payload_data, ev_full) in events_payloads_webhook:
        if ev_payload_data in exp_ev_datas:
            # Since ev_payload_data is present in exp_ev_datas, it is the
            # exp_ev_data for this particular event payload (ev_full)
            exp_ev_data = ev_payload_data
            validate_event(hge_ctx, trig_name, table, operation, ev_full, exp_ev_data, headers, webhook_path, session_variables, retry)
        else:
            assert False, ("Received event data:\n" + json.dumps(ev_payload_data) + "\nnot present in expected event data from webhook")

def check_event_transformed(hge_ctx,
                            evts_webhook,
                            exp_payload,
                            headers = {},
                            webhook_path = '/',
                            session_variables = {'x-hasura-role': 'admin'},
                            retry = 0,
                            get_timeout = 3,
                            removedHeaders = []):
    ev_full = evts_webhook.get_event(get_timeout)
    validate_event_webhook(ev_full['path'], webhook_path)
    validate_event_headers(ev_full['headers'], headers)
    validate_removed_event_headers(ev_full['headers'], removedHeaders)
    assert ev_full['body'] == exp_payload

def test_forbidden_when_admin_secret_reqd(hge_ctx, conf):
    if conf['url'] == '/v1/graphql' or conf['url'] == '/v1beta1/relay':
        if conf['status'] == 404:
            status = [404]
        else:
            status = [200]
    else:
        status = [401, 404]

    headers = {}
    if 'headers' in conf:
        # Convert header values to strings, as the YAML parser might give us an internal class.
        headers = {name: str(value) for name, value in conf['headers'].items()}

    # Test without an admin secret
    code, resp, resp_hdrs = hge_ctx.anyq(conf['url'], conf['query'], headers)
    assert code in status, "\n" + yaml.dump({
        "expected": "Should be access denied as admin secret is not provided",
        "actual": {
            "code": code,
            "response": resp
        },
        'request id': resp_hdrs.get('x-request-id')
    })

    # Test with a random admin secret
    headers['X-Hasura-Admin-Secret'] = base64.b64encode(os.urandom(30))
    code, resp, resp_hdrs = hge_ctx.anyq(conf['url'], conf['query'], headers)
    assert code in status, "\n" + yaml.dump({
        "expected": "Should be access denied as an incorrect admin secret is provided",
        "actual": {
            "code": code,
            "response": resp
        },
        'request id': resp_hdrs.get('x-request-id')
    })

def test_forbidden_webhook(hge_ctx, conf):
    if conf['url'] == '/v1/graphql' or conf['url'] == '/v1beta1/relay':
        if conf['status'] == 404:
            status = [404]
        else:
            status = [200]
    else:
        status = [401, 404]

    h = {'Authorization': 'Bearer ' + base64.b64encode(base64.b64encode(os.urandom(30))).decode('utf-8')}
    code, resp, resp_hdrs = hge_ctx.anyq(conf['url'], conf['query'], h)
    assert code in status, "\n" + yaml.dump({
        "expected": "Should be access denied as it is denied from webhook",
        "actual": {
            "code": code,
            "response": resp
        },
        'request id': resp_hdrs.get('x-request-id')
    })

def mk_claims_with_namespace_path(claims, hasura_claims, namespace_path):
    if namespace_path is None:
        claims['https://hasura.io/jwt/claims'] = hasura_claims
    elif namespace_path == "$":
        claims.update(hasura_claims)
    elif namespace_path == "$.hasura_claims":
        claims['hasura_claims'] = hasura_claims
    elif namespace_path == "$.hasura['claims%']":
        claims['hasura'] = {}
        claims['hasura']['claims%'] = hasura_claims
    else:
        raise Exception(
            '''claims_namespace_path should not be anything other than
            $.hasura_claims, $.hasura['claims%'] or $ for testing. The
            value of claims_namespace_path was {}'''.format(namespace_path))
    return claims

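# For example, with the default namespace (namespace_path=None), the claims
# produced below in check_query for a `user` role look like this (values
# illustrative):
#
#   {
#     "sub": "foo",
#     "name": "bar",
#     "https://hasura.io/jwt/claims": {
#       "X-Hasura-Allowed-Roles": ["user"],
#       "X-Hasura-Default-Role": "user"
#     }
#   }
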
# Returns the response received and a bool indicating whether the test passed
# or not (this will always be True unless we are `--accepting`)
def check_query(hge_ctx, conf, transport='http', add_auth=True, claims_namespace_path=None, gqlws=False):
    hge_ctx.tests_passed = True
    headers = {}
    if 'headers' in conf:
        # Convert header values to strings, as the YAML parser might give us an internal class.
        headers = {name: str(value) for name, value in conf['headers'].items()}

    # No headers in conf => admin role. Randomly set the X-Hasura-Role header:
    # if a header is set, JWT/webhook auth will happen; otherwise the admin
    # secret will be set.
    if len(headers) == 0 and random.choice([True, False]):
        headers['X-Hasura-Role'] = 'admin'

    if add_auth:
        # Use the hasura role specified in the test case to create a JWT token
        if hge_ctx.hge_jwt_key is not None and len(headers) > 0 and 'X-Hasura-Role' in headers:
            hClaims = dict()
            hClaims['X-Hasura-Allowed-Roles'] = [headers['X-Hasura-Role']]
            hClaims['X-Hasura-Default-Role'] = headers['X-Hasura-Role']
            for key in headers:
                if key != 'X-Hasura-Role':
                    hClaims[key] = headers[key]
            claim = {
                "sub": "foo",
                "name": "bar",
            }
            claim = mk_claims_with_namespace_path(claim, hClaims, claims_namespace_path)
            headers['Authorization'] = 'Bearer ' + jwt.encode(claim, hge_ctx.hge_jwt_key, algorithm=hge_ctx.hge_jwt_algo)

        # Use the hasura role specified in the test case, and create an
        # authorization token which will be verified by the webhook
        if hge_ctx.hge_webhook is not None and len(headers) > 0:
            if not hge_ctx.webhook_insecure:
                # Check whether the output is also forbidden when the webhook returns forbidden
                test_forbidden_webhook(hge_ctx, conf)
            headers['X-Hasura-Auth-Mode'] = 'webhook'
            headers_new = dict()
            headers_new['Authorization'] = 'Bearer ' + base64.b64encode(json.dumps(headers).encode('utf-8')).decode('utf-8')
            headers = headers_new
        # The case as admin with admin-secret and jwt/webhook
        elif (hge_ctx.hge_webhook is not None or hge_ctx.hge_jwt_key is not None) and hge_ctx.hge_key is not None and len(headers) == 0:
            headers['X-Hasura-Admin-Secret'] = hge_ctx.hge_key
        # The case as admin with only admin-secret
        elif hge_ctx.hge_key is not None and hge_ctx.hge_webhook is None and hge_ctx.hge_jwt_key is None:
            # Test whether it is forbidden when an incorrect/no admin_secret is specified
            test_forbidden_when_admin_secret_reqd(hge_ctx, conf)
            headers['X-Hasura-Admin-Secret'] = hge_ctx.hge_key

    assert transport in ['http', 'websocket', 'subscription'], "Unknown transport type " + transport
    if transport == 'http':
        print('running on http')
        if 'allowed_responses' in conf:
            return validate_http_anyq_with_allowed_responses(hge_ctx, conf['url'], conf['query'], headers,
                                                             conf['status'], conf.get('allowed_responses'),
                                                             body=conf.get('body'), method=conf.get('method'))
        else:
            return validate_http_anyq(hge_ctx, conf['url'], conf['query'], headers,
                                      conf['status'], conf.get('response'), conf.get('resp_headers'),
                                      body=conf.get('body'), method=conf.get('method'))
    elif transport == 'websocket':
        print('running on websocket')
        return validate_gql_ws_q(hge_ctx, conf, headers, retry=True, gqlws=gqlws)
    elif transport == 'subscription':
        print('running via subscription')
        return validate_gql_ws_q(hge_ctx, conf, headers, retry=True, via_subscription=True, gqlws=gqlws)

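# A typical `conf` value, as loaded from a test case's YAML file
# (illustrative):
#
#   url: /v1/graphql
#   status: 200
#   query:
#     query: "query { author { id name } }"
#   response:
#     data:
#       author: [...]
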
def validate_gql_ws_q(hge_ctx, conf, headers, retry=False, via_subscription=False, gqlws=False):
    assert 'response' in conf
    assert conf['url'].endswith('/graphql') or conf['url'].endswith('/relay')
    endpoint = conf['url']
    query = conf['query']
    exp_http_response = conf['response']

    if via_subscription:
        query_text = query['query']
        assert query_text.startswith('query '), query_text
        # Turn the query into a subscription, adding the
        # _multiple_top_level_fields directive, which allows more than one
        # root field in a subscription
        query['query'] = 'subscription' + query_text[len('query'):].replace("{", " @_multiple_top_level_fields {", 1)
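        # For instance, 'query { author { id } }' becomes (modulo whitespace)
        # 'subscription @_multiple_top_level_fields { author { id } }'.
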
    if endpoint == '/v1alpha1/graphql':
        ws_client = hge_ctx.ws_client_v1alpha1
    elif endpoint == '/v1beta1/relay':
        ws_client = hge_ctx.ws_client_relay
    elif gqlws:  # for `graphql-ws` clients
        ws_client = hge_ctx.ws_client_graphql_ws
    else:
        ws_client = hge_ctx.ws_client
    print(ws_client.ws_url)

    if not headers or len(headers) == 0:
        ws_client.init({})

    if ws_client.remote_closed or ws_client.is_closing:
        ws_client.create_conn()
        if not headers or len(headers) == 0 or hge_ctx.hge_key is None:
            ws_client.init()
        else:
            ws_client.init_as_admin()

    query_resp = ws_client.send_query(query, query_id='hge_test', headers=headers, timeout=15)
    resp = next(query_resp)
    print('websocket resp: ', resp)

    if resp.get('type') == 'complete':
        if retry:
            # Got query complete before the payload. Retry once more
            print("Got query complete before getting query response payload. Retrying")
            ws_client.recreate_conn()
            return validate_gql_ws_q(hge_ctx, conf, headers, retry=False, via_subscription=via_subscription, gqlws=gqlws)
        else:
            assert resp['type'] in ['data', 'error', 'next'], resp

    if 'errors' in exp_http_response or 'error' in exp_http_response:
        if gqlws:
            resp['payload'] = {'errors': resp['payload']}
        assert resp['type'] in ['data', 'error', 'next'], resp
    else:
        assert resp['type'] == 'data' or resp['type'] == 'next', resp
    assert 'payload' in resp, resp

    if via_subscription:
        if not gqlws:
            ws_client.send({ 'id': 'hge_test', 'type': 'stop' })
        else:
            ws_client.send({ 'id': 'hge_test', 'type': 'complete' })
        if not gqlws:  # NOTE: for graphql-ws, some elements are left in the queue, especially after a 'next' message
            with pytest.raises(queue.Empty):
                ws_client.get_ws_event(0)
    else:
        resp_done = next(query_resp)
        assert resp_done['type'] == 'complete'

    return assert_graphql_resp_expected(resp['payload'], exp_http_response, query, skip_if_err_msg=hge_ctx.avoid_err_msg_checks)

def assert_response_code(url, query, code, exp_code, resp, body=None):
    assert code == exp_code, \
        f"""
When querying {url},
Got response code {code}, expected {exp_code}.
Request query:
{textwrap.indent(json.dumps(query, indent=2), ' ')}
Request body:
{textwrap.indent(json.dumps(body, indent=2), ' ')}
Response body:
{textwrap.indent(json.dumps(resp, indent=2), ' ')}
"""

def validate_http_anyq(hge_ctx, url, query, headers, exp_code, exp_response, exp_resp_hdrs, body=None, method=None):
    code, resp, resp_hdrs = hge_ctx.anyq(url, query, headers, body, method)
    assert_response_code(url, query, code, exp_code, resp, body)

    if exp_response:
        return assert_graphql_resp_expected(resp, exp_response, query, resp_hdrs, hge_ctx.avoid_err_msg_checks, exp_resp_hdrs=exp_resp_hdrs)
    else:
        return resp, True

def validate_http_anyq_with_allowed_responses(hge_ctx, url, query, headers, exp_code, allowed_responses, body=None, method=None):
    code, resp, resp_hdrs = hge_ctx.anyq(url, query, headers, body, method)
    assert_response_code(url, query, code, exp_code, resp, body)

    if isinstance(allowed_responses, list) and len(allowed_responses) > 0:
        resp_res = {}
        test_passed = False

        for response in allowed_responses:
            # Round-trip through JSON to turn YAML-parser objects into plain dicts
            dict_resp = json.loads(json.dumps(response))
            exp_resp = dict_resp['response']
            exp_resp_hdrs = dict_resp.get('resp_headers')
            resp_result, pass_test = assert_graphql_resp_expected(resp, exp_resp, query, resp_hdrs, hge_ctx.avoid_err_msg_checks, True, exp_resp_hdrs)
            if pass_test:
                test_passed = True
                resp_res = resp_result
                break

        if test_passed:
            return resp_res, test_passed
        else:
            # The test should fail if none of the allowed responses match
            raise Exception("allowed_responses did not contain the expected response. Please check your allowed_responses")
    else:
        raise Exception("allowed_responses was not a list of permissible responses")

# Check the actual graphql response is what we expected, also taking into
# consideration the ordering of keys that we expect to be preserved, based on
# 'query'.
#
# Returns 'resp' and a bool indicating whether the test passed or not (this
# will always be True unless we are `--accepting`)
def assert_graphql_resp_expected(resp_orig, exp_response_orig, query, resp_hdrs={}, skip_if_err_msg=False, skip_assertion=False, exp_resp_hdrs={}):
print('Reponse Headers: ', resp_hdrs)
print(exp_resp_hdrs)
# Prepare actual and expected responses so comparison takes into
# consideration only the ordering that we care about:
resp = collapse_order_not_selset(resp_orig, query)
exp_response = collapse_order_not_selset(exp_response_orig, query)
matched = equal_CommentedMap(resp, exp_response) and (exp_resp_hdrs or {}).items() <= resp_hdrs.items()
if PytestConf.config.getoption("--accept"):
print('skipping assertion since we chose to --accept new output')
else:
yml = yaml.YAML()
# https://yaml.readthedocs.io/en/latest/example.html#output-of-dump-as-a-string :
dump_str = StringIO()
test_output = {
# Keep strict received order when displaying errors:
'response': resp_orig,
'expected': exp_response_orig,
'diff':
(lambda diff:
"(results differ only in their order of keys)" if diff == {} else diff)
run default tests in test_server_upgrade (#3718) * run basic tests after upgrade * terminate before specifying file in pytest cmd * Move fixture definitions out of test classes Previously we had abstract classes with the fixtures defined in them. The test classes then inherits these super classes. This is creating inheritence problems, especially when you want to just inherit the tests in class, but not the fixtures. We have now moved all those fixture definitions outside of the class (in conftest.py). These fixtures are now used by the test classes when and where they are required. * Run pytests on server upgrade Server upgrade tests are run by 1) Run pytest with schema/metadata setup but do not do schema/metadata teardown 2) Upgrade the server 3) Run pytest using the above schema and teardown at the end of the tests 4) Cleanup hasura metadata and start again with next set of tests We have added options --skip-schema-setup and --skip-schema-teardown to help running server upgrade tests. While running the tests, we noticed that error codes and messages for some of the tests have changed. So we have added another option to pytest `--avoid-error-message-checks`. If this flag is set, and if comparing expected and response message fails, and if the expected response has an error message, Pytest will throw warnings instead of an error. * Use marks to specify server-upgrade tests Not all tests can be run as serve upgrade tests, particularly those which themselves change the schema. We introduce two pytest markers. Marker allow_server_upgrade_test will add the test into the list of server upgrade tests that can be run. skip_server_upgrade_test removes it from the list. With this we have added tests for queries, mutations, and selected event trigger and remote schema tests to the list of server upgrade tests. * Remove components not needed anymore * Install curl * Fix error in query validation * Fix error in test_v1_queries.py * install procps for server upgrade tests * Use postgres image which has postgis installed * set pager off with psql * quote the bash variable WORKTREE_DIR Co-authored-by: nizar-m <19857260+nizar-m@users.noreply.github.com> Co-authored-by: Vamshi Surabhi <0x777@users.noreply.github.com>
2020-02-13 12:14:02 +03:00
(stringify_keys(jsondiff.diff(exp_response, resp))),
'query': query
}
if 'x-request-id' in resp_hdrs:
test_output['request id'] = resp_hdrs['x-request-id']
if exp_resp_hdrs:
diff_hdrs = {key: val for key, val in resp_hdrs.items() if key in exp_resp_hdrs}
test_output['headers'] = {
'actual': dict(resp_hdrs),
'expected': exp_resp_hdrs,
'diff': (stringify_keys(jsondiff.diff(exp_resp_hdrs, diff_hdrs)))
}
yml.dump(test_output, stream=dump_str)
if not skip_if_err_msg:
if skip_assertion:
return resp, matched
else:
assert matched, '\n' + dump_str.getvalue()
elif matched:
return resp, matched
else:
def is_err_msg(msg):
            return any(msg.get(x) for x in ['error', 'errors'])
def as_list(x):
return x if isinstance(x, list) else [x]
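        # A rough sketch of the normalization (values illustrative): a batch
        # request sends a JSON list of queries and receives a list back, so
        # wrapping the single-response case lets both shapes share one loop:
        #   as_list({'data': {}})   == [{'data': {}}]
        #   as_list([{'data': {}}]) == [{'data': {}}]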
# If it is a batch GraphQL query, compare each individual response separately
for (exp, out) in zip(as_list(exp_response), as_list(resp)):
matched_ = equal_CommentedMap(exp, out)
if is_err_msg(exp) and is_err_msg(out):
if not matched_:
warnings.warn("Response does not have the expected error message\n" + dump_str.getvalue())
return resp, matched
else:
if skip_assertion:
return resp, matched_
else:
assert matched_, '\n' + dump_str.getvalue()
return resp, matched # matched always True unless --accept
# This really sucks; newer ruamel versions made CommentedMap.__eq__ ignore
# key ordering, so we roll our own order-sensitive deep equality:
# https://bitbucket.org/ruamel/yaml/issues/326/commentedmap-equality-no-longer-takes-into
def equal_CommentedMap(m1, m2):
if isinstance(m1, list) and isinstance(m2, list):
return (len(m1) == len(m2) and all(equal_CommentedMap(x,y) for x,y in zip(m1,m2)))
elif isinstance(m1, dict) and isinstance(m2, dict):
# (see collapse_order_not_selset):
if isinstance(m1, (ordereddict, CommentedMap)) and \
isinstance(m2, (ordereddict, CommentedMap)):
m1_l = list(m1.items())
m2_l = list(m2.items())
else:
m1_l = sorted(list(m1.items()))
m2_l = sorted(list(m2.items()))
return (len(m1_l) == len(m2_l) and
all(k1 == k2 and equal_CommentedMap(v1,v2)
for (k1,v1),(k2,v2) in zip(m1_l,m2_l)))
# else this is a scalar:
else:
return m1 == m2
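# A minimal sketch of the order sensitivity (values illustrative):
#   >>> equal_CommentedMap(CommentedMap([('a', 1), ('b', 2)]),
#   ...                    CommentedMap([('b', 2), ('a', 1)]))
#   False    # same entries, different order
#   >>> equal_CommentedMap({'a': 1, 'b': 2}, {'b': 2, 'a': 1})
#   True     # plain dicts are compared after sorting their items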
# Parse test case YAML file
def get_conf_f(f):
    with open(f, 'r') as c:
return yaml.YAML().load(c)
def check_query_f(hge_ctx, f, transport='http', add_auth=True, gqlws=False):
print("Test file: " + f)
hge_ctx.may_skip_test_teardown = False
print ("transport="+transport)
with open(f, 'r+') as c:
# For `--accept`:
should_write_back = False
        # ruamel will preserve key order, so we can test that the JSON ordering
        # of responses matches the expected YAML. It also lets us write the
        # YAML back out nicely when we `--accept`.
yml = yaml.YAML()
conf = yml.load(c)
if isinstance(conf, list):
for ix, sconf in enumerate(conf):
actual_resp, matched = check_query(hge_ctx, sconf, transport, add_auth, None, gqlws)
if PytestConf.config.getoption("--accept") and not matched:
conf[ix]['response'] = actual_resp
should_write_back = True
else:
if conf['status'] != 200:
hge_ctx.may_skip_test_teardown = True
actual_resp, matched = check_query(hge_ctx, conf, transport, add_auth, None, gqlws)
# If using `--accept` write the file back out with the new expected
# response set to the actual response we got:
if PytestConf.config.getoption("--accept") and not matched:
conf['response'] = actual_resp
should_write_back = True
if should_write_back:
warnings.warn(
"\nRecording formerly failing case as correct in: " + f +
"\n NOTE: if this case was marked 'xfail' this won't be correct!"
)
c.seek(0)
yml.dump(conf, stream=c)
c.truncate()
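# For reference, a single-case test file consumed above looks roughly like
# this (a sketch: the url and query values are illustrative; 'status',
# 'query', and 'response' are the keys actually read here and in check_query;
# a file may also contain a top-level list of such cases):
#
#   description: simple query
#   url: /v1/graphql
#   status: 200
#   query:
#     query: |
#       { author { id name } }
#   response:
#     data:
#       author: []
#
# Running pytest with `--accept` re-records the 'response' key in place with
# the actual server response, via the write-back above.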
# Return a new dict that discards the object key ordering properties of
# 'result' where the key is not part of the selection set. This lets us compare
# expected and actual results properly with respect to the GraphQL spec's
# ordering requirements.
def collapse_order_not_selset(result_inp, query):
# Collapse to unordered dict recursively by roundtripping through json
def collapse(x):
return json.loads(json.dumps(x))
result = copy.deepcopy(result_inp)
try:
if 'query' in query:
gql_query_str = query['query']
# We don't support multiple operations in the same query yet:
selset0 = graphql.parse(gql_query_str).definitions[0].selection_set
def go(result_node, selset):
for field in selset.selections:
fname = field.name.value
# If field has no subfields then all its values can be recursively stripped of ordering.
# Also if it's an array for some reason (like in 'returning')
if field.selection_set is None or not isinstance(result_node[fname], (dict, list)):
result_node[fname] = collapse(result_node[fname])
elif isinstance(result_node[fname], list):
for node in result_node[fname]:
go(node, field.selection_set)
else:
go(result_node[fname], field.selection_set)
if 'data' in result:
go(result['data'], selset0)
# errors is unordered
if 'errors' in result:
result['errors'] = collapse(result['errors'])
# and finally remove ordering at just the topmost level
return dict(result)
else:
# this isn't a graphql query, collapse ordering
return collapse(result_inp)
except Exception as e:
print("Bailing out and collapsing all ordering, due to: ", e)
return collapse(result)
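# A sketch of the effect (illustrative): for a query like
# '{ author { id name } }', keys appearing in the selection set keep their
# received order (so tests can assert the GraphQL-spec field ordering), while
# leaf values, 'errors', and the topmost level are collapsed to plain,
# order-insensitive dicts by the JSON roundtrip in collapse().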
# Use this since jsondiff seems to produce object/dict structures that can't
# always be serialized to json.
def stringify_keys(d):
"""Recursively convert a dict's keys to strings."""
if not isinstance(d, dict): return d
def decode(k):
if isinstance(k, str): return k
try:
return k.decode("utf-8")
except Exception:
return repr(k)
return { decode(k): stringify_keys(v) for k, v in d.items() }
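# A small sketch (illustrative): bytes keys are decoded, any other non-string
# key falls back to repr(), so the result is always safely dumpable:
#   >>> stringify_keys({b'insert': {1: 'v'}})
#   {'insert': {'1': 'v'}}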