#!/usr/bin/env python3

import graphql
from http import HTTPStatus
import http.server
import json
import os
import queue
import random
import re
import requests
import ruamel.yaml as yaml
from ruamel.yaml.comments import CommentedMap as OrderedDict  # to avoid '!!omap' in yaml
import socketserver
import sqlalchemy
import sqlalchemy.schema
import string
import subprocess
import threading
import time
from typing import Any, NamedTuple, Optional
from urllib.parse import urlparse
import websocket

import fixtures.tls
import graphql_server
import ports


# pytest has removed the global pytest.config
# As a solution to this, we store it in PytestConf.config.
class PytestConf():
    config: Any
    pass


class HGECtxError(Exception):
    pass


# NOTE: use this to generate a GraphQL client that uses the `Apollo`
# (subscriptions-transport-ws) sub-protocol
class GQLWsClient():

    def __init__(self, hge_ctx, endpoint):
        self.hge_ctx = hge_ctx
        self.ws_queue = queue.Queue(maxsize=-1)
        self.ws_url = urlparse(hge_ctx.hge_url)._replace(scheme='ws',
                                                         path=endpoint)
        self.create_conn()

    def create_conn(self):
        self.ws_queue.queue.clear()
        self.ws_id_query_queues = dict()
        self.ws_active_query_ids = set()

        self.connected_event = threading.Event()
        self.init_done = False
        self.is_closing = False
        self.remote_closed = False

        self._ws = websocket.WebSocketApp(self.ws_url.geturl(),
            on_open=self._on_open, on_message=self._on_message, on_close=self._on_close)
        self.wst = threading.Thread(target=self._ws.run_forever)
        self.wst.daemon = True
        self.wst.start()

    def recreate_conn(self):
        self.teardown()
        self.create_conn()

    def wait_for_connection(self, timeout=10):
        assert not self.is_closing
        assert self.connected_event.wait(timeout=timeout)

    def get_ws_event(self, timeout):
        return self.ws_queue.get(timeout=timeout)

    def has_ws_query_events(self, query_id):
        return not self.ws_id_query_queues[query_id].empty()

    def get_ws_query_event(self, query_id, timeout):
        return self.ws_id_query_queues[query_id].get(timeout=timeout)

    def send(self, frame):
        self.wait_for_connection()
        if frame.get('type') == 'stop':
            self.ws_active_query_ids.discard(frame.get('id'))
        elif frame.get('type') == 'start' and 'id' in frame:
            self.ws_id_query_queues[frame['id']] = queue.Queue(maxsize=-1)
        self._ws.send(json.dumps(frame))

    def init_as_admin(self):
        headers = {}
        if self.hge_ctx.hge_key:
            headers = {'x-hasura-admin-secret': self.hge_ctx.hge_key}
        self.init(headers)

    def init(self, headers={}):
        payload = {'type': 'connection_init', 'payload': {}}

        if headers and len(headers) > 0:
            payload['payload']['headers'] = headers

        self.send(payload)
        ev = self.get_ws_event(3)
        assert ev['type'] == 'connection_ack', ev
        self.init_done = True

    def stop(self, query_id):
        data = {'id': query_id, 'type': 'stop'}
        self.send(data)
        self.ws_active_query_ids.discard(query_id)

    def gen_id(self, size=6, chars=string.ascii_letters + string.digits):
        new_id = ''.join(random.choice(chars) for _ in range(size))
        if new_id in self.ws_active_query_ids:
            return self.gen_id(size, chars)
        return new_id

    def send_query(self, query, query_id=None, headers={}, timeout=60):
        graphql.parse(query['query'])
        if headers and len(headers) > 0:
            # Re-initialize if headers are provided
            self.init(headers)
        elif not self.init_done:
            self.init()
        if query_id is None:
            query_id = self.gen_id()
        frame = {
            'id': query_id,
            'type': 'start',
            'payload': query,
        }
        self.ws_active_query_ids.add(query_id)
        self.send(frame)
        while True:
            yield self.get_ws_query_event(query_id, timeout)

    def _on_open(self):
        if not self.is_closing:
            self.connected_event.set()

    def _on_message(self, message):
        # NOTE: make sure we preserve key ordering so we can test the ordering
        # properties in the GraphQL spec properly
        json_msg = json.loads(message, object_pairs_hook=OrderedDict)
        if 'id' in json_msg:
            query_id = json_msg['id']
            if json_msg.get('type') == 'stop':
                # Remove from the active queries list
                self.ws_active_query_ids.discard(query_id)
            if query_id not in self.ws_id_query_queues:
                self.ws_id_query_queues[query_id] = queue.Queue(maxsize=-1)
            # Put the event in the corresponding query queue
            self.ws_id_query_queues[query_id].put(json_msg)
        elif json_msg['type'] != 'ka':
            # Put the event in the main queue (keepalives are dropped)
            self.ws_queue.put(json_msg)

    def _on_close(self):
        self.remote_closed = True
        self.init_done = False

    def get_conn_close_state(self):
        return self.remote_closed or self.is_closing

    def teardown(self):
        self.is_closing = True
        if not self.remote_closed:
            self._ws.close()
        self.wst.join()
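

# A minimal usage sketch (hedged: `hge_ctx` stands for the test fixture this
# module is driven by, and the subscription document is illustrative).
# `send_query` is a generator, so each `next()` blocks until the server
# pushes the next frame for that query id:
#
#     ws_client = GQLWsClient(hge_ctx, '/v1/graphql')
#     events = ws_client.send_query({'query': 'subscription { user { id } }'})
#     ev = next(events)          # first 'data' frame for this subscription
#     ws_client.stop(ev['id'])   # sends a 'stop' frame for that query id
#     ws_client.teardown()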


# NOTE: use this to generate a GraphQL client that uses the `graphql-ws` sub-protocol
class GraphQLWSClient():

    def __init__(self, hge_ctx, endpoint):
        self.hge_ctx = hge_ctx
        self.ws_queue = queue.Queue(maxsize=-1)
        self.ws_url = urlparse(hge_ctx.hge_url)._replace(scheme='ws',
                                                         path=endpoint)
        self.create_conn()

    def get_queue(self):
        return self.ws_queue.queue

    def clear_queue(self):
        self.ws_queue.queue.clear()

    def create_conn(self):
        self.ws_queue.queue.clear()
        self.ws_id_query_queues = dict()
        self.ws_active_query_ids = set()

        self.connected_event = threading.Event()
        self.init_done = False
        self.is_closing = False
        self.remote_closed = False

        self._ws = websocket.WebSocketApp(self.ws_url.geturl(),
            on_open=self._on_open, on_message=self._on_message, on_close=self._on_close,
            subprotocols=["graphql-transport-ws"])
        self.wst = threading.Thread(target=self._ws.run_forever)
        self.wst.daemon = True
        self.wst.start()

    def recreate_conn(self):
        self.teardown()
        self.create_conn()

    def wait_for_connection(self, timeout=10):
        assert not self.is_closing
        assert self.connected_event.wait(timeout=timeout)

    def get_ws_event(self, timeout):
        return self.ws_queue.get(timeout=timeout)

    def has_ws_query_events(self, query_id):
        return not self.ws_id_query_queues[query_id].empty()

    def get_ws_query_event(self, query_id, timeout):
        return self.ws_id_query_queues[query_id].get(timeout=timeout)

    def send(self, frame):
        self.wait_for_connection()
        if frame.get('type') == 'complete':
            self.ws_active_query_ids.discard(frame.get('id'))
        elif frame.get('type') == 'subscribe' and 'id' in frame:
            self.ws_id_query_queues[frame['id']] = queue.Queue(maxsize=-1)
        self._ws.send(json.dumps(frame))

    def init_as_admin(self):
        headers = {}
        if self.hge_ctx.hge_key:
            headers = {'x-hasura-admin-secret': self.hge_ctx.hge_key}
        self.init(headers)

    def init(self, headers={}):
        payload = {'type': 'connection_init', 'payload': {}}

        if headers and len(headers) > 0:
            payload['payload']['headers'] = headers

        self.send(payload)
        ev = self.get_ws_event(5)
        assert ev['type'] == 'connection_ack', ev
        self.init_done = True

    def stop(self, query_id):
        data = {'id': query_id, 'type': 'complete'}
        self.send(data)
        self.ws_active_query_ids.discard(query_id)

    def gen_id(self, size=6, chars=string.ascii_letters + string.digits):
        new_id = ''.join(random.choice(chars) for _ in range(size))
        if new_id in self.ws_active_query_ids:
            return self.gen_id(size, chars)
        return new_id

    def send_query(self, query, query_id=None, headers={}, timeout=60):
        graphql.parse(query['query'])
        if headers and len(headers) > 0:
            # Re-initialize if headers are provided
            self.clear_queue()
            self.init(headers)
        elif not self.init_done:
            self.init()
        if query_id is None:
            query_id = self.gen_id()
        frame = {
            'id': query_id,
            'type': 'subscribe',
            'payload': query,
        }
        self.ws_active_query_ids.add(query_id)
        self.send(frame)
        while True:
            yield self.get_ws_query_event(query_id, timeout)

    def _on_open(self):
        if not self.is_closing:
            self.connected_event.set()

    def _on_message(self, message):
        # NOTE: make sure we preserve key ordering so we can test the ordering
        # properties in the GraphQL spec properly
        json_msg = json.loads(message, object_pairs_hook=OrderedDict)

        if json_msg['type'] == 'ping':
            new_msg = json_msg
            new_msg['type'] = 'pong'
            # Decline to reflect the payload of the ping, because the
            # graphql-ws specification does not require it
            new_msg.pop('payload', None)
            # `send` serializes the frame itself, so pass the dict rather
            # than a pre-encoded JSON string
            self.send(new_msg)
            return

        if 'id' in json_msg:
            query_id = json_msg['id']
            if json_msg.get('type') == 'complete':
                # Remove from the active queries list
                self.ws_active_query_ids.discard(query_id)
            if query_id not in self.ws_id_query_queues:
                self.ws_id_query_queues[query_id] = queue.Queue(maxsize=-1)
            # Put the event in the corresponding query queue
            self.ws_id_query_queues[query_id].put(json_msg)

        if json_msg['type'] != 'ping':
            self.ws_queue.put(json_msg)

    def _on_close(self):
        self.remote_closed = True
        self.init_done = False

    def get_conn_close_state(self):
        return self.remote_closed or self.is_closing

    def teardown(self):
        self.is_closing = True
        if not self.remote_closed:
            self._ws.close()
        self.wst.join()
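

# The two clients above speak different WebSocket sub-protocols for the same
# job. A rough frame-level mapping between them (a sketch, not exhaustive):
#
#     Apollo (subscriptions-transport-ws)    graphql-ws (graphql-transport-ws)
#     -----------------------------------    ---------------------------------
#     start (client -> server)               subscribe
#     data (server -> client)                next
#     stop (client -> server)                complete
#     ka keepalives (ignored)                ping, answered with pong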


class ActionsWebhookHandler(http.server.BaseHTTPRequestHandler):
    hge_url: str
    hge_key: Optional[str]

    def do_GET(self):
        self.send_response(HTTPStatus.OK)
        self.end_headers()

    def do_POST(self):
        content_len = self.headers.get('Content-Length')
        req_body = self.rfile.read(int(content_len)).decode("utf-8")
        self.req_json = json.loads(req_body)
        req_headers = self.headers
        req_path = self.path
        self.log_message(json.dumps(self.req_json))

        if req_path == "/create-user":
            resp, status = self.create_user()
            self._send_response(status, resp)

        elif req_path == "/create-user-timeout":
            time.sleep(3)
            resp, status = self.create_user()
            self._send_response(status, resp)

        elif req_path == "/create-users":
            resp, status = self.create_users()
            self._send_response(status, resp)

        elif req_path == "/create-user-nested":
            resp, status = self.create_user_nested()
            self._send_response(status, resp)

        elif req_path == "/mirror-action":
            resp, status = self.mirror_action()
            self._send_response(status, resp)

        elif req_path == "/mirror-headers":
            resp, status = self.mirror_headers()
            self._send_response(status, resp)

        elif req_path == "/get-user-by-email":
            resp, status = self.get_users_by_email(True)
            self._send_response(status, resp)

        elif req_path == "/get-user-by-email-nested":
            resp, status = self.get_users_by_email_nested(True)
            self._send_response(status, resp)

        elif req_path == "/get-users-by-email":
            resp, status = self.get_users_by_email(False)
            self._send_response(status, resp)

        elif req_path == "/get-users-by-email-nested":
            resp, status = self.get_users_by_email_nested(False)
            self._send_response(status, resp)

        elif req_path == "/intentional-error":
            resp, status = self.intentional_error()
            self._send_response(status, resp)

        elif req_path == "/null-response":
            resp, status = self.null_response()
            self._send_response(status, resp)

        elif req_path == "/omitted-response-field":
            self._send_response(
                HTTPStatus.OK,
                self.get_omitted_response_field()
            )

        elif req_path == "/scalar-response":
            self._send_response(HTTPStatus.OK, "some-string")

        elif req_path == "/json-response":
            resp, status = self.json_response()
            self._send_response(status, resp)

        elif req_path == "/custom-scalar-array-response":
            resp, status = self.custom_scalar_array_response()
            self._send_response(status, resp)

        elif req_path == "/scalar-array-response":
            self._send_response(HTTPStatus.OK, ["foo", "bar", None])

        elif req_path == "/recursive-output":
            resp, status = self.recursive_output()
            self._send_response(status, resp)

        elif req_path == "/get-results":
            resp, status = self.get_results()
            self._send_response(status, resp)

        elif req_path == "/typed-nested-null":
            self._send_response(
                HTTPStatus.OK,
                self.get_typed_nested_null()
            )

        elif req_path == "/typed-nested-null-wrong-field":
            self._send_response(
                HTTPStatus.OK,
                self.get_typed_nested_null_wrong_field()
            )

        elif req_path == "/get_messages":
            resp, status = self.get_messages()
            self._send_response(status, resp)

        else:
            self.send_response(HTTPStatus.NO_CONTENT)
            self.end_headers()
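
    # The handlers below read `self.req_json['input']`, i.e. the `input`
    # field of the action webhook payload that HGE POSTs to these endpoints.
    # A sketch of the assumed shape (abridged; see the Hasura actions docs
    # for the full envelope):
    #
    #     {
    #       "action": {"name": "create_user"},
    #       "input": {"email": "foo@bar.com", "name": "Foo"},
    #       "session_variables": {"x-hasura-role": "admin"}
    #     }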

    def intentional_error(self):
        blob = self.req_json['input']['blob']
        return blob, HTTPStatus.BAD_REQUEST

    def create_user(self):
        email_address = self.req_json['input']['email']
        name = self.req_json['input']['name']

        if not self.check_email(email_address):
            response = {
                'message': 'Given email address is not valid',
                'code': 'invalid-email'
            }
            return response, HTTPStatus.BAD_REQUEST

        gql_query = '''
        mutation ($email: String! $name: String!) {
          insert_user_one(object: {email: $email, name: $name}) {
            id
          }
        }
        '''
        query = {
            'query': gql_query,
            'variables': {
                'email': email_address,
                'name': name
            }
        }
        code, resp = self.execute_query(query)
        if code != 200 or 'data' not in resp:
            response = {
                'message': 'GraphQL query execution failed',
                'code': 'unexpected'
            }
            return response, HTTPStatus.BAD_REQUEST

        response = resp['data']['insert_user_one']

        return response, HTTPStatus.OK

    def create_users(self):
        inputs = self.req_json['input']['users']
        for input in inputs:
            email_address = input['email']
            if not self.check_email(email_address):
                response = {
                    'message': 'Email address is not valid: ' + email_address,
                    'code': 'invalid-email'
                }
                return response, HTTPStatus.BAD_REQUEST

        gql_query = '''
        mutation ($insert_inputs: [user_insert_input!]!) {
          insert_user(objects: $insert_inputs) {
            returning {
              id
            }
          }
        }
        '''
        query = {
            'query': gql_query,
            'variables': {
                'insert_inputs': inputs
            }
        }
        code, resp = self.execute_query(query)
        if code != 200 or 'data' not in resp:
            response = {
                'message': 'GraphQL query execution failed',
                'code': 'unexpected'
            }
            return response, HTTPStatus.BAD_REQUEST

        response = resp['data']['insert_user']['returning']
        return response, HTTPStatus.OK

    def create_user_nested(self):
        email_address = self.req_json['input']['email']
        name = self.req_json['input']['name']

        if not self.check_email(email_address):
            response = {
                'message': 'Given email address is not valid',
                'code': 'invalid-email'
            }
            return response, HTTPStatus.BAD_REQUEST

        gql_query = '''
        mutation ($email: String! $name: String!) {
          insert_user_one(object: {email: $email, name: $name}) {
            id
          }
        }
        '''
        query = {
            'query': gql_query,
            'variables': {
                'email': email_address,
                'name': name
            }
        }
        code, resp = self.execute_query(query)
        if code != 200 or 'data' not in resp:
            response = {
                'message': 'GraphQL query execution failed',
                'code': 'unexpected'
            }
            return response, HTTPStatus.BAD_REQUEST

        response = {
            'userObj': resp['data']['insert_user_one']
        }
        return response, HTTPStatus.OK

    def get_messages(self):
        response = [
            {"content": "baz", "user_name": "foo"},
            {"content": "foo", "user_name": "bar"}
        ]
        return response, HTTPStatus.OK

    def mirror_action(self):
        response = self.req_json['input']['arg']
        return response, HTTPStatus.OK

    def mirror_headers(self):
        response = {
            'headers': list(map(lambda header: {'name': header[0], 'value': header[1]}, self.headers.items()))
        }
        return response, HTTPStatus.OK
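
    # A sketch of exercising one of these endpoints directly (hypothetical
    # host/port; in the test suite HGE itself calls them as action handlers):
    #
    #     requests.post('http://localhost:5593/mirror-action',
    #                   json={'input': {'arg': {'x': 1}}}).json()
    #     # => {'x': 1}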

    def get_users_by_email(self, singleUser=False):
        email = self.req_json['input']['email']
        if not self.check_email(email):
            response = {
                'message': 'Given email address is not valid',
                'code': 'invalid-email'
            }
            return response, HTTPStatus.BAD_REQUEST
        gql_query = '''
        query get_user($email: String!) {
          user(where: {email: {_eq: $email}}, order_by: {id: asc}) {
            id
          }
        }
        '''
        query = {
            'query': gql_query,
            'variables': {
                'email': email
            }
        }
        code, resp = self.execute_query(query)
        if code != 200 or 'data' not in resp:
            response = {
                'message': 'GraphQL query execution failed',
                'code': 'unexpected'
            }
            return response, HTTPStatus.BAD_REQUEST
        if singleUser:
            return resp['data']['user'][0], HTTPStatus.OK
        else:
            return resp['data']['user'], HTTPStatus.OK

    def get_users_by_email_nested(self, singleUser=False):
        resp, status = self.get_users_by_email(singleUser)

        def make_nested_out_object(outObj):
            address = {'city': 'New York', 'country': 'USA'}
            outObj['address'] = address
            addresses = [{'city': 'Bangalore', 'country': 'India'},
                         {'city': 'Melbourne', 'country': 'Australia'}]
            outObj['addresses'] = addresses
            outObj['user_id'] = {'id': outObj['id']}
            return outObj

        if status != HTTPStatus.OK:
            return resp, status
        if singleUser:
            return make_nested_out_object(resp), status
        else:
            # Materialize the map: `_send_response` JSON-encodes the body,
            # and a bare map object is not JSON-serializable.
            return list(map(make_nested_out_object, resp)), status

    def get_typed_nested_null(self):
        return {
            'id': 1,
            'child': None
        }

    def get_omitted_response_field(self):
        return {
            'country': 'India'
        }

    def get_typed_nested_null_wrong_field(self):
        return {
            'id': None,
            'child': None
        }

    def null_response(self):
        response = None
        return response, HTTPStatus.OK

    def json_response(self):
        response = {
            'foo': 'bar'
        }
        return response, HTTPStatus.OK

    def custom_scalar_array_response(self):
        response = [{
            'foo': 'bar'
        }]
        return response, HTTPStatus.OK

    def recursive_output(self):
        return {
            'direct': {'id': 1, 'this': {'id': 2, 'this': {'id': 3}}},
            'list': {'id': 1, 'these': [{'id': 2, 'these': [{'id': 3}]}, {'id': 4}]},
            'mutual': {'id': 1, 'that': {'id': 2, 'other': {'id': 3, 'that': {'id': 4}}}}
        }, HTTPStatus.OK

    def get_results(self):
        return {
            'result_ids': [1, 2, 3, 4]
        }, HTTPStatus.OK

    def check_email(self, email):
        regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
        return re.search(regex, email)

    def execute_query(self, query):
        headers = {}
        admin_secret = self.hge_key
        if admin_secret is not None:
            headers['X-Hasura-Admin-Secret'] = admin_secret
        resp = requests.post(
            self.hge_url + '/v1/graphql',
            json=query,
            headers=headers,
            timeout=60,
        )
        data = resp.json(object_pairs_hook=OrderedDict)
        self.log_message(json.dumps(data))
        return resp.status_code, data

    def _send_response(self, status, body):
        self.log_request(status)
        self.send_response_only(status)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Set-Cookie', 'abcd')
        self.end_headers()
        self.wfile.write(json.dumps(body).encode("utf-8"))


class ActionsWebhookServer(http.server.HTTPServer):
    def __init__(self, hge_url, hge_key, server_address):
        handler = ActionsWebhookHandler
        handler.hge_url = hge_url
        handler.hge_key = hge_key
        super().__init__(server_address, handler)

    @property
    def url(self):
        return f'http://{self.server_address[0]}:{self.server_address[1]}'
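

# A minimal sketch of running this server (port 0 asks the OS for a free
# port; the `hge_url`/`hge_key` values are placeholders):
#
#     server = ActionsWebhookServer('http://localhost:8080', None,
#                                   ('localhost', 0))
#     threading.Thread(target=server.serve_forever, daemon=True).start()
#     print(server.url)   # reflects the dynamically-assigned port
#     server.shutdown()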


class EvtsWebhookHandler(http.server.BaseHTTPRequestHandler):
    server: 'EvtsWebhookServer'

    def do_GET(self):
        self.send_response(HTTPStatus.OK)
        self.end_headers()

    def do_POST(self):
        content_len = self.headers.get('Content-Length')
        req_body = self.rfile.read(int(content_len)).decode("utf-8")
        req_json = json.loads(req_body)
        req_headers = self.headers
        req_path = self.path
        self.log_message(json.dumps(req_json))
        if req_path == "/fail":
            self.send_response(HTTPStatus.INTERNAL_SERVER_ERROR)
            self.end_headers()
        # This endpoint just sleeps for 2 seconds:
        elif req_path == "/sleep_2s":
            time.sleep(2)
            self.send_response(HTTPStatus.NO_CONTENT)
            self.end_headers()
        # This is like the sleep endpoint above, but lets us decide
        # externally when the webhook may return, via unblock():
        elif req_path == "/block":
            if not self.server.unblocked:
                self.server.blocked_count += 1
                with self.server.unblocked_wait:
                    # We expect this timeout never to be reached, but if
                    # something goes wrong the main thread will block forever:
                    self.server.unblocked_wait.wait(timeout=60)
                self.server.blocked_count -= 1
            self.send_response(HTTPStatus.NO_CONTENT)
            self.end_headers()
        else:
            self.send_response(HTTPStatus.NO_CONTENT)
            self.end_headers()

        self.server.resp_queue.put({"path": req_path,
                                    "body": req_json,
                                    "headers": req_headers})


# A very slightly more sane/performant HTTP server.
# See: https://stackoverflow.com/a/14089457/176841
#
# TODO use this elsewhere, or better yet: use e.g. bottle + waitress
class ThreadedHTTPServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
    """Handle requests in a separate thread."""

    @property
    def url(self):
        return f'http://{self.server_name}:{self.server_port}'
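

# (The threading mix-in matters for the /block endpoint above: each request
# runs on its own thread, so a deliberately blocked delivery does not stop
# the server from accepting further requests or from being unblocked.)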


class EvtsWebhookServer(ThreadedHTTPServer):
    def __init__(self, server_address):
        # Data received from Hasura by our webhook, pushed after it returns
        # to the client:
        self.resp_queue = queue.Queue()
        # We use these two vars to coordinate unblocking in the /block route
        self.unblocked = False
        self.unblocked_wait = threading.Condition()
        # ...and this for bookkeeping open blocked requests; this becomes
        # meaningless after the first call to unblock()
        self.blocked_count = 0

        super().__init__(server_address, EvtsWebhookHandler)

    # Unblock all webhook requests to /block. Idempotent.
    def unblock(self):
        self.unblocked = True
        with self.unblocked_wait:
            # NOTE: this only affects currently wait()-ing threads; future
            # wait()s would block again (hence the simple self.unblocked flag)
            self.unblocked_wait.notify_all()

    def get_event(self, timeout):
        return self.resp_queue.get(timeout=timeout)

    def is_queue_empty(self):
        # Call the method; returning the bound method itself would always
        # be truthy.
        return self.resp_queue.empty()
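

# A sketch of the /block coordination above (the address is a placeholder):
#
#     server = EvtsWebhookServer(('localhost', 5592))
#     threading.Thread(target=server.serve_forever, daemon=True).start()
#     # ... trigger an event whose webhook URL is server.url + '/block' ...
#     # the delivery thread now waits inside do_POST until:
#     server.unblock()
#     ev = server.get_event(timeout=10)   # the blocked delivery completes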


class HGECtxGQLServer:
    def __init__(self, server_address: tuple[str, int], tls_ca_configuration: Optional[fixtures.tls.TLSCAConfiguration] = None, hge_urls: list[str] = []):
        self.server_address = server_address
        self.tls_ca_configuration = tls_ca_configuration
        self.server: Optional[http.server.HTTPServer] = None

    def start_server(self):
        if not self.server:
            self.server = graphql_server.create_server(self.server_address, self.tls_ca_configuration)
            self.thread = threading.Thread(target=self.server.serve_forever)
            self.thread.start()
            # If the port is specified as 0, we will get a different,
            # dynamically-allocated port whenever we restart. This captures
            # the actual assigned port so that we re-use it.
            self.server_address = self.server.server_address
        ports.wait_for_port(self.port)

    def stop_server(self):
        if self.server:
            graphql_server.stop_server(self.server)
            self.thread.join()
            self.server = None

    @property
    def url(self):
        scheme = 'https' if self.tls_ca_configuration else 'http'
        return f'{scheme}://{self.host}:{self.port}'

    @property
    def host(self):
        # We must use 'localhost' and not `self.server.server_address[0]`
        # because when using TLS, we need a domain name, not an IP address.
        return 'localhost'

    @property
    def port(self):
        if not self.server:
            raise Exception('The server is not started.')
        return self.server.server_address[1]
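

# A sketch of the restart behaviour the comment above describes (plain HTTP,
# no TLS CA):
#
#     gql_server = HGECtxGQLServer(('localhost', 0))
#     gql_server.start_server()
#     port = gql_server.port          # dynamically allocated on first start
#     gql_server.stop_server()
#     gql_server.start_server()
#     assert gql_server.port == port  # the captured address is re-used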
server/tests-py: Start webhook.py inside the test harness.
We use a helper service to start a webhook-based authentication service for some tests. This moves the initialization of the service out of _test-server.sh_ and into the Python test harness, as a fixture.
In order to do this, I had to make a few changes. The main deviation is that we no longer run _all_ tests against an HGE with this authentication service, just a few (those in _test_webhook.py_). Because this reduced coverage, I have added some more tests there, which actually cover some areas not exacerbated elsewhere (mainly trying to use webhook credentials to talk to an admin-only endpoint).
The webhook service can run both with and without TLS, and decide whether it's necessary to skip one of these based on the arguments passed and how HGE is started, according to the following logic:
* If a TLS CA certificate is passed in, it will run with TLS, otherwise it will skip it.
* If HGE was started externally and a TLS certificate is provided, it will skip running without TLS, as it will assume that HGE was configured to talk to a webhook over HTTPS.
* Some tests should only be run with TLS; this is marked with a `tls_webhook_server` marker.
* Some tests should only be run _without_ TLS; this is marked with a `no_tls_webhook_server` marker.
The actual parameterization of the webhook service configuration is done through test subclasses, because normal pytest parameterization doesn't work with the `hge_fixture_env` hack that we use. Because `hge_fixture_env` is not a sanctioned way of conveying data between fixtures (and, unfortunately, there isn't a sanctioned way of doing this when the fixtures in question may not know about each other directly), parameterizing the `webhook_server` fixture doesn't actually parameterize `hge_server` properly. Subclassing forces this to work correctly.
The certificate generation is moved to a Python fixture, so that we don't have to revoke the CA certificate for _test_webhook_insecure.py_; we can just generate a bogus certificate instead. The CA certificate is still generated in the _test-server.sh_ script, as it needs to be installed into the OS certificate store.
Interestingly, the CA certificate installation wasn't actually working, because the certificates were written to the wrong location. This didn't cause any failures, as we weren't actually testing this behavior. This is now fixed with the other changes.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/6363
GitOrigin-RevId: 0f277d374daa64f657257ed2a4c2057c74b911db
2022-10-20 21:58:36 +03:00
|
|
|
class HGECtxWebhook(NamedTuple):
|
|
|
|
tls_trust: Optional[fixtures.tls.TLSTrust]
|
|
|
|
|
2018-09-18 09:21:57 +03:00
|
|
|
class HGECtx:
|
2019-04-08 10:22:38 +03:00
|
|
|
|
server/tests-py: Start webhook.py inside the test harness.
We use a helper service to start a webhook-based authentication service for some tests. This moves the initialization of the service out of _test-server.sh_ and into the Python test harness, as a fixture.
In order to do this, I had to make a few changes. The main deviation is that we no longer run _all_ tests against an HGE with this authentication service, just a few (those in _test_webhook.py_). Because this reduced coverage, I have added some more tests there, which actually cover some areas not exacerbated elsewhere (mainly trying to use webhook credentials to talk to an admin-only endpoint).
The webhook service can run both with and without TLS, and decide whether it's necessary to skip one of these based on the arguments passed and how HGE is started, according to the following logic:
* If a TLS CA certificate is passed in, it will run with TLS, otherwise it will skip it.
* If HGE was started externally and a TLS certificate is provided, it will skip running without TLS, as it will assume that HGE was configured to talk to a webhook over HTTPS.
* Some tests should only be run with TLS; this is marked with a `tls_webhook_server` marker.
* Some tests should only be run _without_ TLS; this is marked with a `no_tls_webhook_server` marker.
The actual parameterization of the webhook service configuration is done through test subclasses, because normal pytest parameterization doesn't work with the `hge_fixture_env` hack that we use. Because `hge_fixture_env` is not a sanctioned way of conveying data between fixtures (and, unfortunately, there isn't a sanctioned way of doing this when the fixtures in question may not know about each other directly), parameterizing the `webhook_server` fixture doesn't actually parameterize `hge_server` properly. Subclassing forces this to work correctly.
The certificate generation is moved to a Python fixture, so that we don't have to revoke the CA certificate for _test_webhook_insecure.py_; we can just generate a bogus certificate instead. The CA certificate is still generated in the _test-server.sh_ script, as it needs to be installed into the OS certificate store.
Interestingly, the CA certificate installation wasn't actually working, because the certificates were written to the wrong location. This didn't cause any failures, as we weren't actually testing this behavior. This is now fixed with the other changes.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/6363
GitOrigin-RevId: 0f277d374daa64f657257ed2a4c2057c74b911db
2022-10-20 21:58:36 +03:00
|
|
|
def __init__(
|
|
|
|
self,
|
|
|
|
hge_url: str,
|
2022-11-15 22:07:34 +03:00
|
|
|
metadata_schema_url: str,
|
server/tests-py: Start webhook.py inside the test harness.
We use a helper service to start a webhook-based authentication service for some tests. This moves the initialization of the service out of _test-server.sh_ and into the Python test harness, as a fixture.
In order to do this, I had to make a few changes. The main deviation is that we no longer run _all_ tests against an HGE with this authentication service, just a few (those in _test_webhook.py_). Because this reduced coverage, I have added some more tests there, which actually cover some areas not exacerbated elsewhere (mainly trying to use webhook credentials to talk to an admin-only endpoint).
The webhook service can run both with and without TLS, and decide whether it's necessary to skip one of these based on the arguments passed and how HGE is started, according to the following logic:
* If a TLS CA certificate is passed in, it will run with TLS, otherwise it will skip it.
* If HGE was started externally and a TLS certificate is provided, it will skip running without TLS, as it will assume that HGE was configured to talk to a webhook over HTTPS.
* Some tests should only be run with TLS; this is marked with a `tls_webhook_server` marker.
* Some tests should only be run _without_ TLS; this is marked with a `no_tls_webhook_server` marker.
The actual parameterization of the webhook service configuration is done through test subclasses, because normal pytest parameterization doesn't work with the `hge_fixture_env` hack that we use. Because `hge_fixture_env` is not a sanctioned way of conveying data between fixtures (and, unfortunately, there isn't a sanctioned way of doing this when the fixtures in question may not know about each other directly), parameterizing the `webhook_server` fixture doesn't actually parameterize `hge_server` properly. Subclassing forces this to work correctly.
The certificate generation is moved to a Python fixture, so that we don't have to revoke the CA certificate for _test_webhook_insecure.py_; we can just generate a bogus certificate instead. The CA certificate is still generated in the _test-server.sh_ script, as it needs to be installed into the OS certificate store.
Interestingly, the CA certificate installation wasn't actually working, because the certificates were written to the wrong location. This didn't cause any failures, as we weren't actually testing this behavior. This is now fixed with the other changes.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/6363
GitOrigin-RevId: 0f277d374daa64f657257ed2a4c2057c74b911db
2022-10-20 21:58:36 +03:00
|
|
|
hge_key: Optional[str],
|
|
|
|
webhook: Optional[HGECtxWebhook],
|
|
|
|
enabled_apis: Optional[set[str]],
|
2022-11-15 22:07:34 +03:00
|
|
|
clear_dbs: bool,
|
server/tests-py: Start webhook.py inside the test harness.
We use a helper service to start a webhook-based authentication service for some tests. This moves the initialization of the service out of _test-server.sh_ and into the Python test harness, as a fixture.
In order to do this, I had to make a few changes. The main deviation is that we no longer run _all_ tests against an HGE with this authentication service, just a few (those in _test_webhook.py_). Because this reduced coverage, I have added some more tests there, which actually cover some areas not exacerbated elsewhere (mainly trying to use webhook credentials to talk to an admin-only endpoint).
The webhook service can run both with and without TLS, and decide whether it's necessary to skip one of these based on the arguments passed and how HGE is started, according to the following logic:
* If a TLS CA certificate is passed in, it will run with TLS, otherwise it will skip it.
* If HGE was started externally and a TLS certificate is provided, it will skip running without TLS, as it will assume that HGE was configured to talk to a webhook over HTTPS.
* Some tests should only be run with TLS; this is marked with a `tls_webhook_server` marker.
* Some tests should only be run _without_ TLS; this is marked with a `no_tls_webhook_server` marker.
The actual parameterization of the webhook service configuration is done through test subclasses, because normal pytest parameterization doesn't work with the `hge_fixture_env` hack that we use. Because `hge_fixture_env` is not a sanctioned way of conveying data between fixtures (and, unfortunately, there isn't a sanctioned way of doing this when the fixtures in question may not know about each other directly), parameterizing the `webhook_server` fixture doesn't actually parameterize `hge_server` properly. Subclassing forces this to work correctly.
The certificate generation is moved to a Python fixture, so that we don't have to revoke the CA certificate for _test_webhook_insecure.py_; we can just generate a bogus certificate instead. The CA certificate is still generated in the _test-server.sh_ script, as it needs to be installed into the OS certificate store.
Interestingly, the CA certificate installation wasn't actually working, because the certificates were written to the wrong location. This didn't cause any failures, as we weren't actually testing this behavior. This is now fixed with the other changes.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/6363
GitOrigin-RevId: 0f277d374daa64f657257ed2a4c2057c74b911db
2022-10-20 21:58:36 +03:00
|
|
|
config,
|
|
|
|
):
|
2018-09-18 09:21:57 +03:00
|
|
|
self.http = requests.Session()
|
2022-09-20 13:54:44 +03:00
|
|
|
self.timeout = 120 # BigQuery can take a while
        self.hge_url = hge_url
        self.metadata_schema_url = metadata_schema_url
        self.hge_key = hge_key
        self.webhook = webhook
        hge_jwt_key_file = config.getoption('--hge-jwt-key-file')
        if hge_jwt_key_file is None:
            self.hge_jwt_key = None
        else:
            with open(hge_jwt_key_file) as f:
                self.hge_jwt_key = f.read()
        self.hge_jwt_conf = config.getoption('--hge-jwt-conf')
        if self.hge_jwt_conf is not None:
            self.hge_jwt_conf_dict = json.loads(self.hge_jwt_conf)
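            # The JWT config names the Ed25519 scheme by its key type, but the
            # JOSE `alg` for it (per RFC 8037) is "EdDSA", so normalize it here.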
            self.hge_jwt_algo = self.hge_jwt_conf_dict["type"]
            if self.hge_jwt_algo == "Ed25519":
                self.hge_jwt_algo = "EdDSA"
        self.may_skip_test_teardown = False

        # This will be GC'd, but we also explicitly dispose() in teardown()
        self.engine = sqlalchemy.create_engine(self.metadata_schema_url)
        self.meta = sqlalchemy.schema.MetaData()

        self.hge_scale_url = config.getoption('--test-hge-scale-url')
        self.avoid_err_msg_checks = config.getoption('--avoid-error-message-checks')
        self.pro_tests = config.getoption('--pro-tests')
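
        # WebSocket clients for each GraphQL-over-WebSocket endpoint (and, for
        # /v1/graphql, both the legacy protocol and the newer graphql-ws one).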
        self.ws_client = GQLWsClient(self, '/v1/graphql')
        self.ws_client_v1alpha1 = GQLWsClient(self, '/v1alpha1/graphql')
        self.ws_client_relay = GQLWsClient(self, '/v1beta1/relay')
        self.ws_client_graphql_ws = GraphQLWSClient(self, '/v1/graphql')

        self.backend = config.getoption('--backend')
        self.default_backend = 'postgres'
        self.is_default_backend = self.backend == self.default_backend
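
        # Determine the HGE version: prefer the VERSION environment variable,
        # otherwise fall back to the repository's version script.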
        env_version = os.getenv('VERSION')
        if env_version:
            self.version = env_version
        else:
            # HGE version
            result = subprocess.run(['../../scripts/get-version.sh'], shell=False, stdout=subprocess.PIPE, check=True)
            self.version = result.stdout.decode('utf-8').strip()

        # TODO: remove once parallelization work is completed
        if clear_dbs and self.is_default_backend and (not enabled_apis or 'metadata' in enabled_apis) and not config.getoption('--skip-schema-setup'):
            try:
                self.v2q_f("queries/" + self.backend_suffix("clear_db") + ".yaml")
            except requests.exceptions.RequestException as e:
                self.teardown()
                raise HGECtxError(repr(e))
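
    # Re-reads table definitions from the metadata database into `self.meta`,
    # so tests can inspect the current schema via SQLAlchemy.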
    def reflect_tables(self):
        self.meta.reflect(bind=self.engine)
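
    # Sends a request to HGE at path `u`, choosing the HTTP method from `v`
    # (defaulting to a JSON POST of the query `q`), with headers `h` and an
    # optional raw body `b`. Returns (status code, decoded JSON, response headers).
    # Hypothetical usage:
    #   code, body, headers = ctx.anyq('/v1/metadata', {'type': 'export_metadata', 'args': {}}, {})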
    def anyq(self, u, q, h, b = None, v = None):
        resp = None
        if v == 'GET':
            resp = self.http.get(
                self.hge_url + u,
                headers=h,
                timeout=self.timeout,
            )
        elif v == 'POSTJSON' and b:
            resp = self.http.post(
                self.hge_url + u,
                json=b,
                headers=h,
                timeout=self.timeout,
            )
        elif v == 'POST' and b:
            # TODO: Figure out why the requests are failing with a byte object passed in as `data`
            resp = self.http.post(
                self.hge_url + u,
                data=b,
                headers=h,
                timeout=self.timeout,
            )
        elif v == 'PATCH' and b:
            resp = self.http.patch(
                self.hge_url + u,
                data=b,
                headers=h,
                timeout=self.timeout,
            )
        elif v == 'PUT' and b:
            resp = self.http.put(
                self.hge_url + u,
                data=b,
                headers=h,
                timeout=self.timeout,
            )
        elif v == 'DELETE':
            resp = self.http.delete(
                self.hge_url + u,
                headers=h,
                timeout=self.timeout,
            )
        else:
            resp = self.http.post(
                self.hge_url + u,
                json=q,
                headers=h,
                timeout=self.timeout,
            )
        # NOTE: make sure we preserve key ordering so we can test the ordering
        # properties in the graphql spec properly
        # Returning response headers to get the request id from response
        return resp.status_code, resp.json(object_pairs_hook=OrderedDict), resp.headers

    # Executes a query, but does not return the result.
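    # `q` runs directly against the metadata database; with SQLAlchemy 1.x a
    # plain SQL string is accepted here (SQLAlchemy 2.x would require wrapping
    # it in sqlalchemy.text()).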
    def sql(self, q):
        with self.engine.connect() as conn:
            conn.execute(q)
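
    # POSTs `q` as JSON to the given HGE endpoint, adding the admin secret
    # header when one is configured and the caller has not set it, and asserts
    # on the response status before returning the decoded body.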
    def execute_query(self, q, url_path, headers = {}, expected_status_code = 200):
        h = headers.copy()
        if self.hge_key is not None and 'X-Hasura-Admin-Secret' not in headers:
            h['X-Hasura-Admin-Secret'] = self.hge_key
        resp = self.http.post(
            self.hge_url + url_path,
            json=q,
            headers=h,
            timeout=self.timeout,
        )
        # NOTE: make sure we preserve key ordering so we can test the ordering
        # properties in the graphql spec properly
        # Don't assume `resp` is a JSON object
        resp_obj = {} if resp.status_code == 500 else resp.json(object_pairs_hook=OrderedDict)
        if expected_status_code:
            assert \
                resp.status_code == expected_status_code, \
                f'Expected {resp.status_code} to be {expected_status_code}.\nRequest:\n{json.dumps(q, indent=2)}\nResponse:\n{json.dumps(resp_obj, indent=2)}'
        return resp_obj
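
    # Thin wrappers over execute_query: each of the following methods targets
    # the matching HGE endpoint, and each *_f variant loads its request
    # payload from a YAML file.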
    def v1q(self, q, headers = {}, expected_status_code = 200):
        return self.execute_query(q, "/v1/query", headers, expected_status_code)

    def v1q_f(self, filepath, headers = {}, expected_status_code = 200):
        with open(filepath) as f:
            # NOTE: preserve ordering with ruamel
            yml = yaml.YAML()
            return self.v1q(yml.load(f), headers, expected_status_code)

    def v2q(self, q, headers = {}, expected_status_code = 200):
        return self.execute_query(q, "/v2/query", headers, expected_status_code)

    def v2q_f(self, filepath, headers = {}, expected_status_code = 200):
        with open(filepath) as f:
            # NOTE: preserve ordering with ruamel
            yml = yaml.YAML()
            return self.v2q(yml.load(f), headers, expected_status_code)
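
    # Appends the backend name to fixture filenames for non-default backends,
    # e.g. "clear_db" -> "clear_db_mssql" when running with --backend mssql.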
    def backend_suffix(self, filename):
        if self.is_default_backend:
            return filename
        else:
            return filename + "_" + self.backend

    def v1metadataq(self, q, headers = {}, expected_status_code = 200):
        return self.execute_query(q, "/v1/metadata", headers, expected_status_code)

    def v1metadataq_f(self, filepath, headers = {}, expected_status_code = 200):
        with open(filepath) as f:
            # NOTE: preserve ordering with ruamel
            yml = yaml.YAML()
            return self.v1metadataq(yml.load(f), headers, expected_status_code)

    def v1graphqlq(self, q, headers = {}, expected_status_code = 200):
        return self.execute_query(q, "/v1/graphql", headers, expected_status_code)

    def v1graphql_f(self, filepath, headers = {}, expected_status_code = 200):
        with open(filepath) as f:
            # NOTE: preserve ordering with ruamel
            yml = yaml.YAML()
            return self.v1graphqlq(yml.load(f), headers, expected_status_code)
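
    # Releases everything the constructor acquired: the shared HTTP session,
    # the SQLAlchemy engine, and all WebSocket clients.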
    def teardown(self):
        self.http.close()
        self.engine.dispose()
        # Close websockets:
        self.ws_client.teardown()
        self.ws_client_v1alpha1.teardown()
        self.ws_client_relay.teardown()
        self.ws_client_graphql_ws.teardown()
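
    # Asks HGE to explain a GraphQL query (e.g. the generated SQL / execution
    # plan) without executing it.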
    def v1GraphqlExplain(self, q, headers = {}, expected_status_code = 200):
        return self.execute_query(q, '/v1/graphql/explain', headers, expected_status_code)