#!/usr/bin/env python3

import graphql
from http import HTTPStatus
import http.server
import json
import os
import queue
import random
import re
import requests
import ruamel.yaml as yaml
from ruamel.yaml.comments import CommentedMap as OrderedDict # to avoid '!!omap' in yaml
import socket
import socketserver
import sqlalchemy
import sqlalchemy.schema
import string
import subprocess
import threading
import time
from typing import Any
from urllib.parse import urlparse
import websocket

import graphql_server


# pytest has removed the global pytest.config
# As a solution to this we are going to store it in PytestConf.config
class PytestConf():
    config: Any
    pass
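# Illustrative sketch only (not part of this module): a conftest.py would
# typically populate PytestConf.config from a pytest hook, e.g.
#
#     def pytest_configure(config):
#         PytestConf.config = config
#
# after which helpers here can read command-line options such as
# PytestConf.config.getoption('--hge-key'). The hook body shown is an assumption.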


class HGECtxError(Exception):
    pass


# NOTE: use this to generate a GraphQL client that uses the `Apollo`(subscription-transport-ws) sub-protocol
class GQLWsClient():

    def __init__(self, hge_ctx, endpoint):
        self.hge_ctx = hge_ctx
        self.ws_queue = queue.Queue(maxsize=-1)
        self.ws_url = urlparse(hge_ctx.hge_url)._replace(scheme='ws',
                                                         path=endpoint)
        self.create_conn()

    def create_conn(self):
        self.ws_queue.queue.clear()
        self.ws_id_query_queues = dict()
        self.ws_active_query_ids = set()

        self.connected_event = threading.Event()
        self.init_done = False
        self.is_closing = False
        self.remote_closed = False

        self._ws = websocket.WebSocketApp(self.ws_url.geturl(),
                                          on_open=self._on_open,
                                          on_message=self._on_message,
                                          on_close=self._on_close)
        self.wst = threading.Thread(target=self._ws.run_forever)
        self.wst.daemon = True
        self.wst.start()

    def recreate_conn(self):
        self.teardown()
        self.create_conn()

    def wait_for_connection(self, timeout=10):
        assert not self.is_closing
        assert self.connected_event.wait(timeout=timeout)

    def get_ws_event(self, timeout):
        return self.ws_queue.get(timeout=timeout)

    def has_ws_query_events(self, query_id):
        return not self.ws_id_query_queues[query_id].empty()

    def get_ws_query_event(self, query_id, timeout):
        return self.ws_id_query_queues[query_id].get(timeout=timeout)

    def send(self, frame):
        self.wait_for_connection()
        if frame.get('type') == 'stop':
            self.ws_active_query_ids.discard(frame.get('id'))
        elif frame.get('type') == 'start' and 'id' in frame:
            self.ws_id_query_queues[frame['id']] = queue.Queue(maxsize=-1)
        self._ws.send(json.dumps(frame))

    def init_as_admin(self):
        headers = {}
        if self.hge_ctx.hge_key:
            headers = {'x-hasura-admin-secret': self.hge_ctx.hge_key}
        self.init(headers)

    def init(self, headers={}):
        payload = {'type': 'connection_init', 'payload': {}}

        if headers and len(headers) > 0:
            payload['payload']['headers'] = headers

        self.send(payload)
        ev = self.get_ws_event(3)
        assert ev['type'] == 'connection_ack', ev
        self.init_done = True

    def stop(self, query_id):
        data = {'id': query_id, 'type': 'stop'}
        self.send(data)
        self.ws_active_query_ids.discard(query_id)

    def gen_id(self, size=6, chars=string.ascii_letters + string.digits):
        new_id = ''.join(random.choice(chars) for _ in range(size))
        if new_id in self.ws_active_query_ids:
            return self.gen_id(size, chars)
        return new_id

    def send_query(self, query, query_id=None, headers={}, timeout=60):
        graphql.parse(query['query'])
        if headers and len(headers) > 0:
            # Do init if headers are provided
            self.init(headers)
        elif not self.init_done:
            self.init()
        if query_id is None:
            query_id = self.gen_id()
        frame = {
            'id': query_id,
            'type': 'start',
            'payload': query,
        }
        self.ws_active_query_ids.add(query_id)
        self.send(frame)
        while True:
            yield self.get_ws_query_event(query_id, timeout)

    def _on_open(self):
        if not self.is_closing:
            self.connected_event.set()

    def _on_message(self, message):
        # NOTE: make sure we preserve key ordering so we can test the ordering
        # properties in the graphql spec properly
        json_msg = json.loads(message, object_pairs_hook=OrderedDict)
        if 'id' in json_msg:
            query_id = json_msg['id']
            if json_msg.get('type') == 'stop':
                # Remove from the active queries list
                self.ws_active_query_ids.discard(query_id)
            if query_id not in self.ws_id_query_queues:
                self.ws_id_query_queues[json_msg['id']] = queue.Queue(maxsize=-1)
            # Put the event in the corresponding query queue
            self.ws_id_query_queues[query_id].put(json_msg)
        elif json_msg['type'] != 'ka':
            # Put the event in the main queue
            self.ws_queue.put(json_msg)

    def _on_close(self):
        self.remote_closed = True
        self.init_done = False

    def teardown(self):
        self.is_closing = True
        if not self.remote_closed:
            self._ws.close()
        self.wst.join()
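# A rough usage sketch (illustrative only; the fixture name and subscription
# text are assumptions, not part of this module): tests typically drive this
# client through HGECtx, e.g.
#
#     ws_client = GQLWsClient(hge_ctx, '/v1/graphql')
#     ws_client.init_as_admin()
#     query = {'query': 'subscription { user { id } }'}
#     events = ws_client.send_query(query, timeout=10)
#     first = next(events)       # blocks until the first frame for this query id
#     ws_client.stop(first['id'])
#     ws_client.teardown()
#
# send_query is a generator: each next() call pops the next frame for that
# query id from its per-query queue.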


# NOTE: use this to generate a GraphQL client that uses the `graphql-ws` sub-protocol
class GraphQLWSClient():

    def __init__(self, hge_ctx, endpoint):
        self.hge_ctx = hge_ctx
        self.ws_queue = queue.Queue(maxsize=-1)
        self.ws_url = urlparse(hge_ctx.hge_url)._replace(scheme='ws',
                                                         path=endpoint)
        self.create_conn()

    def get_queue(self):
        return self.ws_queue.queue

    def clear_queue(self):
        self.ws_queue.queue.clear()

    def create_conn(self):
        self.ws_queue.queue.clear()
        self.ws_id_query_queues = dict()
        self.ws_active_query_ids = set()

        self.connected_event = threading.Event()
        self.init_done = False
        self.is_closing = False
        self.remote_closed = False

        self._ws = websocket.WebSocketApp(self.ws_url.geturl(),
                                          on_open=self._on_open,
                                          on_message=self._on_message,
                                          on_close=self._on_close,
                                          subprotocols=["graphql-transport-ws"])
        self.wst = threading.Thread(target=self._ws.run_forever)
        self.wst.daemon = True
        self.wst.start()

    def recreate_conn(self):
        self.teardown()
        self.create_conn()

    def wait_for_connection(self, timeout=10):
        assert not self.is_closing
        assert self.connected_event.wait(timeout=timeout)

    def get_ws_event(self, timeout):
        return self.ws_queue.get(timeout=timeout)

    def has_ws_query_events(self, query_id):
        return not self.ws_id_query_queues[query_id].empty()

    def get_ws_query_event(self, query_id, timeout):
        return self.ws_id_query_queues[query_id].get(timeout=timeout)

    def send(self, frame):
        self.wait_for_connection()
        if frame.get('type') == 'complete':
            self.ws_active_query_ids.discard(frame.get('id'))
        elif frame.get('type') == 'subscribe' and 'id' in frame:
            self.ws_id_query_queues[frame['id']] = queue.Queue(maxsize=-1)
        self._ws.send(json.dumps(frame))

    def init_as_admin(self):
        headers = {}
        if self.hge_ctx.hge_key:
            headers = {'x-hasura-admin-secret': self.hge_ctx.hge_key}
        self.init(headers)

    def init(self, headers={}):
        payload = {'type': 'connection_init', 'payload': {}}

        if headers and len(headers) > 0:
            payload['payload']['headers'] = headers

        self.send(payload)
        ev = self.get_ws_event(5)
        assert ev['type'] == 'connection_ack', ev
        self.init_done = True

    def stop(self, query_id):
        data = {'id': query_id, 'type': 'complete'}
        self.send(data)
        self.ws_active_query_ids.discard(query_id)

    def gen_id(self, size=6, chars=string.ascii_letters + string.digits):
        new_id = ''.join(random.choice(chars) for _ in range(size))
        if new_id in self.ws_active_query_ids:
            return self.gen_id(size, chars)
        return new_id

    def send_query(self, query, query_id=None, headers={}, timeout=60):
        graphql.parse(query['query'])
        if headers and len(headers) > 0:
            # Do init if headers are provided
            self.clear_queue()
            self.init(headers)
        elif not self.init_done:
            self.init()
        if query_id is None:
            query_id = self.gen_id()
        frame = {
            'id': query_id,
            'type': 'subscribe',
            'payload': query,
        }
        self.ws_active_query_ids.add(query_id)
        self.send(frame)
        while True:
            yield self.get_ws_query_event(query_id, timeout)

    def _on_open(self):
        if not self.is_closing:
            self.connected_event.set()

    def _on_message(self, message):
        # NOTE: make sure we preserve key ordering so we can test the ordering
        # properties in the graphql spec properly
        json_msg = json.loads(message, object_pairs_hook=OrderedDict)
        if json_msg['type'] == 'ping':
            # Answer the server's keep-alive ping with a pong frame.
            # (send() expects a dict and serialises it itself.)
            new_msg = json_msg
            new_msg['type'] = 'pong'
            self.send(new_msg)
            return

        if 'id' in json_msg:
            query_id = json_msg['id']
            if json_msg.get('type') == 'complete':
                # Remove from the active queries list
                self.ws_active_query_ids.discard(query_id)
            if query_id not in self.ws_id_query_queues:
                self.ws_id_query_queues[json_msg['id']] = queue.Queue(maxsize=-1)
            # Put the event in the corresponding query queue
            self.ws_id_query_queues[query_id].put(json_msg)

        if json_msg['type'] != 'ping':
            self.ws_queue.put(json_msg)

    def _on_close(self):
        self.remote_closed = True
        self.init_done = False

    def get_conn_close_state(self):
        return self.remote_closed or self.is_closing

    def teardown(self):
        self.is_closing = True
        if not self.remote_closed:
            self._ws.close()
        self.wst.join()
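# For reference: the two clients above differ mainly in sub-protocol framing.
# GQLWsClient (Apollo / subscriptions-transport-ws) uses 'start'/'stop' frames
# and 'ka' keep-alives, while GraphQLWSClient (the "graphql-transport-ws"
# sub-protocol) uses 'subscribe'/'complete' frames and answers 'ping' with
# 'pong'. Both perform the same 'connection_init' -> 'connection_ack'
# handshake in init().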


class ActionsWebhookHandler(http.server.BaseHTTPRequestHandler):

    def do_GET(self):
        self.send_response(HTTPStatus.OK)
        self.end_headers()

    def do_POST(self):
        content_len = self.headers.get('Content-Length')
        req_body = self.rfile.read(int(content_len)).decode("utf-8")
        self.req_json = json.loads(req_body)
        req_headers = self.headers
        req_path = self.path
        self.log_message(json.dumps(self.req_json))

        if req_path == "/create-user":
            resp, status = self.create_user()
            self._send_response(status, resp)

        elif req_path == "/create-user-timeout":
            time.sleep(3)
            resp, status = self.create_user()
            self._send_response(status, resp)

        elif req_path == "/create-users":
            resp, status = self.create_users()
            self._send_response(status, resp)

        elif req_path == "/create-user-nested":
            resp, status = self.create_user_nested()
            self._send_response(status, resp)

        elif req_path == "/mirror-action":
            resp, status = self.mirror_action()
            self._send_response(status, resp)

        elif req_path == "/mirror-headers":
            resp, status = self.mirror_headers()
            self._send_response(status, resp)

        elif req_path == "/get-user-by-email":
            resp, status = self.get_users_by_email(True)
            self._send_response(status, resp)

        elif req_path == "/get-user-by-email-nested":
            resp, status = self.get_users_by_email_nested(True)
            self._send_response(status, resp)

        elif req_path == "/get-users-by-email":
            resp, status = self.get_users_by_email(False)
            self._send_response(status, resp)

        elif req_path == "/get-users-by-email-nested":
            resp, status = self.get_users_by_email_nested(False)
            self._send_response(status, resp)

        elif req_path == "/intentional-error":
            resp, status = self.intentional_error()
            self._send_response(status, resp)

        elif req_path == "/null-response":
            resp, status = self.null_response()
            self._send_response(status, resp)

        elif req_path == "/scalar-response":
            self._send_response(HTTPStatus.OK, "some-string")

        elif req_path == "/json-response":
            resp, status = self.json_response()
            self._send_response(status, resp)

        elif req_path == "/custom-scalar-array-response":
            resp, status = self.custom_scalar_array_response()
            self._send_response(status, resp)

        elif req_path == "/scalar-array-response":
            self._send_response(HTTPStatus.OK, ["foo", "bar", None])

        elif req_path == "/recursive-output":
            resp, status = self.recursive_output()
            self._send_response(status, resp)

        elif req_path == "/get-results":
            resp, status = self.get_results()
            self._send_response(status, resp)

        elif req_path == "/typed-nested-null":
            self._send_response(
                HTTPStatus.OK,
                self.get_typed_nested_null()
            )

        elif req_path == "/typed-nested-null-wrong-field":
            self._send_response(
                HTTPStatus.OK,
                self.get_typed_nested_null_wrong_field()
            )

        elif req_path == "/get_messages":
            resp, status = self.get_messages()
            self._send_response(status, resp)

        else:
            self.send_response(HTTPStatus.NO_CONTENT)
            self.end_headers()

    def intentional_error(self):
        blob = self.req_json['input']['blob']
        return blob, HTTPStatus.BAD_REQUEST

    def create_user(self):
        email_address = self.req_json['input']['email']
        name = self.req_json['input']['name']

        if not self.check_email(email_address):
            response = {
                'message': 'Given email address is not valid',
                'code': 'invalid-email'
            }
            return response, HTTPStatus.BAD_REQUEST

        gql_query = '''
        mutation ($email: String! $name: String!) {
          insert_user_one(object: {email: $email, name: $name}){
            id
          }
        }
        '''
        query = {
            'query': gql_query,
            'variables': {
                'email': email_address,
                'name': name
            }
        }
        code, resp = self.execute_query(query)
        if code != 200 or 'data' not in resp:
            response = {
                'message': 'GraphQL query execution failed',
                'code': 'unexpected'
            }
            return response, HTTPStatus.BAD_REQUEST

        response = resp['data']['insert_user_one']

        return response, HTTPStatus.OK

    def create_users(self):
        inputs = self.req_json['input']['users']
        for input in inputs:
            email_address = input['email']
            if not self.check_email(email_address):
                response = {
                    'message': 'Email address is not valid: ' + email_address,
                    'code': 'invalid-email'
                }
                return response, HTTPStatus.BAD_REQUEST

        gql_query = '''
        mutation ($insert_inputs: [user_insert_input!]!){
          insert_user(objects: $insert_inputs){
            returning{
              id
            }
          }
        }
        '''
        query = {
            'query': gql_query,
            'variables': {
                'insert_inputs': inputs
            }
        }
        code, resp = self.execute_query(query)
        if code != 200 or 'data' not in resp:
            response = {
                'message': 'GraphQL query execution failed',
                'code': 'unexpected'
            }
            return response, HTTPStatus.BAD_REQUEST

        response = resp['data']['insert_user']['returning']
        return response, HTTPStatus.OK

    def create_user_nested(self):
        email_address = self.req_json['input']['email']
        name = self.req_json['input']['name']

        if not self.check_email(email_address):
            response = {
                'message': 'Given email address is not valid',
                'code': 'invalid-email'
            }
            return response, HTTPStatus.BAD_REQUEST

        gql_query = '''
        mutation ($email: String! $name: String!) {
          insert_user_one(object: {email: $email, name: $name}){
            id
          }
        }
        '''
        query = {
            'query': gql_query,
            'variables': {
                'email': email_address,
                'name': name
            }
        }
        code, resp = self.execute_query(query)
        if code != 200 or 'data' not in resp:
            response = {
                'message': 'GraphQL query execution failed',
                'code': 'unexpected'
            }
            return response, HTTPStatus.BAD_REQUEST

        response = {
            'userObj': resp['data']['insert_user_one']
        }
        return response, HTTPStatus.OK

    def get_messages(self):
        response = [
            {"content": "baz", "user_name": "foo"},
            {"content": "foo", "user_name": "bar"}
        ]
        return response, HTTPStatus.OK

    def mirror_action(self):
        response = self.req_json['input']['arg']
        return response, HTTPStatus.OK

    def mirror_headers(self):
        response = {
            'headers': list(map(lambda header: {'name': header[0], 'value': header[1]}, self.headers.items()))
        }
        return response, HTTPStatus.OK

    def get_users_by_email(self, singleUser = False):
        email = self.req_json['input']['email']
        if not self.check_email(email):
            response = {
                'message': 'Given email address is not valid',
                'code': 'invalid-email'
            }
            return response, HTTPStatus.BAD_REQUEST
        gql_query = '''
        query get_user($email:String!) {
          user(where:{email:{_eq:$email}},order_by: {id: asc}) {
            id
          }
        }
        '''
        query = {
            'query': gql_query,
            'variables': {
                'email': email
            }
        }
        code, resp = self.execute_query(query)
        if code != 200 or 'data' not in resp:
            response = {
                'message': 'GraphQL query execution failed',
                'code': 'unexpected'
            }
            return response, HTTPStatus.BAD_REQUEST
        if singleUser:
            return resp['data']['user'][0], HTTPStatus.OK
        else:
            return resp['data']['user'], HTTPStatus.OK

    def get_users_by_email_nested(self, singleUser = False):
        resp, status = self.get_users_by_email(singleUser)

        def make_nested_out_object(outObj):
            address = {'city': 'New York', 'country': 'USA'}
            outObj['address'] = address
            addresses = [{'city': 'Bangalore', 'country': 'India'}, {'city': 'Melbourne', 'country': 'Australia'}]
            outObj['addresses'] = addresses
            outObj['user_id'] = {'id': outObj['id']}
            return outObj

        if status != HTTPStatus.OK:
            return resp, status
        if singleUser:
            return make_nested_out_object(resp), status
        else:
            # Materialise the map so the response can be JSON-serialised by _send_response
            return list(map(make_nested_out_object, resp)), status

    def get_typed_nested_null(self):
        return {
            'id': 1,
            'child': None
        }

    def get_typed_nested_null_wrong_field(self):
        return {
            'id': None,
            'child': None
        }

    def null_response(self):
        response = None
        return response, HTTPStatus.OK

    def json_response(self):
        response = {
            'foo': 'bar'
        }
        return response, HTTPStatus.OK

    def custom_scalar_array_response(self):
        response = [{
            'foo': 'bar'
        }]
        return response, HTTPStatus.OK

    def recursive_output(self):
        return {
            'direct': {'id': 1, 'this': {'id': 2, 'this': {'id': 3}}},
            'list': {'id': 1, 'these': [{'id': 2, 'these': [{'id': 3}]}, {'id': 4}]},
            'mutual': {'id': 1, 'that': {'id': 2, 'other': {'id': 3, 'that': {'id': 4}}}}
        }, HTTPStatus.OK

    def get_results(self):
        return {
            'result_ids': [1, 2, 3, 4]
        }, HTTPStatus.OK

    def check_email(self, email):
        regex = '^\\w+([\\.-]?\\w+)*@\\w+([\\.-]?\\w+)*(\\.\\w{2,3})+$'
        return re.search(regex, email)

    def execute_query(self, query):
        headers = {}
        admin_secret = self.hge_ctx.hge_key
        if admin_secret is not None:
            headers['X-Hasura-Admin-Secret'] = admin_secret
        code, resp, _ = self.hge_ctx.anyq('/v1/graphql', query, headers)
        self.log_message(json.dumps(resp))
        return code, resp

    def _send_response(self, status, body):
        self.log_request(status)
        self.send_response_only(status)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Set-Cookie', 'abcd')
        self.end_headers()
        self.wfile.write(json.dumps(body).encode("utf-8"))


class ActionsWebhookServer(http.server.HTTPServer):
    def __init__(self, hge_ctx, server_address):
        handler = ActionsWebhookHandler
        handler.hge_ctx = hge_ctx
        super().__init__(server_address, handler)

    def server_bind(self):
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.server_address)
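# A minimal sketch of how a test fixture might run this actions webhook (the
# port and fixture shape are assumptions, not something this module mandates):
#
#     webhook = ActionsWebhookServer(hge_ctx, server_address=('127.0.0.1', 5593))
#     t = threading.Thread(target=webhook.serve_forever)
#     t.start()
#     ...   # run action tests with HGE configured to call http://127.0.0.1:5593
#     webhook.shutdown()
#     webhook.server_close()
#     t.join()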


class EvtsWebhookHandler(http.server.BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(HTTPStatus.OK)
        self.end_headers()

    def do_POST(self):
        content_len = self.headers.get('Content-Length')
        req_body = self.rfile.read(int(content_len)).decode("utf-8")
        req_json = json.loads(req_body)
        req_headers = self.headers
        req_path = self.path
        self.log_message(json.dumps(req_json))
        if req_path == "/fail":
            self.send_response(HTTPStatus.INTERNAL_SERVER_ERROR)
            self.end_headers()
        # This endpoint just sleeps for 2 seconds:
        elif req_path == "/sleep_2s":
            time.sleep(2)
            self.send_response(HTTPStatus.NO_CONTENT)
            self.end_headers()
        # This is like the sleep endpoint above, but allows us to decide
        # externally when the webhook can return, with unblock()
        elif req_path == "/block":
            if not self.server.unblocked:
                self.server.blocked_count += 1
                with self.server.unblocked_wait:
                    # We expect this timeout never to be reached, but if
                    # something goes wrong the main thread will block forever:
                    self.server.unblocked_wait.wait(timeout=60)
                self.server.blocked_count -= 1
            self.send_response(HTTPStatus.NO_CONTENT)
            self.end_headers()
        else:
            self.send_response(HTTPStatus.NO_CONTENT)
            self.end_headers()

        self.server.resp_queue.put({"path": req_path,
                                    "body": req_json,
                                    "headers": req_headers})


# A very slightly more sane/performant http server.
# See: https://stackoverflow.com/a/14089457/176841
#
# TODO use this elsewhere, or better yet: use e.g. bottle + waitress
class ThreadedHTTPServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
    """Handle requests in a separate thread."""


class EvtsWebhookServer(ThreadedHTTPServer):
    def __init__(self, server_address):
        # Data received from hasura by our web hook, pushed after it returns to the client:
        self.resp_queue = queue.Queue()
        # We use these two vars to coordinate unblocking in the /block route
        self.unblocked = False
        self.unblocked_wait = threading.Condition()
        # ...and this for bookkeeping open blocked requests; this becomes
        # meaningless after the first call to unblock()
        self.blocked_count = 0

        super().__init__(server_address, EvtsWebhookHandler)

    # Unblock all webhook requests to /block. Idempotent.
    def unblock(self):
        self.unblocked = True
        with self.unblocked_wait:
            # NOTE: this only affects currently wait()-ing threads, future
            # wait()s will block again (hence the simple self.unblocked flag)
            self.unblocked_wait.notify_all()

    def server_bind(self):
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.server_address)

    def get_event(self, timeout):
        return self.resp_queue.get(timeout=timeout)

    def is_queue_empty(self):
        # Call empty() so this returns a bool rather than the bound method.
        return self.resp_queue.empty()

    def teardown(self):
        self.evt_trggr_httpd.shutdown()
        self.evt_trggr_httpd.server_close()
        graphql_server.stop_server(self.graphql_server)
        self.gql_srvr_thread.join()
        self.evt_trggr_web_server.join()
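# A rough sketch of how the /block coordination above might be used in a test
# (the port and the specific assertions are illustrative assumptions):
#
#     evts_webhook = EvtsWebhookServer(('127.0.0.1', 5592))
#     threading.Thread(target=evts_webhook.serve_forever, daemon=True).start()
#     ...                                     # fire events whose webhook URL ends in /block
#     assert evts_webhook.blocked_count > 0   # deliveries are parked in /block
#     evts_webhook.unblock()                  # let all parked deliveries return
#     ev = evts_webhook.get_event(timeout=10) # then inspect what was delivered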


class HGECtxGQLServer:
    def __init__(self, hge_urls, port=5000):
        # start the graphql server
        self.port = port
        self._hge_urls = hge_urls
        self.is_running = False
        self.start_server()

    def start_server(self):
        if not self.is_running:
            self.graphql_server = graphql_server.create_server('127.0.0.1', self.port)
            self.hge_urls = graphql_server.set_hge_urls(self._hge_urls)
            self.gql_srvr_thread = threading.Thread(target=self.graphql_server.serve_forever)
            self.gql_srvr_thread.start()
            self.is_running = True

    def teardown(self):
        self.stop_server()

    def stop_server(self):
        if self.is_running:
            graphql_server.stop_server(self.graphql_server)
            self.gql_srvr_thread.join()
            self.is_running = False


class HGECtx:

    def __init__(self, hge_url, pg_url, config):

        self.http = requests.Session()
        self.hge_key = config.getoption('--hge-key')
        self.timeout = 60
        self.hge_url = hge_url
        self.pg_url = pg_url
        self.hge_webhook = config.getoption('--hge-webhook')
        hge_jwt_key_file = config.getoption('--hge-jwt-key-file')
        if hge_jwt_key_file is None:
            self.hge_jwt_key = None
        else:
            with open(hge_jwt_key_file) as f:
                self.hge_jwt_key = f.read()
        self.hge_jwt_conf = config.getoption('--hge-jwt-conf')
        if self.hge_jwt_conf is not None:
            self.hge_jwt_conf_dict = json.loads(self.hge_jwt_conf)
            self.hge_jwt_algo = self.hge_jwt_conf_dict["type"]
            if self.hge_jwt_algo == "Ed25519":
                self.hge_jwt_algo = "EdDSA"
        self.webhook_insecure = config.getoption('--test-webhook-insecure')
        self.metadata_disabled = config.getoption('--test-metadata-disabled')
        self.may_skip_test_teardown = False
        self.function_permissions = config.getoption('--test-function-permissions')
        self.streaming_subscriptions = config.getoption('--test-streaming-subscriptions')

        # This will be GC'd, but we also explicitly dispose() in teardown()
        self.engine = sqlalchemy.create_engine(self.pg_url)
        self.meta = sqlalchemy.schema.MetaData()

        self.ws_read_cookie = config.getoption('--test-ws-init-cookie')

        self.hge_scale_url = config.getoption('--test-hge-scale-url')
        self.avoid_err_msg_checks = config.getoption('--avoid-error-message-checks')
        self.pro_tests = config.getoption('--pro-tests')

        self.ws_client = GQLWsClient(self, '/v1/graphql')
        self.ws_client_v1alpha1 = GQLWsClient(self, '/v1alpha1/graphql')
        self.ws_client_relay = GQLWsClient(self, '/v1beta1/relay')
        self.ws_client_graphql_ws = GraphQLWSClient(self, '/v1/graphql')

        self.backend = config.getoption('--backend')
        self.default_backend = 'postgres'
        self.is_default_backend = self.backend == self.default_backend

        env_version = os.getenv('VERSION')
        if env_version:
            self.version = env_version
        else:
            # HGE version
            result = subprocess.run(['../../scripts/get-version.sh'], shell=False, stdout=subprocess.PIPE, check=True)
            self.version = result.stdout.decode('utf-8').strip()
        if self.is_default_backend and not self.metadata_disabled and not config.getoption('--skip-schema-setup'):
            try:
                self.v2q_f("queries/" + self.backend_suffix("clear_db") + ".yaml")
            except requests.exceptions.RequestException as e:
                self.teardown()
                raise HGECtxError(repr(e))

        # Postgres version
        if self.is_default_backend:
            with self.sql_query('show server_version_num') as result:
                pg_version_text = result.first()['server_version_num']
            self.pg_version = int(pg_version_text)

    def reflect_tables(self):
        self.meta.reflect(bind=self.engine)

    def anyq(self, u, q, h, b = None, v = None):

        resp = None
        if v == 'GET':
            resp = self.http.get(
                self.hge_url + u,
                headers=h,
                timeout=self.timeout,
            )
        elif v == 'POSTJSON' and b:
            resp = self.http.post(
                self.hge_url + u,
                json=b,
                headers=h,
                timeout=self.timeout,
            )
        elif v == 'POST' and b:
            # TODO: Figure out why the requests are failing with a byte object passed in as `data`
            resp = self.http.post(
                self.hge_url + u,
                data=b,
                headers=h,
                timeout=self.timeout,
            )
        elif v == 'PATCH' and b:
            resp = self.http.patch(
                self.hge_url + u,
                data=b,
                headers=h,
                timeout=self.timeout,
            )
        elif v == 'PUT' and b:
            resp = self.http.put(
                self.hge_url + u,
                data=b,
                headers=h,
                timeout=self.timeout,
            )
        elif v == 'DELETE':
            resp = self.http.delete(
                self.hge_url + u,
                headers=h,
                timeout=self.timeout,
            )
        else:
            resp = self.http.post(
                self.hge_url + u,
                json=q,
                headers=h,
                timeout=self.timeout,
            )
        # NOTE: make sure we preserve key ordering so we can test the ordering
        # properties in the graphql spec properly
        # Returning response headers to get the request id from the response
        return resp.status_code, resp.json(object_pairs_hook=OrderedDict), resp.headers
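    # A rough illustration of how anyq is used (the header value is just an
    # example): the actions webhook handler above calls it as
    #
    #     code, resp, resp_headers = hge_ctx.anyq('/v1/graphql', query, {'X-Hasura-Admin-Secret': '...'})
    #
    # When `v` is one of 'GET'/'POSTJSON'/'POST'/'PATCH'/'PUT'/'DELETE', the
    # body `b` and that verb are used instead of the default POST of `q`.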

    # Executes a query, but does not return the result.
    def sql(self, q):
        with self.engine.connect() as conn:
            conn.execute(q)

    # This allows us to use the result of the query in a callback, before closing the connection.
    def sql_query(self, q):
        return HGECtxQuery(self.engine, q)

    def execute_query(self, q, url_path, headers = {}, expected_status_code = 200):
        h = headers.copy()
        if self.hge_key is not None:
            h['X-Hasura-Admin-Secret'] = self.hge_key
        resp = self.http.post(
            self.hge_url + url_path,
            json=q,
            headers=h,
            timeout=self.timeout,
        )
        # NOTE: make sure we preserve key ordering so we can test the ordering
        # properties in the graphql spec properly
        # Don't assume `resp` is a JSON object
        resp_obj = {} if resp.status_code == 500 else resp.json(object_pairs_hook=OrderedDict)
        if expected_status_code:
            assert \
                resp.status_code == expected_status_code, \
                f'Expected {resp.status_code} to be {expected_status_code}.\nRequest:\n{json.dumps(q, indent=2)}\nResponse:\n{json.dumps(resp_obj, indent=2)}'
        return resp_obj

    def v1q(self, q, headers = {}, expected_status_code = 200):
        return self.execute_query(q, "/v1/query", headers, expected_status_code)

    def v1q_f(self, filepath, headers = {}, expected_status_code = 200):
        with open(filepath) as f:
            # NOTE: preserve ordering with ruamel
            yml = yaml.YAML()
            return self.v1q(yml.load(f), headers, expected_status_code)

    def v2q(self, q, headers = {}, expected_status_code = 200):
        return self.execute_query(q, "/v2/query", headers, expected_status_code)

    def v2q_f(self, filepath, headers = {}, expected_status_code = 200):
        with open(filepath) as f:
            # NOTE: preserve ordering with ruamel
            yml = yaml.YAML()
            return self.v2q(yml.load(f), headers, expected_status_code)
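
    # For illustration only (a made-up file, not one shipped with this module):
    # the *_f helpers above expect a YAML file containing a single query object,
    # e.g. a hypothetical queries/clear_db.yaml along the lines of
    #
    #     type: run_sql
    #     args:
    #       sql: DROP SCHEMA public CASCADE; CREATE SCHEMA public;
    #       cascade: true
    #
    # which v2q_f loads with ruamel (preserving key order) and POSTs to /v2/query.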

    def backend_suffix(self, filename):
        if self.is_default_backend:
            return filename
        else:
            return filename + "_" + self.backend

    def v1metadataq(self, q, headers = {}, expected_status_code = 200):
        return self.execute_query(q, "/v1/metadata", headers, expected_status_code)

    def v1metadataq_f(self, filepath, headers = {}, expected_status_code = 200):
        with open(filepath) as f:
            # NOTE: preserve ordering with ruamel
            yml = yaml.YAML()
            return self.v1metadataq(yml.load(f), headers, expected_status_code)

    def v1graphqlq(self, q, headers = {}, expected_status_code = 200):
        return self.execute_query(q, "/v1/graphql", headers, expected_status_code)

    def v1graphql_f(self, filepath, headers = {}, expected_status_code = 200):
        with open(filepath) as f:
            # NOTE: preserve ordering with ruamel
            yml = yaml.YAML()
            return self.v1graphqlq(yml.load(f), headers, expected_status_code)

    def teardown(self):
        self.http.close()
        self.engine.dispose()
        # Close websockets:
        self.ws_client.teardown()
        self.ws_client_v1alpha1.teardown()
        self.ws_client_relay.teardown()
        self.ws_client_graphql_ws.teardown()

    def v1GraphqlExplain(self, q, headers = {}, expected_status_code = 200):
        return self.execute_query(q, '/v1/graphql/explain', headers, expected_status_code)


class HGECtxQuery:
    def __init__(self, engine: sqlalchemy.engine.Engine, query: str):
        self.engine = engine
        self.query = query

    def __enter__(self):
        self.connection = self.engine.connect()
        return self.connection.execute(self.query)

    def __exit__(self, _exc_type, _exc_val, _exc_tb):
        if self.connection:
            self.connection.close()