#!/usr/bin/env bash
set -euo pipefail
shopt -s globstar

# A development swiss army knife script. The goals are to:
#
# - encode some best practices and hard-won knowledge of quirks and corners of
#   our tooling
# - simplify development; especially for new-comers; instead of writing a huge
#   document describing how to do various dev tasks (or worse yet, not writing
#   one), make it runnable
#
# This makes use of 'cabal.project.dev-sh*' files when building. See
# 'cabal.project.dev-sh.local'.
#
# The configuration for the containers of each backend is stored in
# separate files, see files in 'scripts/containers'
# Colorized logging helpers: green (info), red (error), yellow (warning).
echo_pretty() {
  echo ">>> $(tput setaf 2)$1$(tput sgr0)"
}

echo_error() {
  echo ">>> $(tput setaf 1)$1$(tput sgr0)"
}

echo_warn() {
  echo ">>> $(tput setaf 3)$1$(tput sgr0)"
}
# Print usage for this script and exit non-zero.
die_usage() {
  cat <<EOL
A swiss-army script for local graphql-engine development

Usage:   $0 <COMMAND>

Available COMMANDs:

  graphql-engine
    Launch graphql-engine, connecting to a database launched with
    '$0 postgres'.

  postgres
    Launch a postgres container suitable for use with graphql-engine, watch its
    logs, clean up nicely after

  mssql
    Launch a MSSQL container suitable for use with graphql-engine, watch its
    logs, clean up nicely after

  citus
    Launch a Citus single-node container suitable for use with graphql-engine,
    watch its logs, clean up nicely after

  mysql
    Launch a MySQL container suitable for use with graphql-engine, watch its
    logs, clean up nicely after

  test [--integration [pytest_args...] | --unit | --hlint]
    Run the unit and integration tests, handling spinning up all dependencies.
    This will force a recompile. A combined code coverage report will be
    generated for all test suites.

    Either integration or unit tests can be run individually with their
    respective flags. With '--integration' any arguments that follow will be
    passed to the pytest invocation. Run the hlint code linter individually
    using '--hlint'.

EOL
  exit 1
}
# Prettify JSON output, if possible. Each input line that parses as JSON is
# pretty-printed by jq; non-JSON lines pass through unchanged (jq try/catch).
# If jq isn't installed, act as a plain passthrough.
try_jq() {
  if command -v jq >/dev/null; then
    command jq --unbuffered -R -r '. as $line | try fromjson catch $line'
  else
    cat
  fi
}
# Bump this to:
# - force a reinstall of python dependencies, etc.
DEVSH_VERSION=1.6
# Parse the COMMAND (and its sub-options), setting the RUN_* flags for 'test'.
case "${1-}" in
  graphql-engine)
    case "${2-}" in
      --no-rebuild)
        echo_error 'The --no-rebuild option is no longer supported.'
        die_usage
        ;;
      "")
        ;;
      *)
        die_usage
        ;;
    esac
    ;;
  postgres)
    ;;
  mssql)
    ;;
  citus)
    ;;
  mysql)
    ;;
  test)
    case "${2-}" in
      --unit)
        RUN_INTEGRATION_TESTS=false
        RUN_UNIT_TESTS=true
        RUN_HLINT=false
        ;;
      --integration)
        # Remaining arguments are forwarded verbatim to pytest:
        PYTEST_ARGS=( "${@:3}" )
        RUN_INTEGRATION_TESTS=true
        RUN_UNIT_TESTS=false
        RUN_HLINT=false
        source scripts/parse-pytest-backend
        ;;
      --hlint)
        RUN_INTEGRATION_TESTS=false
        RUN_UNIT_TESTS=false
        RUN_HLINT=true
        ;;
      "")
        RUN_INTEGRATION_TESTS=true
        RUN_UNIT_TESTS=true
        RUN_HLINT=true
        BACKEND="postgres"
        ;;
      *)
        die_usage
        ;;
    esac
    ;;
  *)
    die_usage
    ;;
esac

# For now:
MODE="$1"

# Resolve the repository root regardless of where the script is invoked from.
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." >/dev/null 2>&1 && pwd)" # ... https://stackoverflow.com/a/246128/176841
cd "$PROJECT_ROOT"
# Use pyenv if available to set an appropriate python version that will work
# with pytests etc.
if command -v pyenv >/dev/null; then
  # For now I guess use the greatest python3 >= 3.5
  v=$(pyenv versions --bare | (grep '^ *3' || true) | awk '{if($1>=3.5)print$1}' | tail -n1)
  if [ -z "$v" ]; then
    echo_error 'Please `pyenv install` a version of python >= 3.5 so we can use it'
    exit 2
  fi
  echo_pretty "Pyenv found. Using python version: $v"
  export PYENV_VERSION=$v
  python3 --version
else
  echo_warn "Pyenv not installed. Proceeding with system python version: $(python3 --version)"
fi
####################################
###       Containers setup       ###
####################################

# Each sourced file defines the *_launch_container / *_wait / *_cleanup
# helpers and connection variables for its backend.
source scripts/containers/postgres
source scripts/containers/mssql.sh
source scripts/containers/citus
source scripts/containers/mysql.sh
source scripts/data-sources-util.sh

# Track which containers we started, so cleanup only stops what we own:
PG_RUNNING=0
MSSQL_RUNNING=0
CITUS_RUNNING=0
MYSQL_RUNNING=0
# Tear down whatever we started; registered on EXIT so it also runs on CTRL-C.
function cleanup {
  echo
  if [ -n "${GRAPHQL_ENGINE_PID-}" ]; then
    # Kill the cabal new-run and its children. This may already have been killed:
    pkill -P "$GRAPHQL_ENGINE_PID" &>/dev/null || true
  fi

  # Only clean up containers this invocation launched:
  if [ $PG_RUNNING    -eq 1 ]; then    pg_cleanup; fi
  if [ $MSSQL_RUNNING -eq 1 ]; then mssql_cleanup; fi
  if [ $CITUS_RUNNING -eq 1 ]; then citus_cleanup; fi
  if [ $MYSQL_RUNNING -eq 1 ]; then mysql_cleanup; fi

  echo_pretty "Done"
}
trap cleanup EXIT
# Launch the postgres container (idempotent) and block until it's accepting
# connections.
function pg_start() {
  if [ $PG_RUNNING -eq 0 ]; then
    pg_launch_container
    PG_RUNNING=1
    pg_wait
  fi
}
# Launch the MSSQL container (idempotent) and wait for it to come up.
function mssql_start() {
  if [ $MSSQL_RUNNING -eq 0 ]; then
    mssql_launch_container
    MSSQL_RUNNING=1
    if [[ "$(uname -m)" == 'arm64' ]]; then
      # mssql_wait uses the tool sqlcmd to wait for a database connection which unfortunately
      # is not available for the azure-sql-edge docker image - which is the only image from microsoft
      # that runs on M1 computers. So we sleep for 20 seconds, cross fingers and hope for the best
      # see https://github.com/microsoft/mssql-docker/issues/668
      echo "Sleeping for 20 sec while mssql comes up..."
      sleep 20
    else
      mssql_wait
    fi
  fi
}
# Launch the Citus single-node container (idempotent) and wait for it.
function citus_start() {
  if [ $CITUS_RUNNING -eq 0 ]; then
    citus_launch_container
    CITUS_RUNNING=1
    citus_wait
  fi
}
# Launch the MySQL container (idempotent) and wait for it.
function mysql_start() {
  if [ $MYSQL_RUNNING -eq 0 ]; then
    mysql_launch_container
    MYSQL_RUNNING=1
    mysql_wait
  fi
}
# Start postgres plus whichever extra backend container $BACKEND selects.
function start_dbs() {
  # always launch the postgres container
  pg_start

  case "$BACKEND" in
    citus)
      citus_start
      ;;
    mssql)
      mssql_start
      ;;
    mysql)
      mysql_start
      ;;
    # bigquery deliberately omitted as its test setup is atypical. See:
    # https://github.com/hasura/graphql-engine/blob/master/server/py-tests/README.md#running-bigquery-tests
  esac
}
#################################
### Graphql-engine ###
#################################
2021-04-08 18:02:56 +03:00
2019-07-12 06:59:29 +03:00
if [ " $MODE " = "graphql-engine" ] ; then
2021-08-11 07:18:40 +03:00
cd " $PROJECT_ROOT "
2020-04-03 11:24:51 +03:00
# Existing tix files for a different hge binary will cause issues:
2020-03-05 20:59:26 +03:00
rm -f graphql-engine.tix
2019-07-12 06:59:29 +03:00
2020-04-10 00:00:28 +03:00
# Attempt to run this after a CTRL-C:
function cleanup {
echo
2020-05-13 15:33:16 +03:00
# Generate coverage, which can be useful for debugging or understanding
2020-04-03 11:24:51 +03:00
if command -v hpc >/dev/null && command -v jq >/dev/null ; then
2020-04-10 00:00:28 +03:00
# Get the appropriate mix dir (the newest one). This way this hopefully
# works when cabal.project.dev-sh.local is edited to turn on optimizations.
2020-05-27 18:02:58 +03:00
# See also: https://hackage.haskell.org/package/cabal-plan
2020-04-03 11:24:51 +03:00
distdir = $( cat dist-newstyle/cache/plan.json | jq -r '."install-plan"[] | select(."id" == "graphql-engine-1.0.0-inplace")? | ."dist-dir"' )
2021-04-15 16:47:47 +03:00
hpcdir = " $distdir /hpc/dyn/mix/graphql-engine-1.0.0 "
2020-04-10 00:00:28 +03:00
echo_pretty "Generating code coverage report..."
COVERAGE_DIR = "dist-newstyle/dev.sh-coverage"
2020-05-13 15:33:16 +03:00
hpc_invocation = ( hpc markup
--exclude= Main
--hpcdir " $hpcdir "
--reset-hpcdirs graphql-engine.tix
--fun-entry-count
2020-04-10 00:00:28 +03:00
--destdir= " $COVERAGE_DIR " )
2021-04-08 18:02:56 +03:00
" ${ hpc_invocation [@] } " >/dev/null
2020-04-10 00:00:28 +03:00
echo_pretty "To view full coverage report open:"
echo_pretty " file:// $( pwd ) / $COVERAGE_DIR /hpc_index.html "
tix_archive = dist-newstyle/graphql-engine.tix.$( date "+%Y.%m.%d-%H.%M.%S" )
mv graphql-engine.tix " $tix_archive "
echo_pretty ""
echo_pretty " The tix file we used has been archived to: $tix_archive "
echo_pretty ""
2020-05-13 15:33:16 +03:00
echo_pretty "You might want to use 'hpc combine' to create a diff of two different tix"
2020-04-10 00:00:28 +03:00
echo_pretty "files, and then generate a new report with something like:"
echo_pretty " $ ${ hpc_invocation [*] } "
else
2020-04-03 11:24:51 +03:00
echo_warn "Please install 'hpc' and 'jq' to get a code coverage report"
2020-04-10 00:00:28 +03:00
fi
}
trap cleanup EXIT
2021-04-08 18:02:56 +03:00
export HASURA_GRAPHQL_DATABASE_URL = ${ HASURA_GRAPHQL_DATABASE_URL - $PG_DB_URL }
2019-07-12 06:59:29 +03:00
export HASURA_GRAPHQL_SERVER_PORT = ${ HASURA_GRAPHQL_SERVER_PORT -8181 }
2020-07-01 06:53:10 +03:00
echo_pretty " We will connect to postgres at ' $HASURA_GRAPHQL_DATABASE_URL ' "
echo_pretty "If you haven't overridden HASURA_GRAPHQL_DATABASE_URL, you can"
echo_pretty "launch a fresh postgres container for us to connect to, in a"
echo_pretty "separate terminal with:"
2019-07-12 06:59:29 +03:00
echo_pretty " $ $0 postgres "
echo_pretty ""
2020-09-10 12:30:34 +03:00
RUN_INVOCATION = ( cabal new-run --project-file= cabal.project.dev-sh --RTS --
2020-07-01 06:53:10 +03:00
exe:graphql-engine +RTS -N -T -s -RTS serve
--enable-console --console-assets-dir " $PROJECT_ROOT /console/static/dist "
)
2020-01-18 01:07:15 +03:00
echo_pretty 'About to do:'
2020-01-24 02:22:21 +03:00
echo_pretty ' $ cabal new-build --project-file=cabal.project.dev-sh exe:graphql-engine'
2020-01-18 01:07:15 +03:00
echo_pretty " $ ${ RUN_INVOCATION [*] } "
echo_pretty ''
2020-01-24 02:22:21 +03:00
cabal new-build --project-file= cabal.project.dev-sh exe:graphql-engine
2021-04-08 18:02:56 +03:00
# We assume a PG is *already running*, and therefore bypass the
# cleanup mechanism previously set.
pg_wait
2019-07-12 06:59:29 +03:00
# Print helpful info after startup logs so it's visible:
{
until curl -s " http://127.0.0.1: $HASURA_GRAPHQL_SERVER_PORT /v1/query " & >/dev/null; do
sleep 0.2
done
sleep 1
2019-10-21 19:01:05 +03:00
echo_pretty "▲▲▲ graphql-engine startup logs above ▲▲▲"
echo_pretty ""
2019-07-12 06:59:29 +03:00
echo_pretty "You can set additional environment vars to tailor 'graphql-engine' next time you"
echo_pretty "invoke this script, e.g.:"
echo_pretty " # Keep polling statements out of logs"
echo_pretty " HASURA_GRAPHQL_EVENTS_FETCH_INTERVAL=3000000"
echo_pretty ""
echo_pretty "The hasura console is available at:"
echo_pretty " http://127.0.0.1: $HASURA_GRAPHQL_SERVER_PORT /console "
echo_pretty ""
echo_pretty " If the console was modified since your last build (re)build assets with:"
echo_pretty " $ cd \" $PROJECT_ROOT /console\" "
2020-12-20 09:52:43 +03:00
echo_pretty " $ npm ci && make server-build "
2019-07-12 06:59:29 +03:00
echo_pretty ""
echo_pretty "Useful endpoints when compiling with 'graphql-engine:developer' and running with '+RTS -T'"
echo_pretty " http://127.0.0.1: $HASURA_GRAPHQL_SERVER_PORT /dev/subscriptions "
echo_pretty " http://127.0.0.1: $HASURA_GRAPHQL_SERVER_PORT /dev/plan_cache "
2019-07-23 10:54:21 +03:00
echo_pretty ""
echo_pretty "To view realtime GC stats and other info open in your browser:"
echo_pretty " file:// $PROJECT_ROOT /scripts/ekg/ekg.html# $HASURA_GRAPHQL_SERVER_PORT "
2019-07-12 06:59:29 +03:00
echo_pretty ""
echo_pretty "▼▼▼ additional graphql-engine logs will appear below: ▼▼▼"
} &
# Logs printed until CTRL-C:
2021-04-08 18:02:56 +03:00
" ${ RUN_INVOCATION [@] } " | try_jq
2019-07-12 06:59:29 +03:00
exit 0
### END SCRIPT ###
2021-04-08 18:02:56 +03:00
#################################
###    Postgres container     ###
#################################

elif [ "$MODE" = "postgres" ]; then
  pg_start
  echo_pretty "Postgres logs will start to show up in realtime here. Press CTRL-C to exit and "
  echo_pretty "shutdown this container."
  echo_pretty ""
  echo_pretty "You can use the following to connect to the running instance:"
  echo_pretty "    $ $PG_DOCKER"
  echo_pretty "        or..."
  echo_pretty "    $ PGPASSWORD=$PG_PASSWORD psql -h 127.0.0.1 -p $PG_PORT postgres -U postgres"
  echo_pretty ""
  echo_pretty "Here is the database URL:"
  echo_pretty "    $PG_DB_URL"
  echo_pretty ""
  echo_pretty "If you want to launch a 'graphql-engine' that works with this database:"
  echo_pretty "    $ $0 graphql-engine"
  # Stream container logs until CTRL-C (cleanup trap stops the container):
  docker logs -f --tail=0 "$PG_CONTAINER_NAME"
#################################
###      MSSQL Container      ###
#################################

elif [ "$MODE" = "mssql" ]; then
  mssql_start
  echo_pretty "MSSQL logs will start to show up in realtime here. Press CTRL-C to exit and "
  echo_pretty "shutdown this container."
  echo_pretty ""
  echo_pretty "You can use the following to connect to the running instance:"
  echo_pretty "    $ $MSSQL_DOCKER"
  echo_pretty ""
  echo_pretty "If you want to import a SQL file into MSSQL:"
  echo_pretty "    $ $MSSQL_DOCKER -i <import_file>"
  echo_pretty ""
  echo_pretty "Here is the database URL:"
  echo_pretty "    $MSSQL_CONN_STR"
  echo_pretty ""
  # Stream container logs until CTRL-C (cleanup trap stops the container):
  docker logs -f --tail=0 "$MSSQL_CONTAINER_NAME"
#################################
###      Citus Container      ###
#################################

elif [ "$MODE" = "citus" ]; then
  citus_start
  echo_pretty "CITUS logs will start to show up in realtime here. Press CTRL-C to exit and "
  echo_pretty "shutdown this container."
  echo_pretty ""
  echo_pretty "You can use the following to connect to the running instance:"
  echo_pretty "    $ $CITUS_DOCKER"
  echo_pretty ""
  echo_pretty "Here is the database URL:"
  echo_pretty "    $CITUS_DB_URL"
  echo_pretty ""
  # Stream container logs until CTRL-C (cleanup trap stops the container):
  docker logs -f --tail=0 "$CITUS_CONTAINER_NAME"
#################################
###      MySQL Container      ###
#################################

elif [ "$MODE" = "mysql" ]; then
  mysql_start
  echo_pretty "MYSQL logs will start to show up in realtime here. Press CTRL-C to exit and "
  echo_pretty "shutdown this container."
  echo_pretty ""
  echo_pretty "You can use the following to connect to the running instance:"
  echo_pretty "    $ $MYSQL_DOCKER"
  echo_pretty ""
  echo_pretty "If you want to import a SQL file into MYSQL:"
  echo_pretty "    $ $MYSQL_DOCKER -i <import_file>"
  echo_pretty ""
  # Stream container logs until CTRL-C (cleanup trap stops the container):
  docker logs -f --tail=0 "$MYSQL_CONTAINER_NAME"
elif [ "$MODE" = "test" ]; then
  ########################################
  ###     Integration / unit tests     ###
  ########################################
  cd "$PROJECT_ROOT"

  # Until we can use a real webserver for TestEventFlood, limit concurrency
  export HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE=8

  # We'll get an hpc error if these exist; they will be deleted below too:
  rm -f graphql-engine-tests.tix graphql-engine.tix graphql-engine-combined.tix

  # Various tests take some configuration from the environment; set these up here:
  export EVENT_WEBHOOK_HEADER="MyEnvValue"
  export WEBHOOK_FROM_ENV="http://127.0.0.1:5592"
  export SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN="http://127.0.0.1:5594"
  export REMOTE_SCHEMAS_WEBHOOK_DOMAIN="http://127.0.0.1:5000"

  if [ "$RUN_INTEGRATION_TESTS" = true ]; then
    # It's better UX to build first (possibly failing) before trying to launch
    # PG, but make sure that new-run uses the exact same build plan, else we risk
    # rebuilding twice... ugh
    # Formerly this was a `cabal build` but mixing cabal build and cabal run
    # seems to conflict now, causing re-linking, haddock runs, etc. Instead do a
    # `graphql-engine version` to trigger build
    cabal new-run --project-file=cabal.project.dev-sh -- exe:graphql-engine \
      --metadata-database-url="$PG_DB_URL" version
    start_dbs
  fi
2019-11-14 21:33:30 +03:00
if [ " $RUN_UNIT_TESTS " = true ] ; then
echo_pretty "Running Haskell test suite"
2021-09-09 10:59:04 +03:00
# unit tests need access to postgres and mssql instances:
mssql_start
pg_start
HASURA_GRAPHQL_DATABASE_URL = " $PG_DB_URL " \
HASURA_MSSQL_CONN_STR = " $MSSQL_CONN_STR " \
cabal new-run --project-file= cabal.project.dev-sh -- test:graphql-engine-tests
2021-04-08 18:02:56 +03:00
fi
if [ " $RUN_HLINT " = true ] ; then
2021-04-29 07:01:06 +03:00
if command -v hlint >/dev/null; then
( cd " $PROJECT_ROOT /server " && hlint src-*)
else
echo_warn "hlint is not installed: skipping"
fi
2019-11-14 19:32:11 +03:00
fi
2019-11-14 21:33:30 +03:00
if [ " $RUN_INTEGRATION_TESTS " = true ] ; then
GRAPHQL_ENGINE_TEST_LOG = /tmp/hasura-dev-test-engine.log
2020-01-06 22:13:10 +03:00
echo_pretty " Starting graphql-engine, logging to $GRAPHQL_ENGINE_TEST_LOG "
2019-11-14 21:33:30 +03:00
export HASURA_GRAPHQL_SERVER_PORT = 8088
2021-03-11 21:17:41 +03:00
2021-04-20 16:42:06 +03:00
# Extra sources for multi-source tests. Uses the default postgres DB if no extra sources
# are defined.
export HASURA_GRAPHQL_PG_SOURCE_URL_1 = ${ HASURA_GRAPHQL_PG_SOURCE_URL_1 - $PG_DB_URL }
export HASURA_GRAPHQL_PG_SOURCE_URL_2 = ${ HASURA_GRAPHQL_PG_SOURCE_URL_2 - $PG_DB_URL }
2021-09-16 09:24:00 +03:00
export HASURA_GRAPHQL_EXPERIMENTAL_FEATURES = "inherited_roles"
2021-04-20 16:42:06 +03:00
2021-03-11 21:17:41 +03:00
# Using --metadata-database-url flag to test multiple backends
2021-04-27 08:34:14 +03:00
# HASURA_GRAPHQL_PG_SOURCE_URL_* For a couple multi-source pytests:
2021-03-11 21:17:41 +03:00
cabal new-run --project-file= cabal.project.dev-sh -- exe:graphql-engine \
2021-04-21 04:16:08 +03:00
--metadata-database-url= " $PG_DB_URL " serve \
--stringify-numeric-types \
--enable-console \
--console-assets-dir ../console/static/dist \
2020-01-18 01:07:15 +03:00
& > " $GRAPHQL_ENGINE_TEST_LOG " & GRAPHQL_ENGINE_PID = $!
2019-11-14 21:33:30 +03:00
echo -n "Waiting for graphql-engine"
until curl -s " http://127.0.0.1: $HASURA_GRAPHQL_SERVER_PORT /v1/query " & >/dev/null; do
echo -n '.' && sleep 0.2
2020-01-06 22:13:10 +03:00
# If the server stopped abort immediately
if ! kill -0 $GRAPHQL_ENGINE_PID ; then
echo_error "The server crashed or failed to start!!"
2021-04-08 18:02:56 +03:00
exit 42
2020-01-06 22:13:10 +03:00
fi
2019-11-14 21:33:30 +03:00
done
2021-03-11 21:17:41 +03:00
echo ""
2019-11-14 21:33:30 +03:00
echo " Ok"
2021-07-01 17:40:05 +03:00
add_sources $HASURA_GRAPHQL_SERVER_PORT
2021-03-11 21:17:41 +03:00
2019-11-14 21:33:30 +03:00
cd " $PROJECT_ROOT /server/tests-py "
2020-05-27 18:02:58 +03:00
## Install misc test dependencies:
if [ ! -d "node_modules" ] ; then
npm_config_loglevel = error npm install remote_schemas/nodejs/
else
echo_pretty "It looks like node dependencies have been installed already. Skipping."
echo_pretty "If things fail please run this and try again"
echo_pretty " $ rm -r \" $PROJECT_ROOT /server/tests-py/node_modules\" "
fi
### Check for and install dependencies in venv
2019-11-14 21:33:30 +03:00
PY_VENV = .hasura-dev-python-venv
DEVSH_VERSION_FILE = .devsh_version
# Do we need to force reinstall?
if [ " $DEVSH_VERSION " = " $( cat $DEVSH_VERSION_FILE 2>/dev/null || true ) " ] ; then
true # ok
2019-11-14 19:32:11 +03:00
else
2019-12-04 01:36:38 +03:00
echo_warn 'dev.sh version was bumped or fresh install. Forcing reinstallation of dependencies.'
rm -rf " $PY_VENV "
2019-11-14 21:33:30 +03:00
echo " $DEVSH_VERSION " > " $DEVSH_VERSION_FILE "
2019-11-14 19:32:11 +03:00
fi
2021-08-12 04:53:13 +03:00
# cryptography 3.4.7 version requires Rust dependencies by default. But we don't need them for our tests, hence disabling them via the following env var => https://stackoverflow.com/a/66334084
export CRYPTOGRAPHY_DONT_BUILD_RUST = 1
2019-11-14 21:33:30 +03:00
set +u # for venv activate
if [ ! -d " $PY_VENV " ] ; then
python3 -m venv " $PY_VENV "
source " $PY_VENV /bin/activate "
pip3 install wheel
# If the maintainer of this script or pytests needs to change dependencies:
# - alter requirements-top-level.txt as needed
# - delete requirements.txt
# - run this script, then check in the new frozen requirements.txt
if [ -f requirements.txt ] ; then
pip3 install -r requirements.txt
else
pip3 install -r requirements-top-level.txt
pip3 freeze > requirements.txt
fi
else
echo_pretty "It looks like python dependencies have been installed already. Skipping."
echo_pretty "If things fail please run this and try again"
echo_pretty " $ rm -r \" $PROJECT_ROOT /server/tests-py/ $PY_VENV \" "
2019-07-12 06:59:29 +03:00
2019-11-14 21:33:30 +03:00
source " $PY_VENV /bin/activate "
fi
2019-07-12 06:59:29 +03:00
2019-11-14 21:33:30 +03:00
# TODO MAYBE: fix deprecation warnings, make them an error
2021-04-27 08:34:14 +03:00
if ! pytest -W ignore::DeprecationWarning --hge-urls http://127.0.0.1:$HASURA_GRAPHQL_SERVER_PORT --pg-urls " $PG_DB_URL " --durations= 20 " ${ PYTEST_ARGS [@] } " ; then
2020-01-06 22:13:10 +03:00
echo_error " ^^^ graphql-engine logs from failed test run can be inspected at: $GRAPHQL_ENGINE_TEST_LOG "
2019-11-14 21:33:30 +03:00
fi
deactivate # python venv
set -u
2019-10-21 19:01:05 +03:00
2019-11-14 21:33:30 +03:00
cd " $PROJECT_ROOT /server "
2020-01-24 02:22:21 +03:00
# Kill the cabal new-run and its children. INT so we get hpc report:
pkill -INT -P " $GRAPHQL_ENGINE_PID "
2019-11-14 21:33:30 +03:00
wait " $GRAPHQL_ENGINE_PID " || true
echo
2020-01-24 02:22:21 +03:00
fi # RUN_INTEGRATION_TESTS
2020-05-13 15:33:16 +03:00
# If hpc available, combine any tix from haskell/unit tests:
2020-01-24 02:22:21 +03:00
if command -v hpc >/dev/null; then
2020-05-13 15:33:16 +03:00
if [ " $RUN_UNIT_TESTS " = true ] && [ " $RUN_INTEGRATION_TESTS " = true ] ; then
2020-01-24 02:22:21 +03:00
# As below, it seems we variously get errors related to having two Main
# modules, so exclude:
hpc combine --exclude= Main graphql-engine-tests.tix graphql-engine.tix --union > graphql-engine-combined.tix
else
# One of these should exist
cp graphql-engine-tests.tix graphql-engine-combined.tix 2>/dev/null || true
cp graphql-engine.tix graphql-engine-combined.tix 2>/dev/null || true
fi
# Generate a report including the test code itself (see cabal.project.dev-sh.local):
# NOTE: we have to omit the .mix directory for the executable, since it
# seems hpc can't cope with two modules of the same name; '--exclude'
# didn't help.
echo_pretty "Generating code coverage report..."
COVERAGE_DIR = "dist-newstyle/dev.sh-coverage"
hpc markup \
--exclude= Main \
2021-04-15 16:47:47 +03:00
--hpcdir dist-newstyle/build/*/ghc-*/graphql-engine-*/noopt/hpc/dyn/mix/graphql-engine-* \
--hpcdir dist-newstyle/build/*/ghc-*/graphql-engine-*/t/graphql-engine-tests/noopt/hpc/dyn/mix/graphql-engine-tests \
2020-01-24 02:22:21 +03:00
--reset-hpcdirs graphql-engine-combined.tix \
--fun-entry-count \
--destdir= " $COVERAGE_DIR " >/dev/null
hpc report \
--exclude= Main \
2021-04-15 16:47:47 +03:00
--hpcdir dist-newstyle/build/*/ghc-*/graphql-engine-*/noopt/hpc/dyn/mix/graphql-engine-* \
--hpcdir dist-newstyle/build/*/ghc-*/graphql-engine-*/t/graphql-engine-tests/noopt/hpc/dyn/mix/graphql-engine-tests \
2020-05-13 15:33:16 +03:00
--reset-hpcdirs graphql-engine-combined.tix
2020-01-24 02:22:21 +03:00
echo_pretty "To view full coverage report open:"
echo_pretty " file:// $( pwd ) / $COVERAGE_DIR /hpc_index.html "
else
echo_warn "Please install hpc to get a combined code coverage report for tests"
2019-11-14 21:33:30 +03:00
fi
2020-01-24 02:22:21 +03:00
rm -f graphql-engine-tests.tix graphql-engine.tix graphql-engine-combined.tix
2019-07-12 06:59:29 +03:00
else
  # Unreachable: the argument-parsing 'case' above already rejects unknown
  # MODEs with die_usage. If we get here, the two dispatchers are out of sync.
  echo "impossible; fix script."
fi