2019-07-12 06:59:29 +03:00
|
|
|
#!/bin/bash
# Abort on any command failure, on use of an unset variable, and on a failure
# anywhere in a pipeline:
set -euo pipefail
# Enable '**' recursive globbing (bash 4+); NOTE(review): presumably needed by
# glob patterns used later in this script — confirm before removing.
shopt -s globstar
|
2019-07-12 06:59:29 +03:00
|
|
|
|
2020-01-24 02:22:21 +03:00
|
|
|
# A development swiss army knife script. The goals are to:
|
|
|
|
#
|
|
|
|
# - encode some best practices and hard-won knowledge of quirks and corners of
|
|
|
|
# our tooling
|
|
|
|
# - simplify development; especially for new-comers; instead of writing a huge
|
|
|
|
# document describing how to do various dev tasks (or worse yet, not writing
|
|
|
|
# one), make it runnable
|
|
|
|
#
|
|
|
|
# This makes use of 'cabal.project.dev-sh*' files when building. See
|
|
|
|
# 'cabal.project.dev-sh.local'.
|
2019-07-12 06:59:29 +03:00
|
|
|
|
|
|
|
|
|
|
|
# Print a status line to stdout, colored green when the terminal supports it.
# The tput substitutions stay inline so a failing tput (dumb terminal, tput
# missing) just yields empty strings and never trips 'set -e'.
echo_pretty() {
  printf '>>> %s%s%s\n' "$(tput setaf 2)" "$1" "$(tput sgr0)"
}
|
2019-11-14 19:32:11 +03:00
|
|
|
# Print an error line to stdout, colored red when the terminal supports it.
# tput failures degrade to empty strings and do not trip 'set -e'.
echo_error() {
  printf '>>> %s%s%s\n' "$(tput setaf 1)" "$1" "$(tput sgr0)"
}
|
|
|
|
# Print a warning line to stdout, colored yellow when the terminal supports it.
# tput failures degrade to empty strings and do not trip 'set -e'.
echo_warn() {
  printf '>>> %s%s%s\n' "$(tput setaf 3)" "$1" "$(tput sgr0)"
}
|
2019-07-12 06:59:29 +03:00
|
|
|
|
|
|
|
# Print the usage/help text to stdout and terminate the script with status 1.
# $0 expands inside the heredoc so examples show the real invocation path.
die_usage() {
  cat <<EOL
A swiss-army script for local graphql-engine development

Usage: $0 <COMMAND>

Available COMMANDs:

graphql-engine
Launch graphql-engine, connecting to a database launched with '$0 postgres'.

postgres
Launch a postgres container suitable for use with graphql-engine, watch its logs,
clean up nicely after

test [--integration [pytest_args...] | --unit | --hlint]
Run the unit and integration tests, handling spinning up all dependencies.
This will force a recompile. A combined code coverage report will be
generated for all test suites.
Either integration or unit tests can be run individually with their
respective flags. With '--integration' any arguments that follow will be
passed to the pytest invocation. Run the hlint code linter individually using
'--hlint'.

EOL
  exit 1
}
|
|
|
|
|
2020-01-18 01:07:15 +03:00
|
|
|
# Prettify JSON output, if possible
# Acts as a stream filter: each input line that parses as JSON is
# pretty-printed by jq; anything else (including the whole stream, when jq is
# not installed) passes through untouched.
try_jq() {
  if ! command -v jq >/dev/null; then
    cat
  else
    command jq --unbuffered -R -r '. as $line | try fromjson catch $line'
  fi
}
|
|
|
|
|
2019-11-14 19:32:11 +03:00
|
|
|
# Bump this to:
# - force a reinstall of python dependencies, etc.
DEVSH_VERSION=1.3

# Validate the command line up front, before doing any real work. For 'test'
# this also records which suites to run in the RUN_* flags consumed later.
case "${1-}" in
  graphql-engine)
    case "${2-}" in
      --no-rebuild)
        echo_error 'The --no-rebuild option is no longer supported.'
        die_usage
        ;;
      "")
        ;;
      *)
        die_usage
        ;;
    esac
    ;;
  postgres)
    ;;
  test)
    case "${2-}" in
      --unit)
        RUN_INTEGRATION_TESTS=false
        RUN_UNIT_TESTS=true
        RUN_HLINT=false
        ;;
      --integration)
        # NOTE(review): "${@:3}" flattens the remaining args into one string;
        # pytest args containing spaces will be re-split when this is expanded
        # unquoted at the pytest call site — an array would be more robust.
        PYTEST_ARGS="${@:3}"
        RUN_INTEGRATION_TESTS=true
        RUN_UNIT_TESTS=false
        RUN_HLINT=false
        ;;
      --hlint)
        RUN_INTEGRATION_TESTS=false
        RUN_UNIT_TESTS=false
        RUN_HLINT=true
        ;;
      "")
        # No sub-flag: run everything.
        RUN_INTEGRATION_TESTS=true
        RUN_UNIT_TESTS=true
        RUN_HLINT=true
        ;;
      *)
        die_usage
        ;;
    esac
    ;;
  *)
    die_usage
    ;;
esac

# For now:
MODE="$1"

# Absolute path of the repository root (this script lives one level below it):
PROJECT_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." >/dev/null 2>&1 && pwd )" # ... https://stackoverflow.com/a/246128/176841
cd "$PROJECT_ROOT"
|
|
|
|
|
2019-11-14 19:32:11 +03:00
|
|
|
# Use pyenv if available to set an appropriate python version that will work with pytests etc.
if command -v pyenv >/dev/null; then
  # Pick the greatest installed python3 >= 3.5.
  # NOTE: the previous awk '{if($1>=3.5)...}' coerced "3.10.x" to the number
  # 3.1 and wrongly excluded Python >= 3.10, and a plain 'tail -n1' relied on
  # lexical ordering (where "3.9" sorts after "3.10"). Compare major/minor
  # fields instead and sort the fields numerically.
  v=$(pyenv versions --bare \
        | (grep '^ *3' || true) \
        | awk '{ split($1, p, "."); if (p[1] == 3 && p[2] >= 5) print $1 }' \
        | sort -t. -k1,1n -k2,2n -k3,3n \
        | tail -n1)
  if [ -z "$v" ]; then
    echo_error 'Please `pyenv install` a version of python >= 3.5 so we can use it'
    exit 2
  fi
  echo_pretty "Pyenv found. Using python version: $v"
  # pyenv's shims honor PYENV_VERSION for every python invocation below:
  export PYENV_VERSION=$v
  python3 --version
else
  echo_warn "Pyenv not installed. Proceeding with system python version: $(python3 --version)"
fi
|
2019-07-12 06:59:29 +03:00
|
|
|
|
|
|
|
####################################
### Shared environment stuff ###
####################################

# Hopefully these don't clash with anything. We could try to be smarter:
# Test runs get their own port so their database is totally disposable and
# cannot collide with a dev database launched via 'postgres' mode.
case "$MODE" in
  test)
    # Choose a different port so PG is totally disposable:
    PG_PORT=35432
    ;;
  *)
    PG_PORT=25432
    ;;
esac

# export for psql, etc.
export PGPASSWORD=postgres
|
|
|
|
|
2020-07-01 06:53:10 +03:00
|
|
|
# The URL for the postgres server we might launch
CONTAINER_DB_URL="postgres://postgres:$PGPASSWORD@127.0.0.1:$PG_PORT/postgres"
# ... but we might like to use a different PG instance when just launching graphql-engine:
# ('-' expansion: keep a caller-supplied HASURA_GRAPHQL_DATABASE_URL if one was
# set in the environment, otherwise fall back to our container's URL)
HASURA_GRAPHQL_DATABASE_URL=${HASURA_GRAPHQL_DATABASE_URL-$CONTAINER_DB_URL}

# Container name includes the port, which differs per mode (see PG_PORT above):
PG_CONTAINER_NAME="hasura-dev-postgres-$PG_PORT"

# We can remove psql as a dependency by using it from the (running) PG container:
DOCKER_PSQL="docker exec -u postgres -it $PG_CONTAINER_NAME psql $HASURA_GRAPHQL_DATABASE_URL"
|
2019-07-12 06:59:29 +03:00
|
|
|
|
2020-07-01 06:53:10 +03:00
|
|
|
# Block until a postgres server answers, polling every 0.2s. Tries psql inside
# our container first, then a local psql against $HASURA_GRAPHQL_DATABASE_URL
# (presumably to cover a user-supplied external database — confirm).
function wait_postgres {
  echo -n "Waiting for postgres to come up"
  # $DOCKER_PSQL and the URL are intentionally unquoted: they must word-split
  # into command + arguments.
  until ( $DOCKER_PSQL -c '\l' || psql $HASURA_GRAPHQL_DATABASE_URL -c '\l') &>/dev/null; do
    echo -n '.' && sleep 0.2
  done
  echo " Ok"
}
|
|
|
|
|
|
|
|
#################################
### Graphql-engine ###
#################################
# Build and launch graphql-engine against the database configured above, then
# stream its (jq-prettified) logs until CTRL-C. On exit, an hpc code coverage
# report is generated from the .tix file the instrumented binary writes.
if [ "$MODE" = "graphql-engine" ]; then
  cd "$PROJECT_ROOT/server"
  # Existing tix files for a different hge binary will cause issues:
  rm -f graphql-engine.tix

  # Attempt to run this after a CTRL-C:
  function cleanup {
    echo
    # Generate coverage, which can be useful for debugging or understanding
    if command -v hpc >/dev/null && command -v jq >/dev/null ; then
      # Get the appropriate mix dir (the newest one). This way this hopefully
      # works when cabal.project.dev-sh.local is edited to turn on optimizations.
      # See also: https://hackage.haskell.org/package/cabal-plan
      distdir=$(jq -r '."install-plan"[] | select(."id" == "graphql-engine-1.0.0-inplace")? | ."dist-dir"' dist-newstyle/cache/plan.json)
      hpcdir="$distdir/hpc/vanilla/mix/graphql-engine-1.0.0"
      echo_pretty "Generating code coverage report..."
      COVERAGE_DIR="dist-newstyle/dev.sh-coverage"
      hpc_invocation=(hpc markup
        --exclude=Main
        --hpcdir "$hpcdir"
        --reset-hpcdirs graphql-engine.tix
        --fun-entry-count
        --destdir="$COVERAGE_DIR")
      # Quoted expansion: don't re-split or glob-expand arguments (e.g. a
      # dist dir containing spaces):
      "${hpc_invocation[@]}" >/dev/null

      echo_pretty "To view full coverage report open:"
      echo_pretty " file://$(pwd)/$COVERAGE_DIR/hpc_index.html"

      # Archive the tix so the next run starts clean but nothing is lost:
      tix_archive=dist-newstyle/graphql-engine.tix.$(date "+%Y.%m.%d-%H.%M.%S")
      mv graphql-engine.tix "$tix_archive"
      echo_pretty ""
      echo_pretty "The tix file we used has been archived to: $tix_archive"
      echo_pretty ""
      echo_pretty "You might want to use 'hpc combine' to create a diff of two different tix"
      echo_pretty "files, and then generate a new report with something like:"
      echo_pretty " $ ${hpc_invocation[*]}"
    else
      echo_warn "Please install 'hpc' and 'jq' to get a code coverage report"
    fi
  }
  trap cleanup EXIT

  export HASURA_GRAPHQL_DATABASE_URL # Defined above
  export HASURA_GRAPHQL_SERVER_PORT=${HASURA_GRAPHQL_SERVER_PORT-8181}

  echo_pretty "We will connect to postgres at '$HASURA_GRAPHQL_DATABASE_URL'"
  echo_pretty "If you haven't overridden HASURA_GRAPHQL_DATABASE_URL, you can"
  echo_pretty "launch a fresh postgres container for us to connect to, in a"
  echo_pretty "separate terminal with:"
  echo_pretty " $ $0 postgres"
  echo_pretty ""

  # Full serve invocation kept in an array so it can be both executed and
  # echoed to the user verbatim:
  RUN_INVOCATION=(cabal new-run --project-file=cabal.project.dev-sh --RTS --
    exe:graphql-engine +RTS -N -T -s -RTS serve
    --enable-console --console-assets-dir "$PROJECT_ROOT/console/static/dist"
    )

  echo_pretty 'About to do:'
  echo_pretty ' $ cabal new-build --project-file=cabal.project.dev-sh exe:graphql-engine'
  echo_pretty " $ ${RUN_INVOCATION[*]}"
  echo_pretty ''

  cabal new-build --project-file=cabal.project.dev-sh exe:graphql-engine
  wait_postgres

  # Print helpful info after startup logs so it's visible:
  {
    until curl -s "http://127.0.0.1:$HASURA_GRAPHQL_SERVER_PORT/v1/query" &>/dev/null; do
      sleep 0.2
    done
    sleep 1
    echo_pretty "▲▲▲ graphql-engine startup logs above ▲▲▲"
    echo_pretty ""
    echo_pretty "You can set additional environment vars to tailor 'graphql-engine' next time you"
    echo_pretty "invoke this script, e.g.:"
    echo_pretty " # Keep polling statements out of logs"
    echo_pretty " HASURA_GRAPHQL_EVENTS_FETCH_INTERVAL=3000000"
    echo_pretty ""
    echo_pretty "The hasura console is available at:"
    echo_pretty " http://127.0.0.1:$HASURA_GRAPHQL_SERVER_PORT/console"
    echo_pretty ""
    echo_pretty " If the console was modified since your last build (re)build assets with:"
    echo_pretty " $ cd \"$PROJECT_ROOT/console\""
    echo_pretty " $ npm ci && npm run server-build "
    echo_pretty ""
    echo_pretty "Useful endpoints when compiling with 'graphql-engine:developer' and running with '+RTS -T'"
    echo_pretty " http://127.0.0.1:$HASURA_GRAPHQL_SERVER_PORT/dev/subscriptions"
    echo_pretty " http://127.0.0.1:$HASURA_GRAPHQL_SERVER_PORT/dev/plan_cache"
    echo_pretty ""
    echo_pretty "To view realtime GC stats and other info open in your browser:"
    echo_pretty " file://$PROJECT_ROOT/scripts/ekg/ekg.html#$HASURA_GRAPHQL_SERVER_PORT"
    echo_pretty ""
    echo_pretty "▼▼▼ additional graphql-engine logs will appear below: ▼▼▼"
  } &

  # Logs printed until CTRL-C:
  # (quoted expansion so the console-assets-dir path is never word-split)
  "${RUN_INVOCATION[@]}" | try_jq
  exit 0
  ### END SCRIPT ###
fi
|
|
|
|
|
|
|
|
#################################
|
|
|
|
### Postgres Container ###
|
|
|
|
#################################
|
|
|
|
|
|
|
|
# Useful development defaults for postgres (no spaces here, please):
#
# setting 'port' in container is a workaround for the pg_dump endpoint (see tests)
# log_hostname=off to avoid timeout failures when running offline due to:
# https://forums.aws.amazon.com/thread.jspa?threadID=291285
#
# All lines up to log_error_verbosity are to support pgBadger:
# https://github.com/darold/pgbadger#LOG-STATEMENTS
#
# Also useful:
# log_autovacuum_min_duration=0
CONF=$(cat <<-EOF
log_min_duration_statement=0
log_checkpoints=on
log_connections=on
log_disconnections=on
log_lock_waits=on
log_temp_files=0
log_error_verbosity=default
log_hostname=off
log_duration=on
port=$PG_PORT
EOF
)

# log lines above as -c flag arguments we pass to postgres
# (each setting line becomes '-c <setting>', joined onto one line)
CONF_FLAGS=$(echo "$CONF" | sed -e 's/^/-c /' | tr '\n' ' ')
|
|
|
|
|
2019-11-14 21:03:33 +03:00
|
|
|
# Start the development postgres container with our CONF settings, install a
# pg_dump shim on PATH that proxies into the container, and register a trap
# that (on script exit) kills any graphql-engine child and optionally removes
# the container and shim.
function launch_postgres_container(){
  echo_pretty "Launching postgres container: $PG_CONTAINER_NAME"
  # $CONF_FLAGS is intentionally unquoted: it must word-split into the
  # individual '-c <setting>' arguments (settings contain no spaces).
  docker run --name "$PG_CONTAINER_NAME" -p 127.0.0.1:"$PG_PORT":$PG_PORT --expose="$PG_PORT" \
    -e POSTGRES_PASSWORD="$PGPASSWORD" -d circleci/postgres:11.5-alpine-postgis \
    $CONF_FLAGS

  # graphql-engine calls the pg_dump executable. To avoid a version mismatch (and
  # the dependency entirely) we create a shim that executes the pg_dump in the
  # postgres container. Note output to file won't work.
  DEV_SHIM_PATH="/tmp/hasura-dev-shims-$PG_PORT"
  mkdir -p "$DEV_SHIM_PATH"
  # Unquoted EOL: $0/$PG_CONTAINER_NAME expand now; \$@ stays literal so the
  # generated shim sees its own arguments at run time.
  cat >"$DEV_SHIM_PATH/pg_dump" <<EOL
#!/bin/bash
# Generated from: $0
if [[ \$@ == *" -f"* ]]; then
echo "It looks like we're trying to pg_dump to a file, but that won't work with this shim. See $0" >&2
exit 1
fi
docker exec -u postgres $PG_CONTAINER_NAME pg_dump "\$@"
EOL
  chmod a+x "$DEV_SHIM_PATH/pg_dump"
  export PATH="$DEV_SHIM_PATH":$PATH

  # Since launching the postgres container worked we can set up cleanup routines. This will catch CTRL-C
  function cleanup {
    echo

    if [ ! -z "${GRAPHQL_ENGINE_PID-}" ]; then
      # Kill the cabal new-run and its children. This may already have been killed:
      pkill -P "$GRAPHQL_ENGINE_PID" &>/dev/null || true
    fi

    case "$MODE" in
      test|postgres)
        # Since scripts here are tailored to the env we've just launched:
        rm -r "$DEV_SHIM_PATH"

        echo_pretty "Removing $PG_CONTAINER_NAME and its volumes in 5 seconds!"
        echo_pretty " PRESS CTRL-C TO ABORT removal, or ENTER to clean up right away"
        # Pause up to 5s for an early ENTER; '|| true' because read times out
        # with non-zero status under 'set -e':
        read -t5 || true
        docker stop "$PG_CONTAINER_NAME"
        docker rm -v "$PG_CONTAINER_NAME"
        ;;
      graphql-engine)
        # Keep the container running for the next invocation.
        ;;
    esac

    echo_pretty "Done"
  }
  trap cleanup EXIT
}
|
2019-07-12 06:59:29 +03:00
|
|
|
|
|
|
|
|
|
|
|
# 'postgres' mode: launch the container and stream its logs until CTRL-C
# (cleanup is handled by the trap installed in launch_postgres_container).
if [ "$MODE" = "postgres" ]; then
  launch_postgres_container
  wait_postgres
  echo_pretty "Postgres logs will start to show up in realtime here. Press CTRL-C to exit and "
  echo_pretty "shutdown this container."
  echo_pretty ""
  echo_pretty "You can use the following to connect to the running instance:"
  echo_pretty " $ $DOCKER_PSQL"
  echo_pretty " or..."
  echo_pretty " $ PGPASSWORD="$PGPASSWORD" psql -h 127.0.0.1 -p "$PG_PORT" postgres -U postgres"
  echo_pretty ""
  echo_pretty "Here is the database URL:"
  echo_pretty " $CONTAINER_DB_URL"
  echo_pretty ""
  echo_pretty "If you want to launch a 'graphql-engine' that works with this database:"
  echo_pretty " $ $0 graphql-engine"
  # Runs continuously until CTRL-C, jumping to cleanup() above:
  docker logs -f --tail=0 "$PG_CONTAINER_NAME"
|
|
|
|
|
2019-10-21 19:01:05 +03:00
|
|
|
elif [ "$MODE" = "test" ]; then
  ########################################
  ### Integration / unit tests ###
  ########################################
  cd "$PROJECT_ROOT/server"

  # Until we can use a real webserver for TestEventFlood, limit concurrency
  export HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE=8

  # We'll get an hpc error if these exist; they will be deleted below too:
  rm -f graphql-engine-tests.tix graphql-engine.tix graphql-engine-combined.tix

  # Various tests take some configuration from the environment; set these up here:
  export EVENT_WEBHOOK_HEADER="MyEnvValue"
  export WEBHOOK_FROM_ENV="http://127.0.0.1:5592"
  export SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN="http://127.0.0.1:5594"
  export REMOTE_SCHEMAS_WEBHOOK_DOMAIN="http://127.0.0.1:5000"

  # It's better UX to build first (possibly failing) before trying to launch
  # PG, but make sure that new-run uses the exact same build plan, else we risk
  # rebuilding twice... ugh
  cabal new-build --project-file=cabal.project.dev-sh exe:graphql-engine test:graphql-engine-tests
  launch_postgres_container
  wait_postgres
|
2019-11-14 21:03:33 +03:00
|
|
|
|
2019-11-14 21:33:30 +03:00
|
|
|
# These also depend on a running DB:
if [ "$RUN_UNIT_TESTS" = true ]; then
  echo_pretty "Running Haskell test suite"
  # Point the suite at the disposable test container, not any user override:
  HASURA_GRAPHQL_DATABASE_URL="$CONTAINER_DB_URL" cabal new-run --project-file=cabal.project.dev-sh -- test:graphql-engine-tests
fi
|
2019-11-14 21:33:30 +03:00
|
|
|
|
|
|
|
if [ "$RUN_INTEGRATION_TESTS" = true ]; then
  GRAPHQL_ENGINE_TEST_LOG=/tmp/hasura-dev-test-engine.log
  echo_pretty "Starting graphql-engine, logging to $GRAPHQL_ENGINE_TEST_LOG"
  export HASURA_GRAPHQL_SERVER_PORT=8088
  # Launch the server in the background, capturing both its output and PID so
  # we can monitor and later terminate it:
  cabal new-run --project-file=cabal.project.dev-sh -- exe:graphql-engine --database-url="$CONTAINER_DB_URL" serve --stringify-numeric-types \
    --enable-console --console-assets-dir ../console/static/dist \
    &> "$GRAPHQL_ENGINE_TEST_LOG" & GRAPHQL_ENGINE_PID=$!

  echo -n "Waiting for graphql-engine"
  until curl -s "http://127.0.0.1:$HASURA_GRAPHQL_SERVER_PORT/v1/query" &>/dev/null; do
    echo -n '.' && sleep 0.2
    # If the server stopped abort immediately
    if ! kill -0 $GRAPHQL_ENGINE_PID ; then
      echo_error "The server crashed or failed to start!!"
      exit 666
    fi
  done
  echo " Ok"

  cd "$PROJECT_ROOT/server/tests-py"
|
2020-05-27 18:02:58 +03:00
|
|
|
|
|
|
|
## Install misc test dependencies:
# Presence of node_modules is used as the "already installed" marker.
if [ ! -d "node_modules" ]; then
  npm_config_loglevel=error npm install remote_schemas/nodejs/
else
  echo_pretty "It looks like node dependencies have been installed already. Skipping."
  echo_pretty "If things fail please run this and try again"
  echo_pretty " $ rm -r \"$PROJECT_ROOT/server/tests-py/node_modules\""
fi
|
|
|
|
|
|
|
|
### Check for and install dependencies in venv
PY_VENV=.hasura-dev-python-venv
DEVSH_VERSION_FILE=.devsh_version
# Do we need to force reinstall?
# (compare the version stamped on disk with DEVSH_VERSION defined at the top;
# a missing stamp file reads as empty and forces a reinstall)
if [ "$DEVSH_VERSION" = "$(cat $DEVSH_VERSION_FILE 2>/dev/null || true)" ]; then
  true # ok
else
  echo_warn 'dev.sh version was bumped or fresh install. Forcing reinstallation of dependencies.'
  rm -rf "$PY_VENV"
  echo "$DEVSH_VERSION" > "$DEVSH_VERSION_FILE"
fi
set +u # for venv activate
if [ ! -d "$PY_VENV" ]; then
  python3 -m venv "$PY_VENV"
  source "$PY_VENV/bin/activate"
  pip3 install wheel
  # If the maintainer of this script or pytests needs to change dependencies:
  # - alter requirements-top-level.txt as needed
  # - delete requirements.txt
  # - run this script, then check in the new frozen requirements.txt
  if [ -f requirements.txt ]; then
    pip3 install -r requirements.txt
  else
    pip3 install -r requirements-top-level.txt
    pip3 freeze > requirements.txt
  fi
else
  echo_pretty "It looks like python dependencies have been installed already. Skipping."
  echo_pretty "If things fail please run this and try again"
  echo_pretty " $ rm -r \"$PROJECT_ROOT/server/tests-py/$PY_VENV\""

  source "$PY_VENV/bin/activate"
fi
|
2019-07-12 06:59:29 +03:00
|
|
|
|
2019-11-14 19:32:11 +03:00
|
|
|
|
2019-11-14 21:33:30 +03:00
|
|
|
# TODO MAYBE: fix deprecation warnings, make them an error
# $PYTEST_ARGS is intentionally unquoted so the user's extra args word-split
# back into separate arguments (set only in the '--integration' branch above).
if pytest -W ignore::DeprecationWarning --hge-urls http://127.0.0.1:$HASURA_GRAPHQL_SERVER_PORT --pg-urls "$CONTAINER_DB_URL" $PYTEST_ARGS; then
  PASSED=true
else
  PASSED=false
  echo_error "^^^ graphql-engine logs from failed test run can be inspected at: $GRAPHQL_ENGINE_TEST_LOG"
fi
deactivate # python venv
set -u
|
2019-10-21 19:01:05 +03:00
|
|
|
|
2019-11-14 21:33:30 +03:00
|
|
|
cd "$PROJECT_ROOT/server"
# Kill the cabal new-run and its children. INT so we get hpc report:
# '|| true' because the server may already have exited (e.g. it crashed during
# the tests); without it pkill's non-zero status would abort the whole script
# under 'set -e' and skip the coverage report below. (The cleanup trap guards
# the same pkill the same way.)
pkill -INT -P "$GRAPHQL_ENGINE_PID" || true
wait "$GRAPHQL_ENGINE_PID" || true
echo
|
2020-01-24 02:22:21 +03:00
|
|
|
fi # RUN_INTEGRATION_TESTS
|
|
|
|
|
2020-10-16 14:55:18 +03:00
|
|
|
if [ "$RUN_HLINT" = true ]; then
  cd "$PROJECT_ROOT/server"
  # Lint all Haskell source trees; a non-zero hlint status fails the script
  # here under 'set -e'.
  hlint src-*
fi # RUN_HLINT
|
|
|
|
|
2020-05-13 15:33:16 +03:00
|
|
|
# If hpc available, combine any tix from haskell/unit tests:
if command -v hpc >/dev/null; then
  if [ "$RUN_UNIT_TESTS" = true ] && [ "$RUN_INTEGRATION_TESTS" = true ]; then
    # As below, it seems we variously get errors related to having two Main
    # modules, so exclude:
    hpc combine --exclude=Main graphql-engine-tests.tix graphql-engine.tix --union > graphql-engine-combined.tix
  else
    # One of these should exist
    # (errors deliberately ignored: exactly one cp is expected to succeed)
    cp graphql-engine-tests.tix graphql-engine-combined.tix 2>/dev/null || true
    cp graphql-engine.tix graphql-engine-combined.tix 2>/dev/null || true
  fi
  # Generate a report including the test code itself (see cabal.project.dev-sh.local):
  # NOTE: we have to omit the .mix directory for the executable, since it
  # seems hpc can't cope with two modules of the same name; '--exclude'
  # didn't help.
  echo_pretty "Generating code coverage report..."
  COVERAGE_DIR="dist-newstyle/dev.sh-coverage"
  # The --hpcdir globs below rely on the shell expanding them to the actual
  # ghc/package-version directories (globstar/glob expansion, unquoted on
  # purpose):
  hpc markup \
    --exclude=Main \
    --hpcdir dist-newstyle/build/*/ghc-*/graphql-engine-*/noopt/hpc/vanilla/mix/graphql-engine-* \
    --hpcdir dist-newstyle/build/*/ghc-*/graphql-engine-*/t/graphql-engine-tests/noopt/hpc/vanilla/mix/graphql-engine-tests \
    --reset-hpcdirs graphql-engine-combined.tix \
    --fun-entry-count \
    --destdir="$COVERAGE_DIR" >/dev/null
  hpc report \
    --exclude=Main \
    --hpcdir dist-newstyle/build/*/ghc-*/graphql-engine-*/noopt/hpc/vanilla/mix/graphql-engine-* \
    --hpcdir dist-newstyle/build/*/ghc-*/graphql-engine-*/t/graphql-engine-tests/noopt/hpc/vanilla/mix/graphql-engine-tests \
    --reset-hpcdirs graphql-engine-combined.tix
  echo_pretty "To view full coverage report open:"
  echo_pretty " file://$(pwd)/$COVERAGE_DIR/hpc_index.html"

else
  echo_warn "Please install hpc to get a combined code coverage report for tests"
fi
rm -f graphql-engine-tests.tix graphql-engine.tix graphql-engine-combined.tix
|
|
|
|
|
2019-07-12 06:59:29 +03:00
|
|
|
else
  # Unreachable: the case statement at the top of the script already validated
  # $MODE. Fail loudly (stderr, non-zero status) rather than exiting 0
  # silently, in case a new mode is added without a handler here.
  echo "impossible; fix script." >&2
  exit 1
fi
|