# graphql-engine/.circleci/config.yml

# anchor refs to be used elsewhere
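# (anchors defined under `refs:` are consumed later either as whole steps,
#  e.g. `- *skip_job_on_ciignore`, or merged into mappings with `<<:`,
#  e.g. `<<: *test_pg_env`)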
refs:
skip_job_on_ciignore: &skip_job_on_ciignore
run:
name: checking if job should be terminated or not
command: |
if [ -f /build/ciignore/skip_job.txt ]; then
echo "halting job due to /build/ciignore/skip_job.txt"
circleci-agent step halt
else
echo "no skip_job file present, full steam ahead"
fi
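# note: `circleci-agent step halt` stops the job at this point but still
# reports it as successful, so skipped builds do not show up as failures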
wait_for_postgres: &wait_for_postgres
run:
name: waiting for postgres to be ready
command: |
for i in `seq 1 60`;
do
nc -z localhost 5432 && echo Success && exit 0
echo -n .
sleep 1
done
echo Failed waiting for Postgres && exit 1
wait_for_hge: &wait_for_hge
run:
name: waiting for graphql-engine to be ready
command: |
for i in `seq 1 60`;
do
nc -z localhost 8080 && echo Success && exit 0
echo -n .
sleep 1
done
echo Failed waiting for graphql-engine && exit 1
filter_only_vtags: &filter_only_vtags
filters:
tags:
only: /^v.*/
filter_only_release_branches: &filter_only_release_branches
filters:
branches:
only: /^release-v.*/
filter_only_dev_branches: &filter_only_dev_branches
filters:
branches:
only: /^dev.*/
filter_only_vtags_dev_release_branches: &filter_only_vtags_dev_release_branches
filters:
tags:
only: /^v.*/
branches:
only: /^(dev|release).*/
filter_ignore_branches: &filter_ignore_branches
filters:
branches:
ignore: /.*/
filter_ignore_dev_release_branches: &filter_ignore_dev_release_branches
filters:
branches:
ignore: /^(dev|release).*/
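# CircleCI does not run jobs on tag pushes unless an explicit tag filter is set,
# which is why *filter_only_vtags is merged into every job in the workflow below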
setup_remote_docker: &setup_remote_docker
setup_remote_docker:
version: 17.09.0-ce
docker_layer_caching: true
# ref pg environment for testing
test_pg_env: &test_pg_env
environment:
POSTGRES_USER: gql_test
POSTGRES_DB: gql_test
# ref test server job
test_server: &test_server
working_directory: ~/graphql-engine
steps:
- attach_workspace:
at: /build
- *skip_job_on_ciignore
- checkout
- *wait_for_postgres
- run:
name: Install deps
# if the man directories are missing, postgresql-client fails
# to install on Debian
command: |
mkdir -p /usr/share/man/man{1,7}
apt-get update
apt install --yes pgbouncer jq curl postgresql-client-12
- run:
name: Ensure databases are present
environment:
# sqlalchemy throws warnings with postgres://
DATABASE_URL: 'postgresql://gql_test:@localhost:5432/gql_test'
command: |
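# create gql_test2 only if it is missing: the SELECT/grep acts as an
# existence check and gates the CREATE DATABASE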
psql "$DATABASE_URL" -c "SELECT 1 FROM pg_database WHERE datname = 'gql_test2'" | grep -q -F '(1 row)' || psql "$DATABASE_URL" -c 'CREATE DATABASE gql_test2;'
- run:
name: Run tests
environment:
# set the default number of RTS threads to 2,
# since CircleCI allocates 2 CPUs per test container
GHCRTS: -N2
HASURA_GRAPHQL_DATABASE_URL: postgresql://gql_test:@localhost:5432/gql_test
HASURA_GRAPHQL_DATABASE_URL_2: postgresql://gql_test:@localhost:5432/gql_test2
GRAPHQL_ENGINE: /build/_server_output/graphql-engine
GRAPHQL_ENGINE_TESTS: /build/_server_output/graphql-engine-tests
MIX_FILES_FOLDER: /build/_server_output/mix
command: OUTPUT_FOLDER="/build/_server_test_output/$PG_VERSION" .circleci/test-server.sh
- store_artifacts:
path: /build/_server_test_output
destination: server_test
version: 2
jobs:
# check if this should be built or not; fails if the
# changes only contain files in .ciignore
check_build_worthiness:
docker:
- image: hasura/graphql-engine-cli-builder:20191205
working_directory: ~/graphql-engine
steps:
- attach_workspace:
at: /build
- checkout
- run:
name: check build worthiness
command: .circleci/ciignore.sh
- persist_to_workspace:
root: /build
paths:
- ciignore
build_server:
docker:
- image: hasura/graphql-engine-server-builder:2020-01-14
resource_class: large
working_directory: ~/graphql-engine
steps:
- attach_workspace:
at: /build
- *skip_job_on_ciignore
- checkout
- restore_cache:
keys:
- cabal-store-v1-{{ checksum "server/cabal.project" }}-{{ checksum "server/graphql-engine.cabal" }}-{{ checksum "server/cabal.project.freeze" }}
- cabal-store-v1-{{ checksum "server/cabal.project" }}-{{ checksum "server/graphql-engine.cabal" }}-
- cabal-store-v1-{{ checksum "server/cabal.project" }}-
- cabal-store-v1-
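# restore_cache tries the keys above top to bottom and restores the first
# (most specific) match, so a changed freeze file still falls back to an
# older cabal store instead of rebuilding everything from scratch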
- run:
name: Install latest postgresql client tools
command: |
apt-get -y update
apt-get -y install postgresql-client-12
- run:
name: Build the binary
working_directory: ./server
command: |
# for PRs a non-optimized build with coverage, otherwise an optimized build
if [[ "$CIRCLE_BRANCH" =~ ^(dev|release) || "$CIRCLE_TAG" =~ ^v ]]; then
echo "Branch starts with dev or release, or tagged commit starts with v. Optimized build"
make ci-build
else
echo "Non-release branch, build with coverage"
make enable_coverage=true ci-build
fi
- save_cache:
key: cabal-store-v1-{{ checksum "server/cabal.project" }}-{{ checksum "server/graphql-engine.cabal" }}-{{ checksum "server/cabal.project.freeze" }}
paths:
- ~/.cabal/packages
- ~/.cabal/store
- store_artifacts:
path: /build/_server_output
destination: server
- persist_to_workspace:
root: /build
paths: [_server_output]
build_image:
docker:
- image: hasura/graphql-engine-server-builder:2020-01-14
working_directory: ~/graphql-engine
steps:
- attach_workspace:
at: /build
- *skip_job_on_ciignore
- checkout
- *setup_remote_docker
- run:
name: Build the docker image
working_directory: ./server
command: |
# copy console assets to the rootfs - packaging/build/rootfs
export ROOTFS=packaging/build/rootfs
mkdir -p "$ROOTFS/srv"
cp -r /build/_console_output/assets "$ROOTFS/srv/console-assets"
# build and save the image
make ci-image
make ci-save-image
- store_artifacts:
path: /build/_server_output/image.tar
destination: server/image.tar
- persist_to_workspace:
root: /build
paths: [_server_output/image.tar]
# job that runs once all server tests pass. later we can collect test
# reports, publish them, etc.
all_server_tests_pass:
docker:
- image: alpine:edge
steps:
- run:
name: All server tests passed
command: echo 'all server tests passed!'
# pytest the server with postgres versions >= 9.5
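# each test_server_pg_* job merges the shared *test_server steps and the
# *test_pg_env credentials for the secondary postgres container, overriding
# only PG_VERSION (plus POSTGIS_VERSION where relevant) and the postgres image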
test_server_pg_12:
<<: *test_server
environment:
PG_VERSION: "12"
POSTGIS_VERSION: "3.0.0"
docker:
- image: hasura/graphql-engine-server-builder:2020-01-14
- image: hasura/postgres-12.0-alpine-postgis3:6cbd863d47c0
<<: *test_pg_env
test_server_pg_11:
<<: *test_server
environment:
PG_VERSION: "11"
docker:
- image: hasura/graphql-engine-server-builder:2020-01-14
- image: circleci/postgres:11-alpine-postgis
<<: *test_pg_env
test_server_pg_10:
<<: *test_server
environment:
PG_VERSION: "10"
docker:
- image: hasura/graphql-engine-server-builder:2020-01-14
- image: circleci/postgres:10-alpine-postgis
<<: *test_pg_env
test_server_pg_9.6:
<<: *test_server
environment:
PG_VERSION: "9_6"
docker:
- image: hasura/graphql-engine-server-builder:2020-01-14
- image: circleci/postgres:9.6-alpine-postgis
<<: *test_pg_env
test_server_pg_9.5:
<<: *test_server
environment:
PG_VERSION: "9_5"
docker:
- image: hasura/graphql-engine-server-builder:2020-01-14
- image: circleci/postgres:9.5-alpine-postgis
<<: *test_pg_env
server_unit_tests:
resource_class: large
docker:
- image: hasura/graphql-engine-server-builder:2020-01-14
working_directory: ~/graphql-engine
steps:
- attach_workspace:
at: /build
- *skip_job_on_ciignore
- checkout
- run:
name: Run unit tests
environment:
GHCRTS: -N2
command: /build/_server_output/graphql-engine-tests unit
test_cli_with_last_release:
docker:
- image: hasura/graphql-engine-cli-builder:20191205
- image: circleci/postgres:10-alpine
environment:
POSTGRES_USER: gql_test
POSTGRES_DB: gql_test
working_directory: ~/graphql-engine
steps:
- attach_workspace:
at: /build
- *skip_job_on_ciignore
- checkout
- restore_cache:
keys:
- cli-gopkg-{{ checksum "cli/go.mod" }}-{{ checksum "cli/go.sum" }}
- run:
name: get cli dependencies
working_directory: cli
command: make deps
- save_cache:
key: cli-gopkg-{{ checksum "cli/go.mod" }}-{{ checksum "cli/go.sum" }}
paths:
- /go/pkg
- *wait_for_postgres
- run:
name: test cli
command: .circleci/test-cli-with-last-release.sh
- store_artifacts:
path: /build/_cli_output
destination: cli
# test and build cli
test_and_build_cli:
docker:
- image: hasura/graphql-engine-cli-builder:20191205
- image: circleci/postgres:10-alpine
environment:
POSTGRES_USER: gql_test
POSTGRES_DB: gql_test
working_directory: ~/graphql-engine
steps:
- attach_workspace:
at: /build
- *skip_job_on_ciignore
- checkout
- restore_cache:
keys:
- cli-gopkg-{{ checksum "cli/go.mod" }}-{{ checksum "cli/go.sum" }}
- run:
name: get cli dependencies
working_directory: cli
command: make deps
- save_cache:
key: cli-gopkg-{{ checksum "cli/go.mod" }}-{{ checksum "cli/go.sum" }}
paths:
- /go/pkg
- *wait_for_postgres
- run:
name: test cli
command: .circleci/test-cli.sh
- run:
name: build cli
working_directory: cli
command: |
make build
make compress
make ci-copy-binary
- store_artifacts:
path: /build/_cli_output
destination: cli
- persist_to_workspace:
root: /build
paths:
- _cli_output
# build console assets
build_console:
docker:
- image: hasura/graphql-engine-console-builder:20190515
working_directory: ~/graphql-engine
steps:
- attach_workspace:
at: /build
- *skip_job_on_ciignore
- checkout
- restore_cache:
key:
console-npm-cache-{{ checksum "console/package.json" }}-{{ checksum "console/package-lock.json" }}
- run:
name: install dependencies
working_directory: console
command: make ci-deps
- save_cache:
key:
console-npm-cache-{{ checksum "console/package.json" }}-{{ checksum "console/package-lock.json" }}
paths:
- console/node_modules
- ~/.npm
- ~/.cache
- run:
name: build console
working_directory: console
command: |
make build
make ci-copy-assets
- run:
name: setup assets directory
command: |
export ASSETS_PATH=/build/_console_output/assets
mkdir -p "$ASSETS_PATH"
gsutil -m cp -r gs://graphql-engine-cdn.hasura.io/console/assets/common "$ASSETS_PATH"
# gsutil decompresses files automatically, need to compress font-awesome again
# (see https://github.com/GoogleCloudPlatform/gsutil/issues/515)
mv "$ASSETS_PATH/common/css/font-awesome.min.css.gz" "$ASSETS_PATH/common/css/font-awesome.min.css"
gzip "$ASSETS_PATH/common/css/font-awesome.min.css"
# copy versioned assets and compress them
mkdir -p "$ASSETS_PATH/versioned"
cp "$ASSETS_PATH"/../{main.js,main.css,vendor.js} "$ASSETS_PATH/versioned/"
gzip -r "$ASSETS_PATH/versioned/"
- store_artifacts:
path: /build/_console_output
destination: console
- persist_to_workspace:
root: /build
paths:
- _console_output
# test console
test_console:
docker:
- image: hasura/graphql-engine-console-builder:v0.3
environment:
CYPRESS_KEY: 983be0db-0f19-40cc-bfc4-194fcacd85e1
GHCRTS: -N1
- image: circleci/postgres:10-alpine-postgis
environment:
POSTGRES_USER: gql_test
POSTGRES_DB: gql_test
working_directory: ~/graphql-engine
parallelism: 4
steps:
- attach_workspace:
at: /build
- *skip_job_on_ciignore
- checkout
- restore_cache:
key:
console-npm-cache-{{ checksum "console/package.json" }}-{{ checksum "console/package-lock.json" }}
- run:
name: install dependencies
working_directory: console
command: make ci-deps
- save_cache:
key:
console-npm-cache-{{ checksum "console/package.json" }}-{{ checksum "console/package-lock.json" }}
paths:
- console/node_modules
- ~/.npm
- ~/.cache
- *wait_for_postgres
- run:
name: test console
command: .circleci/test-console.sh
# test server upgrade from last version to current build
test_server_upgrade:
docker:
- image: hasura/graphql-engine-upgrade-tester:v0.4
environment:
HASURA_GRAPHQL_DATABASE_URL: postgresql://gql_test:@localhost:5432/gql_test
- image: circleci/postgres:10-alpine
environment:
POSTGRES_USER: gql_test
POSTGRES_DB: gql_test
working_directory: ~/graphql-engine
steps:
- attach_workspace:
at: /build
- *skip_job_on_ciignore
- checkout
- run:
name: upgrade_test
command: .circleci/server-upgrade/run.sh
- store_artifacts:
path: /build/_server_output
destination: server
deploy:
docker:
- image: hasura/graphql-engine-deployer:v0.3
working_directory: ~/graphql-engine
steps:
- attach_workspace:
at: /build
- *skip_job_on_ciignore
- *setup_remote_docker
- checkout
- run:
name: deploy
command: .circleci/deploy.sh
workflows:
version: 2
workflow_v20190516:
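# job graph: check_build_worthiness gates everything; the server test jobs fan
# out from build_server and converge in all_server_tests_pass; deploy runs only
# for v-tags and dev/release branches, after build_image, test_console and
# all_server_tests_pass succeed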
jobs:
- check_build_worthiness: *filter_only_vtags
- build_console:
<<: *filter_only_vtags
requires:
- check_build_worthiness
- build_server:
<<: *filter_only_vtags
requires:
- check_build_worthiness
- build_image:
<<: *filter_only_vtags
requires:
- build_server
- build_console
- test_server_pg_12:
<<: *filter_only_vtags
requires:
- build_server
- test_server_pg_11:
<<: *filter_only_vtags
requires:
- build_server
- test_server_pg_10:
<<: *filter_only_vtags
requires:
- build_server
- test_server_pg_9.6:
<<: *filter_only_vtags
requires:
- build_server
- test_server_pg_9.5:
<<: *filter_only_vtags
requires:
- build_server
- server_unit_tests:
<<: *filter_only_vtags
requires:
- build_server
- test_server_upgrade:
<<: *filter_only_vtags
requires:
- build_server
- all_server_tests_pass:
<<: *filter_only_vtags
requires:
- test_server_pg_12
- test_server_pg_11
- test_server_pg_10
- test_server_pg_9.6
- test_server_pg_9.5
- server_unit_tests
- test_server_upgrade
- test_cli_with_last_release:
<<: *filter_only_vtags
requires:
- check_build_worthiness
- test_and_build_cli:
<<: *filter_only_vtags
requires:
- build_server
- test_console:
<<: *filter_only_vtags
requires:
- build_console
- test_and_build_cli
- test_cli_with_last_release
- deploy:
<<: *filter_only_vtags_dev_release_branches
requires:
- build_image
- test_console
- all_server_tests_pass