graphql-engine/server/benchmarks/benchmark_sets/remote_schema/config.query.yaml
Brandon Simmons fc9cc14943 ci/benchmarks: don't measure decompression in k6 (NOT REAL PERFORMANCE IMPROVEMENT)
See: https://github.com/grafana/k6/issues/2685

It might be interesting to take decompression time into consideration when thinking about performance, but in general I think doing so is surprising, and I wasted a lot of time trying to figure out why my optimizations to the compression codepath weren't improving things to the degree I expected.

The downside here is that we lose error reporting, so you should only set
discardResponseBodies: true after the query has been tested.

PR-URL: https://github.com/hasura/graphql-engine-mono/pull/5940
GitOrigin-RevId: 82a589a59b93f10ffb5391e4a3190459fb6e613b
2022-09-21 02:07:54 +00:00
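
For illustration only, here is a minimal sketch of how a not-yet-verified entry might be added with response bodies kept, so k6 can still report failing GraphQL responses. The entry name and query are hypothetical, and discardResponseBodies: false is simply k6's default; once the query is known to work, flip it to true as in the entries below:

  - name: my_unverified_query        # hypothetical, for illustration only
    <<: *k6_custom
    options:
      k6:
        # Keep bodies (k6's default) until the query has been verified,
        # then set this to true to exclude decompression from measurements:
        discardResponseBodies: false
        scenarios:
          main:
            <<: *settings
            rate: *low_load
            preAllocatedVUs: 10
    query: |
      query MyQuery {
        heroes {
          name
        }
      }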


# This tells graphql-bench that it's testing a hasura instance and should
# collect some additional metrics:
extended_hasura_checks: true
headers:
  Accept-Encoding: gzip
# Anchors to help us DRY below; settings here may be overridden selectively
constants:
  scalars:
    - &very_low_load 1
    - &low_load 20
    - &high_load 500
  k6_custom: &k6_custom
    tools: [k6]
    execution_strategy: CUSTOM
    settings: &settings
      # This is equivalent to wrk2's approach:
      executor: 'constant-arrival-rate'
      timeUnit: '1s'
      maxVUs: 500 # NOTE: required, else defaults to `preAllocatedVUs`
      duration: '60s'
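# For reference, a sketch of how the anchors above expand (not an exact dump):
# a scenario written as
#
#   main:
#     <<: *settings
#     rate: *low_load
#     preAllocatedVUs: 10
#
# is roughly equivalent, after YAML merge-key and alias expansion, to
#
#   main:
#     executor: 'constant-arrival-rate'
#     timeUnit: '1s'
#     maxVUs: 500
#     duration: '60s'
#     rate: 20
#     preAllocatedVUs: 10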
queries:
  ############################################################################
  # A very simple query returning no rows. A baseline for the benchmark below.
  - name: small_query_low_load
    <<: *k6_custom
    options:
      k6:
        # See note on Chinook.simple_query_low_load
        discardResponseBodies: true
        scenarios:
          main:
            <<: *settings
            rate: *low_load
            # tune this so it's just high enough that we can expect to not need
            # to allocate during the test:
            preAllocatedVUs: 10
    query: |
      query MyQuery {
        heroes {
          name
          id
          __typename
        }
      }
  - name: small_query_low_load_customized
    <<: *k6_custom
    options:
      k6:
        # See note on Chinook.simple_query_low_load
        discardResponseBodies: true
        scenarios:
          main:
            <<: *settings
            rate: *low_load
            # tune this so it's just high enough that we can expect to not need
            # to allocate during the test:
            preAllocatedVUs: 10
    query: |
      query MyQuery {
        foo {
          heroes {
            foo_name
            foo_id
            __typename
          }
        }
      }
  - name: big_query_low_load
    <<: *k6_custom
    options:
      k6:
        # See note on Chinook.simple_query_low_load
        discardResponseBodies: true
        scenarios:
          main:
            <<: *settings
            rate: *very_low_load
            # tune this so it's just high enough that we can expect to not need
            # to allocate during the test:
            preAllocatedVUs: 10
    query: |
      query MyQuery {
        start {
          hello
          __typename
          big {
            hello
            ... on Big {
              hello
              big {
                hello
                __typename
                ... on Big {
                  hello
                  __typename
                  big {
                    hello
                    __typename
                    ... on Big {
                      hello
                      __typename
                      big {
                        hello
                        __typename
                        ... on Big {
                          hello
                          big {
                            hello
                            __typename
                          }
                          many {
                            hello
                          }
                        }
                      }
                    }
                  }
                }
              }
            }
          }
          many {
            ... on Big {
              hello
              __typename
              many {
                ... on Big {
                  hello
                  __typename
                  many {
                    ... on Big {
                      hello
                      __typename
                      many {
                        ... on Big {
                          hello
                          __typename
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
  - name: big_query_low_load_customized
    <<: *k6_custom
    options:
      k6:
        # See note on Chinook.simple_query_low_load
        discardResponseBodies: true
        scenarios:
          main:
            <<: *settings
            rate: *very_low_load
            # tune this so it's just high enough that we can expect to not need
            # to allocate during the test:
            preAllocatedVUs: 10
    query: |
      query MyQuery {
        big_foo {
          start {
            foo_hello
            __typename
            foo_big {
              foo_hello
              ... on foo_Big_bar {
                foo_hello
                foo_big {
                  foo_hello
                  __typename
                  ... on foo_Big_bar {
                    foo_hello
                    __typename
                    foo_big {
                      foo_hello
                      __typename
                      ... on foo_Big_bar {
                        foo_hello
                        __typename
                        foo_big {
                          foo_hello
                          __typename
                          ... on foo_Big_bar {
                            foo_hello
                            foo_big {
                              foo_hello
                              __typename
                            }
                            foo_many {
                              foo_hello
                            }
                          }
                        }
                      }
                    }
                  }
                }
              }
            }
            foo_many {
              ... on foo_Big_bar {
                foo_hello
                __typename
                foo_many {
                  ... on foo_Big_bar {
                    foo_hello
                    __typename
                    foo_many {
                      ... on foo_Big_bar {
                        foo_hello
                        __typename
                        foo_many {
                          ... on foo_Big_bar {
                            foo_hello
                            __typename
                          }
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }