#!/bin/bash

# Benchmark runner: builds and runs the benchmark suites, optionally
# comparing two commits, and generates reports/graphs from the results.
# Print usage/help text on stdout and exit.
print_help () {
    echo "Usage: $0 "
    echo " [--benchmarks <all|linear|linear-async|linear-rate|nested|fileio|array|base>]"
    echo " [--group-diff]"
    echo " [--graphs]"
    echo " [--no-measure]"
    echo " [--append] "
    echo " [--compare] [--base commit] [--candidate commit]"
    echo " [--slow]"
    echo " -- <gauge options>"
    echo
    echo "Multiple benchmarks can be specified as a space separated list"
    echo " e.g. --benchmarks \"linear nested\""
    echo
    echo "--group-diff is used to compare groups within a single benchmark"
    echo " e.g. StreamD vs StreamK in base benchmark."
    echo
    echo "When using --compare, by default comparative chart of HEAD^ vs HEAD"
    echo "commit is generated, in the 'charts' directory."
    echo "Use --base and --candidate to select the commits to compare."
    echo
    # "guage" was a typo: the benchmarking tool is called gauge.
    echo "Any arguments after a '--' are passed directly to gauge"
    exit
}
# $1: message
# Report a fatal error on stderr and abort the script.
die () {
    echo -e "Error: $1" >&2
    exit 1
}
# Resolve $BENCHMARKS: fall back to the default suite when unset/empty and
# expand the "all" alias to the full suite list.
set_benchmarks() {
    case "$BENCHMARKS" in
        "") BENCHMARKS=$DEFAULT_BENCHMARKS ;;
        all) BENCHMARKS=$ALL_BENCHMARKS ;;
    esac
    echo "Using benchmark suites [$BENCHMARKS]"
}
# Locate the "chart" report generator via $WHICH_COMMAND.
# Prints its path on success; returns 1 when not found/executable.
find_report_prog() {
    local prog
    hash -r    # drop stale entries from bash's command-location cache
    prog=$($WHICH_COMMAND chart)
    test -x "$prog" || return 1
    echo $prog
}
# Build the "chart" report generator the first time it is found missing.
# Returns 0 when the executable exists or a build was kicked off,
# 1 when it is still missing after a build was already attempted.
build_report_prog() {
    local exe
    exe=$($WHICH_COMMAND chart)
    hash -r
    if test -x "$exe"
    then
        return 0
    fi
    if test "$BUILD_ONCE" = "0"
    then
        echo "Building bench-graph executables"
        BUILD_ONCE=1    # only ever attempt the build once per run
        $BUILD_CHART_EXE || die "build failed"
        return 0
    fi
    return 1
}
# Unless raw results were requested (--raw), make sure the bench-graph
# report generator is built and locatable; abort otherwise.
build_report_progs() {
    test "$RAW" = "0" || return 0
    build_report_prog || exit 1
    local prog
    prog=$(find_report_prog) || die "Cannot find bench-graph executable"
    echo "Using bench-graph executable [$prog]"
}
# We run the benchmarks in isolation, in a separate process, so that
# different benchmarks do not interfere with each other. For that we must
# pass the benchmark exe path to gauge as an argument; it cannot currently
# find its own path.

# The path depends on the architecture and cabal version. If this script
# fails to find the exe, locate it manually with:
# find .stack-work/ -type f -name "benchmarks"

# $1: benchmark executable name
# Print the stack-built benchmark binary's path; return 1 when missing.
stack_bench_prog () {
    local name=$1
    local prog
    prog=$(stack path --dist-dir)/build/$name/$name
    if test -x "$prog"
    then
        echo $prog
    else
        return 1
    fi
}
# $1: benchmark executable name
# Print the cabal-built benchmark binary's path; return 1 when missing.
cabal_bench_prog () {
    local bench_name=$1
    local bench_prog
    # Was `$WHICH_COMMAND $1`: use the declared local (and $() instead of
    # backticks) for consistency with the rest of the script.
    bench_prog=$($WHICH_COMMAND $bench_name)
    if test -x "$bench_prog"
    then
        echo $bench_prog
    else
        return 1
    fi
}
# $1: benchmark name
# Print the CSV results path for the given benchmark suite.
bench_output_file() {
    echo "charts/$1/results.csv"
}
# --min-duration 0 means exactly one iteration per sample. We use a million
# iterations in the benchmarking code explicitly and do not use the
# iterations done by the benchmarking tool.
#
# The benchmarking tool by default discards the first iteration to remove
# aberrations due to initial evaluations etc. We do not discard it because we
# are anyway doing many iterations in the benchmarking code, so any constant
# factor gets amortized — and it is a cost we pay in real life anyway.
#
# A --min-samples value can be passed on the command line (after '--') when
# more than one sample is wanted.

# $1: benchmark name
# Run one benchmark suite, writing raw CSV results under charts/.
run_bench () {
    local name=$1
    local out
    local prog
    out=$(bench_output_file $name)
    prog=$($GET_BENCH_PROG $name) || \
        die "Cannot find benchmark executable for benchmark $name"

    mkdir -p "$(dirname $out)"

    echo "Running benchmark $name ..."
    # Run each benchmark in its own process via --measure-with (see note
    # above about isolation).
    $prog $SPEED_OPTIONS \
        --csvraw=$out \
        -v 2 \
        --measure-with $prog $GAUGE_ARGS || die "Benchmarking failed"
}
# $1: space separated list of benchmark names
# Run each named benchmark suite in turn.
run_benches() {
    for i in $1; do
        run_bench $i
    done
}
# $1: space separated list of benchmark names
# Benchmark the base commit and then the candidate commit; both runs append
# to the same CSV files so the report generator can compare them.
run_benches_comparing() {
    local list=$1

    # Default candidate to HEAD and base to the candidate's parent.
    test -n "$CANDIDATE" || CANDIDATE=$(git rev-parse HEAD)
    # XXX Should be where the current branch is forked from master
    test -n "$BASE" || BASE="$CANDIDATE^"
    echo "Comparing baseline commit [$BASE] with candidate [$CANDIDATE]"

    echo "Checking out base commit [$BASE] for benchmarking"
    git checkout "$BASE" || die "Checkout of base commit [$BASE] failed"
    $BUILD_BENCH || die "build failed"
    run_benches "$list"

    echo "Checking out candidate commit [$CANDIDATE] for benchmarking"
    git checkout "$CANDIDATE" || \
        die "Checkout of candidate [$CANDIDATE] commit failed"
    $BUILD_BENCH || die "build failed"
    run_benches "$list"
    # XXX reset back to the original commit
}
# $1: benchmark name
# Unless appending (--append), move any existing results file aside to
# <file>.prev so a fresh run starts clean.
backup_output_file() {
    local out
    out=$(bench_output_file $1)
    if test -e "$out" -a "$APPEND" != 1
    then
        mv -f -v "$out" "${out}.prev"
    fi
}
# $1: space separated list of benchmark names
# Back up old results, then run the benchmarks — against two commits when
# --compare was given, otherwise just the working tree.
run_measurements() {
    local bench_list=$1

    for i in $bench_list; do
        backup_output_file $i
    done

    if test "$COMPARE" = "0"
    then
        run_benches "$bench_list"
    else
        run_benches_comparing "$bench_list"
    fi
}
# $1: space separated list of benchmark names
# Generate a report (and graphs when --graphs is set) for each benchmark.
run_reports() {
    local prog
    local opts=""
    prog=$(find_report_prog) || \
        die "Cannot find bench-graph executable"
    echo

    # The flags are the same for every suite; compute them once.
    test "$GRAPH" = 1 && opts="--graphs"
    test "$GROUP_DIFF" = 1 && opts="$opts --group-diff"

    for i in $1
    do
        echo "Generating reports for ${i}..."
        $prog $opts --benchmark $i
    done
}
#-----------------------------------------------------------------------------
# Execution starts here
#-----------------------------------------------------------------------------

# Benchmark suite selection.
DEFAULT_BENCHMARKS="linear"
ALL_BENCHMARKS="linear linear-async linear-rate nested fileio array base"

# Report options.
GROUP_DIFF=0

# Commit comparison options (--compare / --base / --candidate).
COMPARE=0
BASE=
CANDIDATE=

# Measurement options.
APPEND=0
RAW=0
GRAPH=0
MEASURE=1
SPEED_OPTIONS="--quick --min-samples 10 --time-limit 1 --min-duration 0"

GAUGE_ARGS=
BUILD_ONCE=0
# Set to 1 to drive builds through stack instead of cabal v2-*.
USE_STACK=0

# Used by cabal_which to pick build artifacts matching the current compiler.
GHC_VERSION=$(ghc --numeric-version)
# $1: executable name
# Locate a cabal v2-build artifact for the current GHC version under
# dist-newstyle.
cabal_which() {
    local name=$1
    find dist-newstyle -type f -path "*${GHC_VERSION}*/${name}"
}
# Select the toolchain-specific commands (cabal v2-* by default).
# NOTE(review): STACK_BUILD_FLAGS/CABAL_BUILD_FLAGS are only assigned later
# (after --benchmarks is parsed), so they are empty when BUILD_BENCH is
# composed here — confirm whether the dev flags ever take effect.
if test "$USE_STACK" != "1"
then
    # XXX cabal issue: "cabal v2-exec which" cannot find benchmark/test
    # executables, so we search dist-newstyle ourselves.
    #WHICH_COMMAND="cabal v2-exec which"
    WHICH_COMMAND=cabal_which
    BUILD_CHART_EXE="cabal v2-build --flags dev chart"
    GET_BENCH_PROG=cabal_bench_prog
    BUILD_BENCH="cabal v2-build $CABAL_BUILD_FLAGS --enable-benchmarks"
else
    WHICH_COMMAND="stack exec which"
    BUILD_CHART_EXE="stack build --flag streamly:dev"
    GET_BENCH_PROG=stack_bench_prog
    BUILD_BENCH="stack build $STACK_BUILD_FLAGS --bench --no-run-benchmarks"
fi
#-----------------------------------------------------------------------------
# Read command line
#-----------------------------------------------------------------------------

while test -n "$1"
do
    case $1 in
        -h|--help|help) print_help ;;
        # options with arguments
        --slow) SPEED_OPTIONS="--min-duration 0"; shift ;;
        --benchmarks) shift; BENCHMARKS=$1; shift ;;
        --base) shift; BASE=$1; shift ;;
        --candidate) shift; CANDIDATE=$1; shift ;;
        # flags
        --compare) COMPARE=1; shift ;;
        --raw) RAW=1; shift ;;
        --append) APPEND=1; shift ;;
        --group-diff) GROUP_DIFF=1; shift ;;
        --graphs) GRAPH=1; shift ;;
        --no-measure) MEASURE=0; shift ;;
        --) shift; break ;;        # everything after -- goes to gauge
        -*|--*) print_help ;;      # unknown option
        *) break ;;
    esac
done
GAUGE_ARGS=$*

# NOTE(review): $STACK is never assigned anywhere in this script, so this
# always prints "[]" — confirm whether the message is stale.
echo "Using stack command [$STACK]"
set_benchmarks
# The "base" suite needs the dev flag to be built.
case "$BENCHMARKS" in
    *base*)
        STACK_BUILD_FLAGS="--flag streamly:dev"
        CABAL_BUILD_FLAGS="--flags dev"
        ;;
esac
#-----------------------------------------------------------------------------
# Build stuff
#-----------------------------------------------------------------------------

# The report progs must be built at the current (latest) commit, before any
# other commit is checked out for benchmarking.
build_report_progs "$BENCHMARKS"

#-----------------------------------------------------------------------------
# Run benchmarks
#-----------------------------------------------------------------------------

if test "$MEASURE" = "1"
then
    $BUILD_BENCH || die "build failed"
    run_measurements "$BENCHMARKS"
fi

#-----------------------------------------------------------------------------
# Run reports
#-----------------------------------------------------------------------------

if test "$RAW" = "0"
then
    run_reports "$BENCHMARKS"
fi