Remove shell versions of bench.sh and test.sh

Harendra Kumar 2022-04-28 18:59:27 +05:30
parent 5435aa1b16
commit 7d12c647f1
11 changed files with 0 additions and 1806 deletions

@@ -1,272 +0,0 @@
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE ScopedTypeVariables #-}
module Main where
import Control.Exception (catch, ErrorCall(..))
import Control.Monad (mzero)
import Control.Monad.IO.Class (liftIO)
import Control.Monad.Trans.Maybe
import Control.Monad.Trans.State
import Data.Char (toLower)
import Data.List
import System.Environment (getArgs)
import Text.Read (readMaybe)
import BenchShow
------------------------------------------------------------------------------
-- Command line parsing
------------------------------------------------------------------------------
data BenchType
= Compare String
| Standard String
deriving Show
data Options = Options
{ genGraphs :: Bool
, sortByName :: Bool
, useGauge :: Bool
, benchType :: Maybe BenchType
, fields :: [String]
, diffStyle :: GroupStyle
, cutOffPercent :: Double
} deriving Show
defaultOptions :: Options
defaultOptions = Options False False False Nothing ["time"] PercentDiff 0
setGenGraphs :: Monad m => Bool -> StateT (a, Options) m ()
setGenGraphs val = do
(args, opts) <- get
put (args, opts { genGraphs = val })
setSortByName :: Monad m => Bool -> StateT (a, Options) m ()
setSortByName val = do
(args, opts) <- get
put (args, opts { sortByName = val })
setUseGauge :: Monad m => Bool -> StateT (a, Options) m ()
setUseGauge val = do
(args, opts) <- get
put (args, opts { useGauge = val })
setBenchType :: Monad m => BenchType -> StateT (a, Options) m ()
setBenchType val = do
(args, opts) <- get
put (args, opts { benchType = Just val })
setFields :: Monad m => [String] -> StateT (a, Options) m ()
setFields val = do
(args, opts) <- get
put (args, opts { fields = val })
setDiff :: Monad m => String -> StateT (a, Options) m ()
setDiff val = do
(args, opts) <- get
let cmpStyle =
case val of
"absolute" -> Absolute
"multiples" -> Multiples
"percent" -> PercentDiff
x -> error $ "Unknown diff option: " ++ show x
in put (args, opts { diffStyle = cmpStyle })
setCutOff :: Monad m => String -> StateT (a, Options) m ()
setCutOff val = do
(args, opts) <- get
case readMaybe val of
Just x -> put (args, opts { cutOffPercent = x })
Nothing -> error $ "Invalid cutoff value: " ++ show val
-- Like the shell "shift" to shift the command line arguments
shift :: StateT ([String], Options) (MaybeT IO) (Maybe String)
shift = do
s <- get
case s of
([], _) -> return Nothing
(x : xs, opts) -> put (xs, opts) >> return (Just x)
parseBench :: StateT ([String], Options) (MaybeT IO) ()
parseBench = do
x <- shift
case x of
Just str | "_cmp" `isSuffixOf` str -> setBenchType (Compare str)
Just str -> setBenchType (Standard str)
Nothing -> do
liftIO $ putStrLn "please provide a benchmark type "
mzero
parseFields :: StateT ([String], Options) (MaybeT IO) ()
parseFields = do
x <- shift
case x of
Just str -> setFields (words str)
Nothing -> do
liftIO $ putStrLn
"please provide a list of fields after --fields"
mzero
parseDiff :: StateT ([String], Options) (MaybeT IO) ()
parseDiff = do
x <- shift
case x of
Just str -> setDiff str
Nothing -> do
liftIO $ putStrLn "please provide a diff type"
mzero
parseCutOff :: StateT ([String], Options) (MaybeT IO) ()
parseCutOff = do
x <- shift
case x of
Just str -> setCutOff str
Nothing -> do
liftIO $ putStrLn "please provide a cutoff percent"
mzero
-- totally imperative style option parsing
parseOptions :: IO (Maybe Options)
parseOptions = do
args <- getArgs
runMaybeT $ flip evalStateT (args, defaultOptions) $ do
parseLoop
fmap snd get
where
parseOpt opt =
case opt of
"--graphs" -> setGenGraphs True
"--sort-by-name" -> setSortByName True
"--use-gauge" -> setUseGauge True
"--benchmark" -> parseBench
"--fields" -> parseFields
"--diff-style" -> parseDiff
"--diff-cutoff-percent" -> parseCutOff
str -> do
liftIO $ putStrLn $ "Unrecognized option " <> str
mzero
parseLoop = do
next <- shift
case next of
Just opt -> parseOpt opt >> parseLoop
Nothing -> return ()
ignoringErr :: IO () -> IO ()
ignoringErr a = catch a (\(ErrorCall err :: ErrorCall) ->
putStrLn $ "Failed with error:\n" <> err <> "\nSkipping.")
------------------------------------------------------------------------------
-- Generic
------------------------------------------------------------------------------
makeGraphs :: String -> Config -> String -> IO ()
makeGraphs name cfg inputFile =
ignoringErr $ graph inputFile name cfg
------------------------------------------------------------------------------
-- Arrays
------------------------------------------------------------------------------
showComparisons :: Options -> Config -> FilePath -> FilePath -> IO ()
showComparisons Options{..} cfg inp out =
let cfg1 = cfg { classifyBenchmark = classifyComparison }
in if genGraphs
then ignoringErr $ graph inp "comparison"
cfg1 { outputDir = Just out
, presentation = Groups Absolute
}
else ignoringErr $ report inp Nothing cfg1
where
separator = if useGauge then '/' else '.'
dropComponent sep = dropWhile (== sep) . dropWhile (/= sep)
-- In case of tasty-bench the names could be like
-- All.Data.Array.Prim.Pinned/o-1-space.generation.show
-- All.Data.Array.Foreign/o-1-space.generation.show
classifyComparison b =
let b1 =
if useGauge
then b
else dropComponent separator b --- drop "All." at the beginning
in Just
( takeWhile (/= '/') b1
, dropComponent '/' b1 -- for tasty-bench drop up to "/"
)
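-- For illustration (not part of the original logic): with useGauge off, a
-- tasty-bench name such as "All.Data.Array.Foreign/o-1-space.generation.show"
-- first loses the leading "All." component, and classifyComparison then splits
-- the remainder at the '/' into the comparison group and the benchmark name:
--
--   Just ("Data.Array.Foreign", "o-1-space.generation.show")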
------------------------------------------------------------------------------
-- text reports
------------------------------------------------------------------------------
selectBench
:: Options
-> (SortColumn -> Maybe GroupStyle -> Either String [(String, Double)])
-> [String]
selectBench Options{..} f =
-- Apply filterPred only if at least 2 columns exist
let colVals =
case f (ColumnIndex 1) (Just PercentDiff) of
Left _ -> either error id $ f (ColumnIndex 0) (Just PercentDiff)
Right xs -> filter (filterPred . snd) xs
in reverse
$ fmap fst
$ sortFunc colVals
where
sortFunc = if sortByName then sortOn fst else sortOn snd
filterPred x
| isInfinite x = False
| isNaN x = False
| cutOffPercent > 0 = x >= cutOffPercent
| cutOffPercent < 0 = x <= cutOffPercent
| otherwise = True
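-- A worked example of the cutoff logic above (benchmark names and values are
-- illustrative only): with cutOffPercent = 10 and second-column percent diffs
-- [("decode", 25.0), ("encode", 3.0), ("fold", -12.0)], filterPred keeps only
-- "decode" (25.0 >= 10), i.e. only regressions of at least 10%; a negative
-- cutoff such as -10 would instead keep only "fold" (-12.0 <= -10), i.e. only
-- improvements.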
benchShow ::
Options
-> Config
-> (Config -> String -> IO ())
-> String
-> FilePath
-> IO ()
benchShow Options{..} cfg func inp out =
if genGraphs
then func cfg {outputDir = Just out} inp
else ignoringErr $ report inp Nothing cfg
main :: IO ()
main = do
res <- parseOptions
case res of
Nothing -> do
putStrLn "cannot parse options"
return ()
Just opts@Options{fields = fs, benchType = btype} ->
let cfg = defaultConfig
{ presentation = Groups (diffStyle opts)
, selectBenchmarks = selectBench opts
, selectFields = filter
( flip elem (fmap (fmap toLower) fs)
. fmap toLower
)
}
in case btype of
Just (Compare str) ->
showComparisons opts cfg
{ title = Just str }
("charts/" ++ str ++ "/results.csv")
("charts/" ++ str)
Just (Standard str) ->
benchShow opts cfg
{ title = Just str }
(makeGraphs str)
("charts/" ++ str ++ "/results.csv")
("charts/" ++ str)
Nothing ->
error "Please specify a benchmark using --benchmark."

@@ -1,23 +0,0 @@
cabal-version: 2.2
name: bench-report
version: 0.0.0
synopsis: Benchmark report generation
description: The benchmark reporting application is not included in the overall
cabal project so that the dependencies of the two can remain independent.
Benchmark reporting has many dependencies that usually lag behind
when new GHC releases arrive.
-------------------------------------------------------------------------------
-- benchmark comparison and presentation
-------------------------------------------------------------------------------
executable bench-report
default-language: Haskell2010
ghc-options: -Wall
hs-source-dirs: .
main-is: BenchReport.hs
buildable: True
build-depends:
base >= 4.9 && < 4.17
, bench-show >= 0.3.2 && < 0.4
, transformers >= 0.4 && < 0.6

@@ -1,686 +0,0 @@
#!/usr/bin/env bash
# Note that this script is used in the "streamly" package as well as
# in "streaming-benchmarks" package. Any changes to the script should be
# generic enough so that it works in both the cases.
#------------------------------------------------------------------------------
set -o pipefail
SCRIPT_DIR=$(cd `dirname $0`; pwd)
RUNNING_BENCHMARKS=y
source $SCRIPT_DIR/build-lib.sh
print_help () {
echo "Usage: $0 "
echo " [--benchmarks <"bench1 bench2 ..." | help>]"
echo " [--prefix <benchmark name prefix to match>"
echo " [--fields <"field1 field2 ..." | help>]"
echo " [--sort-by-name]"
echo " [--compare]"
echo " [--diff-style <absolute|percent|multiples>]"
echo " [--diff-cutoff-percent <percent-value>]"
echo " [--graphs]"
echo " [--silent]"
echo " [--no-measure]"
echo " [--append]"
echo " [--long]"
echo " [--slow]"
echo " [--quick]"
echo " [--raw]"
echo " [--dev-build]"
echo " [--use-nix]"
echo " [--with-compiler <compiler exe name>]"
echo " [--cabal-build-options <options>]"
echo " [--rtsopts <opts>]"
#echo " [--commit-compare] [--base <commit>] [--candidate <commit>]"
#echo " -- <gauge options or benchmarks>"
echo
echo "--benchmarks: benchmarks to run, use 'help' for list of benchmarks"
echo "--compare: compare the specified benchmarks with each other"
echo "--diff-cutoff-percent: Diff percentage used for benchmark selection."
echo "This applies only to the second column of the report and makes sense"
echo "only while comparing benchmarks."
echo "A positive cutoff value selects only regressions, whereas, a negative"
echo "cutoff value selects only improvements."
echo "--fields: measurement fields to report, use 'help' for a list"
echo "--graphs: Generate graphical reports"
echo "--no-measure: Don't run benchmarks, run reports from previous results"
echo "--append: Don't overwrite previous results, append for comparison"
echo "--long: Use much longer stream size for infinite stream benchmarks"
echo "--slow: Slightly more accurate results at the expense of speed"
echo "--quick: Faster results, useful for longer benchmarks"
echo "--raw: Run the benchmarks but don't report them. This is useful when"
echo " you only want to work with the csv files generated."
echo "--cabal-build-options: Pass any cabal build options to be used for build"
echo " e.g. --cabal-build-options \"--flag dev\""
echo
echo "When specific space complexity group is chosen then (and only then) "
echo "RTS memory restrictions are used accordingly. For example, "
echo "bench.sh --benchmarks Data.Parser -- Data.Parser/o-1-space "
echo "restricts Heap/Stack space for O(1) characterstics"
echo
#echo "When using --commit-compare, by default comparative chart of HEAD^ vs HEAD"
#echo "commit is generated, in the 'charts' directory."
#echo "Use --base and --candidate to select the commits to compare."
#echo
#echo "Any arguments after a '--' are passed directly to gauge"
exit
}
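# For illustration, a hypothetical invocation through the bench.sh wrapper
# using a comparison group defined in targets.sh: compare the array module
# benchmarks with each other and, in the second column of the report, keep
# only regressions of 10% or more.
#
#   bench.sh --benchmarks array_cmp --fields "allocated cputime" \
#       --diff-cutoff-percent 10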
#-----------------------------------------------------------------------------
# Reporting utility functions
#-----------------------------------------------------------------------------
list_comparisons () {
echo "Comparison groups:"
for i in $COMPARISONS
do
echo -n "$i ["
eval "echo -n \$$i"
echo "]"
done
echo
}
build_report_prog() {
local prog_path=$BENCH_REPORT_DIR/bin/bench-report
hash -r
if test ! -x "$prog_path" -a "$BUILD_ONCE" = "0"
then
echo "Building bench-report executables"
BUILD_ONCE=1
pushd $BENCH_REPORT_DIR
local cmd
cmd="$CABAL_EXECUTABLE install --installdir bin bench-report"
if test "$USE_NIX" -eq 0
then
$cmd || die "bench-report build failed"
else
nix-shell --run "$cmd" || die "bench-report build failed"
fi
popd
elif test ! -x "$prog_path"
then
return 1
fi
return 0
}
build_report_progs() {
if test "$RAW" = "0"
then
build_report_prog || exit 1
local prog
prog=$BENCH_REPORT_DIR/bin/bench-report
test -x $prog || die "Cannot find bench-report executable"
echo "Using bench-report executable [$prog]"
fi
}
# We run the benchmarks in isolation in a separate process so that different
# benchmarks do not interfere with each other. To enable that we need to pass the
# benchmark exe path to gauge as an argument. Unfortunately it cannot find its
# own path currently.
# The path is dependent on the architecture and cabal version.
bench_output_file() {
local bench_name=$1
echo "charts/$bench_name/results.csv"
}
#------------------------------------------------------------------------------
# Speed options
#------------------------------------------------------------------------------
# tasty-bench does not like an option set twice
set_super_quick_mode () {
echo -n super_quick
}
# For certain long benchmarks, if the user has not requested super quick
# mode, we still use a slightly quicker mode.
use_quicker_mode () {
if test "$QUICK_MODE" -eq 0
then
echo quicker
fi
}
#------------------------------------------------------------------------------
# Determine options from benchmark name
#------------------------------------------------------------------------------
# Global environment passed:
# BENCH_EXEC_PATH: the benchmark executable
# RTS_OPTIONS: additional RTS options
# QUICK_MODE: whether we are in quick mode
# USE_GAUGE: whether to use gauge or tasty-bench
# LONG: whether to use a large stream size
# $1: bench name
bench_exec_one() {
local BENCH_NAME_ORIG
BENCH_NAME_ORIG="$1"
shift
local SUPER_QUICK_OPTIONS
local QUICKER_OPTIONS
if test "$USE_GAUGE" -eq 0
then
SUPER_QUICK_OPTIONS="--stdev 1000000"
QUICKER_OPTIONS="--stdev 100"
else
# Do not keep time limit as 0 otherwise GC stats may remain 0 in some cases.
SUPER_QUICK_OPTIONS="--quick --min-duration 0 --time-limit 0.01 --include-first-iter"
QUICKER_OPTIONS="--min-samples 3 --time-limit 1"
fi
local BENCH_NAME0
local BENCH_NAME1
local BENCH_NAME2
local BENCH_NAME
# XXX this is a hack to produce the "/" separated names used by the functions
# that determine options based on the benchmark name. For tasty-bench the
# benchmark names are separated by "." instead of "/" and are prefixed by "All".
#
# For example, All.Prelude.Serial/x.y.z => Prelude.Serial/x/y/z
if test "$USE_GAUGE" -eq 0
then
# Remove the prefix "All."
BENCH_NAME0=$(echo $BENCH_NAME_ORIG | sed -e s/^All\.//)
# Module names could contain dots e.g. "Prelude.Serial". So we insert
# an explicit "/" to separate the module name part and the rest of
# the benchmark name. For example, Prelude.Serial/elimination.drain
BENCH_NAME1=$(echo $BENCH_NAME0 | cut -f1 -d '/')
if test "$BENCH_NAME1" = "$BENCH_NAME0"
then
# There is no "/" separator
BENCH_NAME1=$(echo $BENCH_NAME0 | sed -e 's/\./\//g')
BENCH_NAME2=""
else
BENCH_NAME2=/$(echo $BENCH_NAME0 | cut -f2- -d '/' | sed -e 's/\./\//g')
fi
BENCH_NAME="${BENCH_NAME1}${BENCH_NAME2}"
else
BENCH_NAME=$BENCH_NAME_ORIG
fi
local RTS_OPTIONS1
RTS_OPTIONS1="\
+RTS -T \
$(bench_rts_options $(basename $BENCH_EXEC_PATH) $BENCH_NAME) \
$RTS_OPTIONS \
-RTS"
local QUICK_MODE_TYPE
QUICK_MODE_TYPE="\
$(if test "$QUICK_MODE" -ne 0; then set_super_quick_mode; fi) \
$(bench_speed_options $(basename $BENCH_EXEC_PATH) $BENCH_NAME)"
local QUICK_BENCH_OPTIONS
for i in $QUICK_MODE_TYPE
do
case "$i" in
super_quick) QUICK_BENCH_OPTIONS="$SUPER_QUICK_OPTIONS"; break ;;
quicker) QUICK_BENCH_OPTIONS="$QUICKER_OPTIONS"; break ;;
esac
done
local STREAM_SIZE
local STREAM_LEN
local STREAM_SIZE_OPT
if test "$LONG" -ne 0
then
STREAM_SIZE=10000000
STREAM_LEN=$(env LC_ALL=en_US.UTF-8 printf "--stream-size %'.f\n" $STREAM_SIZE)
STREAM_SIZE_OPT="--stream-size $STREAM_SIZE"
fi
echo "$BENCH_NAME_ORIG: \
$RTS_OPTIONS1 \
$STREAM_LEN \
$QUICK_BENCH_OPTIONS" \
"$@"
#------------------------------------------------------------------------------
# Run benchmark with options and collect results
#------------------------------------------------------------------------------
local output_file
output_file=$(bench_output_file $(basename $BENCH_EXEC_PATH))
mkdir -p `dirname $output_file`
rm -f ${output_file}.tmp
local BENCH_NAME_ESC
if test $USE_GAUGE -eq 0
then
# Escape "\" and double quotes in benchmark names
BENCH_NAME_ESC=$(echo "$BENCH_NAME_ORIG" | sed -e 's/\\/\\\\/g' | sed -e 's/"/\\"/g')
echo "$BENCH_NAME_ESC"
$BENCH_EXEC_PATH \
-j 1 \
$RTS_OPTIONS1 \
$STREAM_SIZE_OPT \
$QUICK_BENCH_OPTIONS \
"$@" \
--csv=${output_file}.tmp \
-p '$0 == "'"$BENCH_NAME_ESC"'"' || die "Benchmark execution failed."
# Convert cpuTime field from picoseconds to seconds
awk --version 2>&1 | grep -q "GNU Awk" \
|| die "Need GNU awk. [$(which awk)] is not GNU awk."
tail -n +2 ${output_file}.tmp | \
awk 'BEGIN {FPAT = "([^,]+)|(\"[^\"]+\")";OFS=","} {$2=$2/1000000000000;print}' \
>> $output_file
else
$BENCH_EXEC_PATH \
$RTS_OPTIONS1 \
$STREAM_SIZE_OPT \
$QUICK_BENCH_OPTIONS \
"$@" \
--csvraw=${output_file}.tmp \
-m exact "$BENCH_NAME" || die "Benchmark execution failed."
tail -n +2 ${output_file}.tmp \
>> $output_file
fi
}
invoke_gauge () {
local target_prog="$1"
local target_name="$2"
local output_file="$3"
local MATCH=""
if test "$LONG" -ne 0
then
MATCH="$target_name/o-1-space"
else
MATCH="$BENCH_PREFIX"
fi
echo "name,iters,time,cycles,cpuTime,utime,stime,maxrss,minflt,majflt,nvcsw,nivcsw,allocated,numGcs,bytesCopied,mutatorWallSeconds,mutatorCpuSeconds,gcWallSeconds,gcCpuSeconds" >> $output_file
# Keep only benchmark names with the shortest prefix: e.g. given "a/b/c" and
# "a/b", we should keep only "a/b", otherwise benchmarks will run multiple times. Why?
$target_prog -l \
| grep "^$MATCH" \
| while read -r name; \
do bench_exec_one "$name" "${GAUGE_ARGS[@]}" || exit 1; done \
|| die "Benchmark execution failed."
}
invoke_tasty_bench () {
local target_prog="$1"
local target_name="$2"
local output_file="$3"
local MATCH=""
if test "$LONG" -ne 0
then
MATCH="-p /$target_name\/o-1-space/"
else
if test -n "$BENCH_PREFIX"
then
# escape "/"
local escaped_name=$(echo "$BENCH_PREFIX" | sed -e 's/\//\\\//g')
MATCH="-p /$escaped_name/"
fi
fi
echo "Name,cpuTime,2*Stdev (ps),Allocated,bytesCopied,maxrss" >> $output_file
$target_prog -l $MATCH \
| grep "^All" \
| while read -r name; \
do bench_exec_one "$name" "${GAUGE_ARGS[@]}" || exit 1; done \
|| die "Benchmark execution failed."
}
run_bench_target () {
local package_name=$1
local component=$2
local target_name=$3
local target_prog
if test -z "$BENCHMARK_PACKAGE_VERSION"
then
echo "Please set BENCHMARK_PACKAGE_VERSION in bench_config"
exit 1
fi
target_prog=$(cabal_target_prog $package_name-$BENCHMARK_PACKAGE_VERSION $component $target_name) || \
die "Cannot find executable for target $target_name"
echo "Running executable $target_name ..."
# Needed by bench_exec_one
BENCH_EXEC_PATH=$target_prog
local output_file=$(bench_output_file $target_name)
mkdir -p `dirname $output_file`
if test "$USE_GAUGE" -eq 0
then invoke_tasty_bench "$target_prog" "$target_name" "$output_file"
else invoke_gauge "$target_prog" "$target_name" "$output_file"
fi
}
# $1: package name
# $2: component
# $3: targets
run_bench_targets() {
for i in $3
do
run_bench_target $1 $2 $i
done
}
run_benches_comparing() {
local bench_list=$1
if test -z "$CANDIDATE"
then
CANDIDATE=$(git rev-parse HEAD)
fi
if test -z "$BASE"
then
# XXX Should be where the current branch is forked from master
BASE="$CANDIDATE^"
fi
echo "Comparing baseline commit [$BASE] with candidate [$CANDIDATE]"
echo "Checking out base commit [$BASE] for benchmarking"
# XXX git checkout will overwrite this script itself and the scripts
# imported/used by this script.
git checkout "$BASE" || die "Checkout of base commit [$BASE] failed"
# $BUILD_BENCH || die "build failed"
run_build "$BUILD_BENCH" $BENCHMARK_PACKAGE_NAME bench "$TARGETS"
run_bench_targets $BENCHMARK_PACKAGE_NAME b "$bench_list" target_exe_extra_args
echo "Checking out candidate commit [$CANDIDATE] for benchmarking"
git checkout "$CANDIDATE" || \
die "Checkout of candidate [$CANDIDATE] commit failed"
# $BUILD_BENCH || die "build failed"
run_build "$BUILD_BENCH" $BENCHMARK_PACKAGE_NAME bench "$TARGETS"
run_bench_targets $BENCHMARK_PACKAGE_NAME b "$bench_list" target_exe_extra_args
# XXX reset back to the original commit
}
backup_output_file() {
local bench_name=$1
local output_file=$(bench_output_file $bench_name)
if test -e $output_file -a "$APPEND" != 1
then
mv -f -v $output_file ${output_file}.prev
fi
}
run_measurements() {
local bench_list=$1
for i in $bench_list
do
backup_output_file $i
done
if test "$COMMIT_COMPARE" = "0"
then
run_build "$BUILD_BENCH" $BENCHMARK_PACKAGE_NAME bench "$TARGETS"
run_bench_targets $BENCHMARK_PACKAGE_NAME b "$bench_list" target_exe_extra_args
else
run_benches_comparing "$bench_list"
fi
}
run_reports() {
local prog
prog=$BENCH_REPORT_DIR/bin/bench-report
test -x $prog || die "Cannot find bench-report executable"
test -z "$SILENT" && echo
for i in $1
do
test -z "$SILENT" && echo "Generating reports for ${i}..."
$prog \
--benchmark $i \
$(test "$USE_GAUGE" = 1 && echo "--use-gauge") \
$(test "$GRAPH" = 1 && echo "--graphs") \
$(test "$SORT_BY_NAME" = 1 && echo "--sort-by-name") \
$(test -n "$BENCH_DIFF_STYLE" && echo "--diff-style $BENCH_DIFF_STYLE") \
$(test -n "$BENCH_CUTOFF_PERCENT" && echo "--diff-cutoff-percent $BENCH_CUTOFF_PERCENT") \
--fields "$FIELDS"
done
}
#-----------------------------------------------------------------------------
# Execution starts here
#-----------------------------------------------------------------------------
USE_GIT_CABAL=1 # This is used by set_common_vars
set_common_vars
USE_NIX=0
COMPARE=0
COMMIT_COMPARE=0
BASE=
CANDIDATE=
APPEND=0
LONG=0
RAW=0
SORT_BY_NAME=0
GRAPH=0
MEASURE=1
GAUGE_ARGS=
BUILD_ONCE=0
CABAL_BUILD_OPTIONS="--flag fusion-plugin --flag limit-build-mem"
#-----------------------------------------------------------------------------
# Read command line
#-----------------------------------------------------------------------------
while test -n "$1"
do
case $1 in
-h|--help|help) print_help ;;
# options with arguments
--benchmarks) shift; TARGETS=$1; shift ;;
--targets) shift; TARGETS=$1; shift ;;
--prefix) shift; BENCH_PREFIX="$1"; shift ;;
--fields) shift; FIELDS=$1; shift ;;
--base) shift; BASE=$1; shift ;;
--candidate) shift; CANDIDATE=$1; shift ;;
--with-compiler) shift; CABAL_WITH_COMPILER=$1; shift ;;
--cabal-build-flags) shift; CABAL_BUILD_OPTIONS+=" $1"; shift ;;
--cabal-build-options) shift; CABAL_BUILD_OPTIONS+=" $1"; shift ;;
--rtsopts) shift; RTS_OPTIONS=$1; shift ;;
--config) shift; BENCH_CONFIG_FILE=$1; shift ;;
--diff-style) shift; BENCH_DIFF_STYLE=$1; shift ;;
--diff-cutoff-percent) shift; BENCH_CUTOFF_PERCENT=$1; shift ;;
# flags
--slow) SLOW=1; shift ;;
--silent) SILENT=1; shift ;;
--quick) QUICK_MODE=1; shift ;;
--compare) COMPARE=1; shift ;;
--commit-compare) COMMIT_COMPARE=1; shift ;;
--raw) RAW=1; shift ;;
--append) APPEND=1; shift ;;
--long) LONG=1; shift ;;
--sort-by-name) SORT_BY_NAME=1; shift ;;
--graphs) GRAPH=1; shift ;;
--no-measure) MEASURE=0; shift ;;
# This is used in build-lib.sh to enable dev_build function which is used
# in targets.sh to enable or disable some targets.
--dev-build) RUNNING_DEVBUILD=1; shift ;;
--use-nix) USE_NIX=1; shift ;;
--use-gauge) USE_GAUGE=1; shift ;;
--) shift; break ;;
*) echo "Unknown flags: $*"; echo; print_help ;;
esac
done
GAUGE_ARGS=("$@")
if test -z "$BENCH_CONFIG_FILE"
then
die "Please use --config to specify config file"
fi
source "$BENCH_CONFIG_FILE" || \
die "Failed to source config file $BENCH_CONFIG_FILE"
# Defined in $BENCH_CONFIG_FILE
bench_config
if test -z "$FIELDS"
then
FIELDS=$DEFAULT_FIELDS
fi
set_derived_vars
#-----------------------------------------------------------------------------
# Determine targets
#-----------------------------------------------------------------------------
only_real_benchmarks () {
for i in $TARGETS
do
local SKIP=0
for j in $COMPARISONS
do
if test $i == $j
then
SKIP=1
fi
done
if test "$SKIP" -eq 0
then
echo -n "$i "
fi
done
}
# defined in $BENCH_CONFIG_FILE
bench_targets
if test "$(has_item "$TARGETS" help)" = "help"
then
list_target_groups
list_comparisons
list_targets
exit
fi
COMMON_FIELDS="allocated bytescopied cputime maxrss"
if test "$USE_GAUGE" -eq 1
then
ALL_FIELDS="$COMMON_FIELDS time cycles utime stime minflt majflt nvcsw nivcsw"
else
ALL_FIELDS="$COMMON_FIELDS"
fi
if test "$(has_item "$FIELDS" help)" = "help"
then
echo "Supported fields: $ALL_FIELDS"
echo "Default fields: $DEFAULT_FIELDS"
exit
fi
if test "$LONG" -ne 0
then
if test -n "$TARGETS"
then
echo "Cannot specify benchmarks [$TARGETS] with --long"
exit
fi
TARGETS=$infinite_grp
fi
DEFAULT_TARGETS="$(all_grp)"
TARGETS=$(set_targets)
TARGETS_ORIG=$TARGETS
TARGETS=$(only_real_benchmarks)
test -z "$SILENT" && echo "Using benchmark suites [$TARGETS]"
#-----------------------------------------------------------------------------
# Build reporting utility
#-----------------------------------------------------------------------------
# We need to build the report progs first at the current (latest) commit before
# checking out any other commit for benchmarking.
if test -z "$SILENT"
then build_report_progs
else silently build_report_progs
fi
#-----------------------------------------------------------------------------
# Build and run targets
#-----------------------------------------------------------------------------
if test "$USE_GAUGE" -eq 1
then
BUILD_FLAGS="--flag use-gauge"
fi
BUILD_BENCH="$CABAL_EXECUTABLE v2-build $BUILD_FLAGS $CABAL_BUILD_OPTIONS --enable-benchmarks"
if test "$MEASURE" = "1"
then
run_measurements "$TARGETS"
fi
#-----------------------------------------------------------------------------
# Run reports
#-----------------------------------------------------------------------------
# $1: var name
build_comparison_results () {
local name
local constituents
name=$1
constituents=$(eval "echo -n \$${name}")
mkdir -p "charts/$name"
dest_file="charts/$name/results.csv"
: > $dest_file
for j in $constituents
do
cat "charts/$j/results.csv" >> $dest_file
done
}
if test "$COMPARE" -eq 1
then
DYN_CMP_GRP="$(echo "$TARGETS" | sed -e 's/ /_/g')_cmp"
eval "$DYN_CMP_GRP=\"$TARGETS\""
COMPARISON_REPORTS=$DYN_CMP_GRP
build_comparison_results $DYN_CMP_GRP
else
COMPARISON_REPORTS=""
fi
for i in $COMPARISONS
do
if test "$(has_item "$TARGETS_ORIG" $i)" = $i
then
COMPARISON_REPORTS="$COMPARISON_REPORTS $i"
build_comparison_results $i
fi
done
if test "$RAW" = "0"
then
run_reports "$TARGETS"
run_reports "$COMPARISON_REPORTS"
if test -n "$DYN_CMP_GRP"
then
rm -rf "charts/$DYN_CMP_GRP"
fi
fi

@@ -1,218 +0,0 @@
# $1: message
die () {
>&2 echo -e "Error: $1"
exit 1
}
warn () {
>&2 echo -e "Warning: $1"
}
# $1: command
function run_verbose() {
echo "$*"
bash -c "$*"
}
has_item () {
for i in $1
do
if test "$i" = "$2"
then
echo "$i"
break
fi
done
}
# $1: command
silently () {
eval "$1 &> /dev/null"
}
#------------------------------------------------------------------------------
# target groups
#------------------------------------------------------------------------------
test_only () {
if test -n "$RUNNING_TESTS"
then
echo $1
fi
}
bench_only () {
if test -n "$RUNNING_BENCHMARKS"
then
echo $1
fi
}
dev_build () {
if test -n "$RUNNING_DEVBUILD"
then
echo $1
fi
}
# A group consisting of all known individual targets
all_grp () {
{ for i in $GROUP_TARGETS
do
for j in $(eval "echo \$$i")
do
echo $j
done
done
for i in $INDIVIDUAL_TARGETS
do
echo $i
done
} | sort | uniq
}
# All groups
all_target_groups () {
echo $GROUP_TARGETS
}
# XXX pass as arg
list_targets () {
echo "Individual targets:"
for i in $(all_grp)
do
echo "$i"
done
echo
}
# XXX pass as arg
list_target_groups () {
echo "All Targets: all_grp"
echo "Target groups:"
for i in $(all_target_groups)
do
echo -n "$i ["
eval "echo -n \$$i"
echo "]"
done
echo
}
# XXX pass as arg
set_targets() {
if test -z "$TARGETS"
then
echo $DEFAULT_TARGETS
else
for i in $(echo $TARGETS)
do
case $i in
*_grp) eval "echo -n \$${i}" ;;
*_cmp) eval "echo -n \$${i} $i" ;;
*) echo -n $i ;;
esac
echo -n " "
done
fi
}
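# For illustration (the group names come from the targets.sh shipped with the
# benchmark config): with TARGETS="serial_wserial_cmp Data.Unfold", the _cmp
# entry expands to its members followed by the group name itself, so
# set_targets emits:
#
#   Prelude.Serial Prelude.WSerial serial_wserial_cmp Data.Unfold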
# XXX cabal issue "cabal v2-exec which" cannot find benchmark/test executables
# $1: builddir
# $2: package name with version
# $3: component ("" (lib), t (test), b (benchmark), x (executable))
# $4: command to find
cabal_which_builddir() {
local noopt=""
if test "$TEST_QUICK_MODE" = "1"
then
noopt="/noopt"
fi
local path=$(echo $1/build/*/ghc-${GHC_VERSION}/${2}/$3/$4$noopt/build/$4/$4)
echo "[cabal_which $path]" 1>&2
test -f "$path" && echo $path
}
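# For illustration, with a hypothetical platform and GHC version filling in the
# wildcard and ${GHC_VERSION}, the constructed path for the Prelude.Serial
# benchmark of streamly-benchmarks-0.0.0 would look like:
#   dist-newstyle/build/x86_64-linux/ghc-9.2.2/streamly-benchmarks-0.0.0/b/Prelude.Serial/build/Prelude.Serial/Prelude.Serial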
# $1: package name with version
# $2: component
# $3: command to find
cabal_which() {
cabal_which_builddir $BUILD_DIR $1 $2 $3
}
# We run the benchmarks in isolation in a separate process so that different
# benchmarks do not interfere with each other. To enable that we need to pass the
# benchmark exe path to gauge as an argument. Unfortunately it cannot find its
# own path currently.
# The path is dependent on the architecture and cabal version.
# $1: package name with version
# $2: component
# $3: target
cabal_target_prog () {
local target_prog=`cabal_which $1 $2 $3`
if test -x "$target_prog"
then
echo $target_prog
else
return 1
fi
}
set_common_vars () {
SLOW=0
QUICK_MODE=0
RUNNING_DEVBUILD=
TARGET_EXE_ARGS=
RTS_OPTIONS=
CABAL_BUILD_OPTIONS=""
CABAL_EXECUTABLE=cabal
# Use branch specific builds if git-cabal is present in PATH
BUILD_DIR=dist-newstyle
if test "$USE_GIT_CABAL" -eq 1
then
if which git-cabal 2>/dev/null
then
echo "Using git-cabal for branch specific builds"
CABAL_EXECUTABLE=git-cabal
BUILD_DIR=$(git-cabal show-builddir)
fi
fi
}
# To be called after parsing CLI arguments
set_derived_vars () {
if test -n "$CABAL_WITH_COMPILER"
then
CABAL_BUILD_OPTIONS+=" --with-compiler $CABAL_WITH_COMPILER"
else
CABAL_WITH_COMPILER=ghc
fi
GHC_VERSION=$($CABAL_WITH_COMPILER --numeric-version)
}
# $1: build program
# $2: package name
# $3: component prefix
# $4: targets
run_build () {
local build_prog=$1
local package=$2
local component_prefix=$3
local COMPONENTS
local c
for c in $4
do
COMPONENTS+="$package:$component_prefix:$c "
done
run_verbose $build_prog $COMPONENTS || die "build failed"
}

@@ -1 +0,0 @@
packages: .

@@ -1,55 +0,0 @@
let
nixpkgsPath_21_11 =
"https://github.com/NixOS/nixpkgs/archive/refs/tags/21.11.tar.gz";
nixpkgsDefault = import (builtins.fetchTarball nixpkgsPath_21_11) { };
in
{
nixpkgs ? nixpkgsDefault
, compiler ? "default"
}:
let haskellPackages =
if compiler == "default"
then nixpkgs.haskellPackages
else nixpkgs.haskell.packages.${compiler};
mkPackage = super: pkg: path: opts: inShell:
let orig = super.callCabal2nixWithOptions pkg path opts {};
in if inShell
# Avoid copying the source directory to nix store by using
# src = null.
then orig.overrideAttrs (oldAttrs: { src = null; })
else orig;
mkHaskellPackages = inShell:
haskellPackages.override {
overrides = self: super:
with nixpkgs.haskell.lib;
{
bench-show = super.callHackageDirect {
pkg = "bench-show";
ver = "0.3.2";
sha256 = "16b8vyzdp9b5bh34kqmbfwjsyv8wgnxxwl8kjcpgxjsh52xzyaa0";
} { };
bench-report = mkPackage super "bench-report" ./. "" inShell;
};
};
drv = mkHaskellPackages true;
shell = drv.shellFor {
packages = p:
[ p.bench-report
];
# Use a better prompt
shellHook = ''
export CABAL_DIR="$(pwd)/.cabal.nix"
if test -n "$PS_SHELL"
then
export PS1="$PS_SHELL\[$bldred\](nix)\[$txtrst\] "
fi
'';
};
in if nixpkgs.lib.inNixShell
then shell
else (mkHaskellPackages false).bench-report
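# For illustration (usage taken from the bench-runner script): the reporting
# executable is built inside this shell with something like
#   nix-shell --run "cabal install --installdir bin bench-report"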

@@ -1,160 +0,0 @@
#!/usr/bin/env bash
BENCH_CONFIG_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &> /dev/null && pwd)"
source $BENCH_CONFIG_DIR/targets.sh \
|| { echo "Cannot source $BENCH_CONFIG_DIR/targets.sh"; exit 1; }
# Customization options
bench_config () {
BENCH_REPORT_DIR=benchmark/bench-report
BENCHMARK_PACKAGE_NAME=streamly-benchmarks
BENCHMARK_PACKAGE_VERSION=0.0.0
USE_GAUGE=0
DEFAULT_FIELDS="allocated cputime bytescopied"
}
#------------------------------------------------------------------------------
# benchmark groups
#------------------------------------------------------------------------------
bench_targets () {
targets
}
#------------------------------------------------------------------------------
# RTS options based on the benchmark executable name and benchmark name
#------------------------------------------------------------------------------
bench_rts_options () {
local exe_name
local bench_name
exe_name="$1"
bench_name="$2"
# Based on benchmark class
case "$bench_name" in
*/o-1-sp*) echo -n "-K36K -M16M" ;;
*/o-n-h*) echo -n "-K36K -M32M" ;;
*/o-n-st*) echo -n "-K1M -M16M" ;;
*/o-n-sp*) echo -n "-K1M -M32M" ;;
*) echo -n "" ;;
esac
echo -n " "
case "$exe_name" in
Prelude.Concurrent*) echo -n "-K256K -M384M" ;;
*) echo -n "" ;;
esac
echo -n " "
# Based on specific benchmark
# XXX Note: for tasty-bench we replace the "." separator in the benchmark names
# with "/" so that they match the patterns below. This may not work reliably if
# the benchmark name already contains a ".".
case "$bench_name" in
Data.Stream.StreamD/o-n-space/elimination/toList) echo -n "-K2M" ;;
Data.Stream.StreamK/o-n-space/elimination/toList) echo -n "-K2M" ;;
Prelude.Parallel/o-n-heap/mapping/mapM) echo -n "-M256M" ;;
Prelude.Parallel/o-n-heap/monad-outer-product/*) echo -n "-M256M" ;;
Prelude.Parallel/o-n-space/monad-outer-product/*) echo -n "-K4M -M256M" ;;
Prelude.Rate/o-1-space/*) echo -n "-K128K" ;;
Prelude.Rate/o-1-space/asyncly/*) echo -n "-K128K" ;;
# XXX For GHC-9.0
Prelude.Serial/o-1-space/mixed/sum-product-fold) echo -n "-K64M" ;;
# XXX These should be moved to o-n-space?
Prelude.Serial/o-n-heap/grouping/classifySessionsOf*) echo -n "-K4M -M32M" ;;
Prelude.Serial/o-n-heap/Functor/*) echo -n "-K4M -M32M" ;;
Prelude.Serial/o-n-heap/transformer/*) echo -n "-K8M -M64M" ;;
Prelude.Serial/o-n-space/Functor/*) echo -n "-K4M -M64M" ;;
Prelude.Serial/o-n-space/Applicative/*) echo -n "-K8M -M128M" ;;
Prelude.Serial/o-n-space/Monad/*) echo -n "-K8M -M64M" ;;
# Use -K4M for o-n-space except for grouping
Prelude.Serial/o-n-space/grouping/*) echo -n "" ;;
Prelude.Serial/o-n-space/*) echo -n "-K4M" ;;
Prelude.WSerial/o-n-space/*) echo -n "-K4M" ;;
Prelude.Async/o-n-space/monad-outer-product/*) echo -n "-K4M" ;;
Prelude.Ahead/o-n-space/monad-outer-product/*) echo -n "-K4M" ;;
Prelude.Ahead/o-1-space/*) echo -n "-K128K" ;;
Prelude.WAsync/o-n-heap/monad-outer-product/toNull3) echo -n "-M64M" ;;
Prelude.WAsync/o-n-space/monad-outer-product/*) echo -n "-K4M" ;;
# XXX need to investigate these, taking too much stack
Data.Parser.ParserD/o-1-space/some) echo -n "-K8M" ;;
Data.Parser/o-1-space/some) echo -n "-K8M" ;;
Data.Parser.ParserD/o-1-space/manyTill) echo -n "-K4M" ;;
Data.Parser/o-1-space/manyTill) echo -n "-K4M" ;;
Data.Parser/o-n-heap/manyAlt) echo -n "-K4M -M128M" ;;
Data.Parser/o-n-heap/someAlt) echo -n "-K4M -M128M" ;;
Data.Parser/o-n-heap/choice) echo -n "-K16M -M32M" ;;
Data.Parser.ParserK/o-n-heap/manyAlt) echo -n "-K4M -M128M" ;;
Data.Parser.ParserK/o-n-heap/someAlt) echo -n "-K4M -M128M" ;;
Data.Parser.ParserK/o-n-heap/sequence) echo -n "-M64M";;
Data.Parser.ParserK/o-n-heap/sequenceA) echo -n "-M64M";;
Data.SmallArray/o-1-sp*) echo -n "-K128K" ;;
# For tasty-bench
Data.Array*/o-1-space/generation/show) echo -n "-M32M" ;;
# XXX For GHC-8.10
Data.Array/o-1-space/transformationX4/map) echo -n "-M32M" ;;
# DEVBUILD only benchmarks - array foldable instance
Data.Array.Foreign/o-1-space/elimination/foldable/foldl*) echo -n "-K8M" ;;
Data.Array.Foreign/o-1-space/elimination/foldable/sum) echo -n "-K8M" ;;
*) echo -n "" ;;
esac
}
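# For illustration: for exe_name "Data.Parser" and bench_name
# "Data.Parser/o-n-heap/manyAlt", the space-complexity case ("*/o-n-h*") emits
# "-K36K -M32M", the exe_name case emits nothing, and the benchmark-specific
# case emits "-K4M -M128M", so the function prints "-K36K -M32M  -K4M -M128M".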
#------------------------------------------------------------------------------
# Speed options
#------------------------------------------------------------------------------
bench_speed_options () {
local exe_name
local bench_name
exe_name="$1"
bench_name="$2"
case "$exe_name" in
Prelude.Concurrent) set_super_quick_mode ;;
Prelude.Rate) set_super_quick_mode ;;
Prelude.Adaptive) set_super_quick_mode;;
*) echo -n "" ;;
esac
# XXX Note: for tasty-bench we replace the "." separator in the benchmark names
# with "/" so that they match the patterns below. This may not work reliably if
# the benchmark name already contains a ".".
# Use quick options for benchmarks that take too long
case "$bench_name" in
Prelude.Parallel/o-n-heap/mapping/mapM) set_super_quick_mode ;;
Prelude.Parallel/o-n-heap/monad-outer-product/*) set_super_quick_mode ;;
Prelude.Parallel/o-n-space/monad-outer-product/*) set_super_quick_mode ;;
Prelude.Parallel/o-n-heap/generation/*) use_quicker_mode ;;
Prelude.Parallel/o-n-heap/mapping/*) use_quicker_mode ;;
Prelude.Parallel/o-n-heap/concat-foldable/*) use_quicker_mode ;;
Prelude.Async/o-1-space/monad-outer-product/*) use_quicker_mode ;;
Prelude.Async/o-n-space/monad-outer-product/*) use_quicker_mode ;;
Prelude.Ahead/o-1-space/monad-outer-product/*) use_quicker_mode ;;
Prelude.Ahead/o-n-space/monad-outer-product/*) use_quicker_mode ;;
Prelude.WAsync/o-n-heap/monad-outer-product/*) use_quicker_mode ;;
Prelude.WAsync/o-n-space/monad-outer-product/*) use_quicker_mode ;;
FileSystem.Handle/*) use_quicker_mode ;;
*) echo -n "" ;;
esac
}

@@ -1,15 +0,0 @@
#!/usr/bin/env bash
SCRIPT_DIR=$(cd `dirname $0`; pwd)
source $SCRIPT_DIR/bench-config.sh \
|| { echo "Cannot source $SCRIPT_DIR/bench-config.sh"; exit 1; }
bench_config
if test -z "$BENCH_REPORT_DIR"
then
echo "BENCH_REPORT_DIR variable not defined by bench-config() in $SCRIPT_DIR/bench-config.sh"
exit 1
fi
cd $SCRIPT_DIR/..
$BENCH_REPORT_DIR/bin/bench-runner.sh --config $SCRIPT_DIR/bench-config.sh "$@"

@@ -1,120 +0,0 @@
#------------------------------------------------------------------------------
# test and benchmark groups
#------------------------------------------------------------------------------
# Depends on RUNNING_TESTS, RUNNING_BENCHMARKS, RUNNING_DEVBUILD variables
# being set for dev_build, test_only, bench_only functions to work. So the
# "targets" fucntion should be called only after these are set.
# IMPORTANT NOTE: the names "_grp" and "_cmp" suffixes are special, do
# not rename them to something else.
targets () {
base_stream_grp="\
`bench_only Data.Stream.StreamD` \
`bench_only Data.Stream.StreamK` \
`bench_only Data.Stream.StreamDK`"
prelude_serial_grp="\
Prelude.Serial \
Prelude.WSerial \
Prelude.ZipSerial"
prelude_concurrent_grp="\
Prelude.Async \
Prelude.WAsync \
Prelude.Ahead \
Prelude.Parallel \
Prelude.ZipAsync"
prelude_other_grp="\
`test_only Prelude` \
$(test_only $(dev_build Prelude.Rate)) \
`bench_only Prelude.Rate` \
`test_only Prelude.Fold` \
`test_only Prelude.Concurrent` \
$(bench_only $(dev_build Prelude.Concurrent)) \
`bench_only Prelude.Adaptive`"
array_grp="\
Data.Array \
Data.Array.Foreign \
Data.Array.Prim \
Data.SmallArray \
Data.Array.Prim.Pinned"
base_parser_grp="\
Data.Parser.ParserD \
`bench_only Data.Parser.ParserK`"
parser_grp="\
Data.Fold \
Data.Parser"
list_grp="\
`test_only Data.List.Base` \
`test_only Data.List`"
#------------------------------------------------------------------------------
# Streaming vs non-streaming
#------------------------------------------------------------------------------
# The "o-1-space" groups of these benchmarks are run with long stream
# sizes when --long option is used.
infinite_grp="\
$prelude_serial_grp \
$prelude_concurrent_grp \
`bench_only Prelude.Rate`"
#------------------------------------------------------------------------------
# Benchmark comparison groups
#------------------------------------------------------------------------------
# *_cmp denotes a comparison benchmark, the benchmarks provided in *_cmp
# variables are compared with each other
base_stream_cmp="Data.Stream.StreamD Data.Stream.StreamK"
serial_wserial_cmp="Prelude.Serial Prelude.WSerial"
serial_async_cmp="Prelude.Serial Prelude.Async"
concurrent_cmp="Prelude.Async Prelude.WAsync Prelude.Ahead Prelude.Parallel"
array_cmp="Data.Array.Foreign Data.Array.Prim Data.Array Data.Array.Prim.Pinned"
pinned_array_cmp="Data.Array.Foreign Data.Array.Prim.Pinned"
base_parser_cmp=$base_parser_grp
COMPARISONS="\
base_stream_cmp \
serial_wserial_cmp \
serial_async_cmp \
concurrent_cmp \
array_cmp \
pinned_array_cmp \
base_parser_cmp"
#------------------------------------------------------------------------------
# All test/benchmark modules must be in at least one of these
#------------------------------------------------------------------------------
# All groups
GROUP_TARGETS="\
base_stream_grp \
prelude_serial_grp \
prelude_concurrent_grp \
prelude_other_grp \
array_grp \
base_parser_grp \
parser_grp \
list_grp \
infinite_grp"
# Not in any groups
INDIVIDUAL_TARGETS="\
Data.Unfold \
Unicode.Stream \
$(test_only $(dev_build Unicode.Char)) \
`bench_only Unicode.Char` \
`bench_only Unicode.Utf8` \
FileSystem.Handle \
`test_only FileSystem.Event` \
`test_only Network.Socket` \
`test_only Network.Inet.TCP` \
`test_only version-bounds`"
}

@@ -1,247 +0,0 @@
#!/usr/bin/env bash
SCRIPT_DIR=$(dirname $0)
STREAMLY_VERSION=0.8.2
BENCH_REPORT_DIR=benchmark/bench-report
source $SCRIPT_DIR/targets.sh
#------------------------------------------------------------------------------
# Script
#------------------------------------------------------------------------------
print_help () {
echo "Usage: $0 "
echo " [--targets <"target1 target2 ..." | help>]"
echo
echo " [--quick]"
echo " [--with-compiler <compiler exe name>]"
echo " [--cabal-build-options <option>]"
echo " [--dev-build]"
echo
echo " [--coverage]"
echo " [--hpc-report-options <option>]"
echo " [--no-measure]"
echo " [--raw]"
echo
echo " [--rtsopts <option>]"
echo " -- <hspec options or test names>"
echo
echo "--targets: targets to run, use 'help' for list of targets"
echo
echo "--quick: disable optimizations to build quickly"
echo "--cabal-build-options: Pass any cabal build options to be used for build"
echo "--dev-build: runs some additional tests"
echo
echo "--coverage: enable coverage and report coverage info"
echo "--hpc-report-options: option for 'hpc report'"
echo "--no-measure: with --coverage, do not run tests, only show coverage info"
echo "--raw: with --coverage, do not run hpc"
echo
echo "--rtsopts: pass GHC RTS options to the test executable"
echo "Any arguments after a '--' are passed directly to hspec"
exit
}
#-----------------------------------------------------------------------------
# Read command line
#-----------------------------------------------------------------------------
RUNNING_TESTS=y
source $BENCH_REPORT_DIR/bin/build-lib.sh
USE_GIT_CABAL=1
set_common_vars
COVERAGE=0
MEASURE=1
HPC_REPORT_OPTIONS=
RAW=0
CABAL_BUILD_OPTIONS="--flag limit-build-mem"
TEST_QUICK_MODE=0
# XXX add a bisect option
while test -n "$1"
do
case $1 in
-h|--help|help) print_help ;;
# options with arguments
--targets) shift; TARGETS=$1; shift ;;
--with-compiler) shift; CABAL_WITH_COMPILER=$1; shift ;;
--cabal-build-options) shift; CABAL_BUILD_OPTIONS+=" $1"; shift ;;
--hpc-report-options) shift; HPC_REPORT_OPTIONS="$1"; shift ;;
--rtsopts) shift; RTS_OPTIONS="$1"; shift ;;
# flags
--raw) RAW=1; shift ;;
#--slow) SLOW=1; shift ;; # not implemented
--quick) TEST_QUICK_MODE=1; shift ;;
--dev-build) RUNNING_DEVBUILD=1 CABAL_BUILD_OPTIONS+=" --flag dev"; shift ;;
--coverage) COVERAGE=1; shift ;;
--no-measure) MEASURE=0; shift ;;
--) shift; break ;;
-*|--*) echo "Unknown flags: $*"; echo; print_help ;;
*) break ;;
esac
done
TARGET_EXE_ARGS=$*
set_derived_vars
if test $TEST_QUICK_MODE -eq 1
then
CABAL_BUILD_OPTIONS+=" --disable-optimization --flags -opt"
fi
#-----------------------------------------------------------------------------
# Determine targets
#-----------------------------------------------------------------------------
# Defined in targets.sh
targets
if test "$(has_item "$TARGETS" help)" = "help"
then
list_target_groups
list_targets
exit
fi
DEFAULT_TARGETS="$(all_grp)"
TARGETS=$(set_targets)
echo "Using targets [$TARGETS]"
#-----------------------------------------------------------------------------
# Build targets
#-----------------------------------------------------------------------------
test_exe_rts_opts () {
case "$1" in
# XXX Data.Array.* heap requirement increased for GHC-8.10
Data.Array.Foreign) echo -n "-M128M" ;;
Data.Array.Prim) echo -n "-M128M" ;;
Data.Array.Prim.Pinned) echo -n "-M128M" ;;
Prelude.Rate) echo -n "-M512M" ;;
# In the -O0 case the writeChunks test fails; maybe we should have a separate
# flag for the -O0 case?
FileSystem.Handle) echo -n "-K16M" ;;
*) if test "$COVERAGE" -eq "1"
then
echo -n "-K8M -M1024M"
else
echo -n "-K8M -M64M"
fi ;;
esac
}
# $1: bench name
# $2: bench executable
target_exe_extra_args () {
local bench_name=$1
local bench_prog=$2
echo "+RTS \
$(test_exe_rts_opts $(basename $bench_prog)) \
$RTS_OPTIONS \
-RTS"
}
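# For illustration: for the FileSystem.Handle test executable with RTS_OPTIONS
# unset, test_exe_rts_opts emits "-K16M", so the generated argument string is
# roughly "+RTS -K16M -RTS".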
if test "$COVERAGE" -eq "1"
then
# Used to determine the hpc tix dir
PACKAGE_FULL_NAME=streamly-$STREAMLY_VERSION
case `uname` in
Linux) SYSTEM=x86_64-linux ;;
*) echo "Unsupported system"; exit 1 ;;
esac
# With the --enable-coverage option the tests as well as the library get
# compiled with -fhpc, so we would get coverage for the test code as well. We
# want to exclude that, so a separate project file is needed.
CABAL_BUILD_OPTIONS+=" --project-file cabal.project.coverage"
mkdir -p $BUILD_DIR/hpc
fi
BUILD_TEST="$CABAL_EXECUTABLE v2-build $CABAL_BUILD_OPTIONS --enable-tests"
if test "$MEASURE" -eq "1"
then
run_build "$BUILD_TEST" streamly-tests test "$TARGETS"
fi
#-----------------------------------------------------------------------------
# Run targets
#-----------------------------------------------------------------------------
# $1: target name
get_tix_file () {
echo $BUILD_DIR/build/$SYSTEM/ghc-${GHC_VERSION}/$PACKAGE_FULL_NAME/hpc/vanilla/tix/$1/$1.tix
}
# $1: package name
# $2: component
# $3: target
# $4: args generator func
run_target () {
local package_name=$1
local component=$2
local target_name=$3
local extra_args=$4
local target_prog
target_prog=$(cabal_target_prog $package_name $component $target_name) || \
die "Cannot find executable for target $target_name"
echo "Running executable $target_name ..."
mkdir -p $(dirname $(get_tix_file $target_name))
export HPCTIXFILE=$(get_tix_file $target_name)
run_verbose $target_prog $($extra_args $target_name $target_prog) \
|| die "Target exe failed"
# hpc-coveralls fails if there is an empty dir and no .tix file generated
rmdir $(dirname $(get_tix_file $target_name)) 2>/dev/null || true
}
# $1: package name with version
# $2: component
# $3: targets
# $4: args generator func
run_targets() {
for i in $3
do
run_target $1 $2 $i $4
done
}
if test "$MEASURE" -eq "1"
then
run_targets "streamly-tests-0.0.0" t "$TARGETS" target_exe_extra_args
fi
#-----------------------------------------------------------------------------
# Run coverage reports
#-----------------------------------------------------------------------------
PACKAGE_NAME=streamly-$STREAMLY_VERSION
MIX_DIR=$BUILD_DIR/build/$SYSTEM/ghc-${GHC_VERSION}/$PACKAGE_NAME/hpc/vanilla/mix/$PACKAGE_NAME/
ALLTIX=$BUILD_DIR/hpc/all.tix
if test "$COVERAGE" -eq "1" -a "$RAW" -eq 0
then
TIXFILES=
for i in $TARGETS
do
tixfile="$(get_tix_file ${i})"
if test -f "$tixfile"
then
TIXFILES+="$tixfile "
fi
done
#echo "Combining tix files:"
#echo $TIXFILES | tr ' ' '\n'
hpc sum --union --output=$ALLTIX $TIXFILES
run_verbose hpc markup $ALLTIX --hpcdir $MIX_DIR
run_verbose hpc report $ALLTIX $HPC_REPORT_OPTIONS --hpcdir $MIX_DIR
fi

@@ -49,11 +49,6 @@ build-type: Configure
extra-source-files:
benchmark/*.hs
benchmark/bench-report/BenchReport.hs
benchmark/bench-report/bench-report.cabal
benchmark/bench-report/bin/bench-runner.sh
benchmark/bench-report/bin/build-lib.sh
benchmark/bench-report/cabal.project
benchmark/report/Main.hs
benchmark/report/report.cabal
benchmark/Streamly/Benchmark/Data/*.hs
@@ -76,12 +71,8 @@ extra-source-files:
benchmark/lib/Streamly/Benchmark/*.hs
benchmark/lib/Streamly/Benchmark/Common/*.hs
benchmark/streamly-benchmarks.cabal
bin/bench.sh
bin/bench-config.sh
bin/mk-hscope.sh
bin/mk-tags.sh
bin/targets.sh
bin/test.sh
configure
configure.ac
docs/*.hs