2022-02-06 14:06:27 +03:00
|
|
|
|
mod common;
|
2022-02-23 00:00:12 +03:00
|
|
|
|
use common::hyperfine;
|
2021-08-23 22:48:31 +03:00
|
|
|
|
|
2022-02-06 14:15:09 +03:00
|
|
|
|
use predicates::prelude::*;
|
|
|
|
|
|
2022-02-22 10:36:56 +03:00
|
|
|
|
pub fn hyperfine_debug() -> assert_cmd::Command {
|
|
|
|
|
let mut cmd = hyperfine();
|
|
|
|
|
cmd.arg("--debug-mode");
|
|
|
|
|
cmd
|
|
|
|
|
}
|
|
|
|
|
|
2021-08-23 22:48:31 +03:00
|
|
|
|
// A trivial two-run benchmark must complete without errors.
#[test]
fn runs_successfully() {
    let mut cmd = hyperfine();
    cmd.args(["--runs=2", "echo dummy benchmark"]);
    cmd.assert().success();
}
|
|
|
|
|
|
|
|
|
|
// `--runs=1` is a valid run count and must not be rejected.
#[test]
fn one_run_is_supported() {
    let mut cmd = hyperfine();
    cmd.args(["--runs=1", "echo dummy benchmark"]);
    cmd.assert().success();
}
|
2022-02-06 14:15:09 +03:00
|
|
|
|
|
2022-02-23 00:15:15 +03:00
|
|
|
|
// With `--shell=none` the command line is executed directly, so the
// quoted argument and the trailing one must reach the program as-is.
#[test]
fn can_run_commands_without_a_shell() {
    let mut cmd = hyperfine();
    cmd.args([
        "--runs=1",
        "--show-output",
        "--shell=none",
        "echo 'hello world' argument2",
    ]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("hello world argument2"));
}
|
|
|
|
|
|
2022-02-06 14:15:09 +03:00
|
|
|
|
// Two `--command-name` options for a single benchmarked command is an error.
#[test]
fn fails_with_wrong_number_of_command_name_arguments() {
    let mut cmd = hyperfine();
    cmd.args(["--command-name=a", "--command-name=b", "echo a"]);
    cmd.assert()
        .failure()
        .stderr(predicate::str::contains("Too many --command-name options"));
}
|
2022-02-06 15:48:39 +03:00
|
|
|
|
|
2022-02-06 15:54:09 +03:00
|
|
|
|
// `--prepare` may be given once per command (or once overall); any other
// count must be rejected.
#[test]
fn fails_with_wrong_number_of_prepare_options() {
    // Two prepare options for two commands: accepted.
    let mut matching = hyperfine();
    matching.args([
        "--runs=1",
        "--prepare=echo a",
        "--prepare=echo b",
        "echo a",
        "echo b",
    ]);
    matching.assert().success();

    // Two prepare options for three commands: rejected.
    let mut mismatched = hyperfine();
    mismatched.args([
        "--runs=1",
        "--prepare=echo a",
        "--prepare=echo b",
        "echo a",
        "echo b",
        "echo c",
    ]);
    mismatched.assert().failure().stderr(predicate::str::contains(
        "The '--prepare' option has to be provided",
    ));
}
|
|
|
|
|
|
2022-02-06 19:23:43 +03:00
|
|
|
|
// Re-using the same parameter name in two `--parameter-list` options is an error.
#[test]
fn fails_with_duplicate_parameter_names() {
    let mut cmd = hyperfine();
    cmd.args([
        "--parameter-list",
        "x",
        "1,2,3",
        "--parameter-list",
        "x",
        "a,b,c",
        "echo test",
    ]);
    cmd.assert()
        .failure()
        .stderr(predicate::str::contains("Duplicate parameter names: x"));
}
|
|
|
|
|
|
2022-02-06 15:48:39 +03:00
|
|
|
|
// When the shell cannot find the program, the shell itself exits non-zero,
// which hyperfine reports as a failed command.
#[test]
fn fails_for_unknown_command() {
    let mut cmd = hyperfine();
    cmd.args([
        "--runs=1",
        "some-nonexisting-program-b5d9574198b7e4b12a71fa4747c0a577",
    ]);
    cmd.assert().failure().stderr(predicate::str::contains(
        "Command terminated with non-zero exit code",
    ));
}
|
|
|
|
|
|
|
|
|
|
// Without an intermediate shell, spawning a nonexistent program fails
// directly, producing a dedicated error message.
#[test]
fn fails_for_unknown_command_without_shell() {
    let mut cmd = hyperfine();
    cmd.args([
        "--shell=none",
        "--runs=1",
        "some-nonexisting-program-b5d9574198b7e4b12a71fa4747c0a577",
    ]);
    cmd.assert().failure().stderr(predicate::str::contains(
        "Failed to run command 'some-nonexisting-program-b5d9574198b7e4b12a71fa4747c0a577'",
    ));
}
|
|
|
|
|
|
2022-02-23 00:19:35 +03:00
|
|
|
|
// A directly-spawned command that exits non-zero must fail the benchmark.
#[cfg(unix)]
#[test]
fn fails_for_failing_command_without_shell() {
    let mut cmd = hyperfine();
    cmd.args(["--shell=none", "--runs=1", "false"]);
    cmd.assert().failure().stderr(predicate::str::contains(
        "Command terminated with non-zero exit code",
    ));
}
|
|
|
|
|
|
2022-02-20 19:31:54 +03:00
|
|
|
|
// A failing `--setup` command aborts the benchmark with its own error message.
#[test]
fn fails_for_unknown_setup_command() {
    let mut cmd = hyperfine();
    cmd.args([
        "--runs=1",
        "--setup=some-nonexisting-program-b5d9574198b7e4b12a71fa4747c0a577",
        "echo test",
    ]);
    cmd.assert().failure().stderr(predicate::str::contains(
        "The setup command terminated with a non-zero exit code.",
    ));
}
|
|
|
|
|
|
|
|
|
|
// A failing `--cleanup` command aborts the benchmark with its own error message.
#[test]
fn fails_for_unknown_cleanup_command() {
    let mut cmd = hyperfine();
    cmd.args([
        "--runs=1",
        "--cleanup=some-nonexisting-program-b5d9574198b7e4b12a71fa4747c0a577",
        "echo test",
    ]);
    cmd.assert().failure().stderr(predicate::str::contains(
        "The cleanup command terminated with a non-zero exit code.",
    ));
}
|
|
|
|
|
|
|
|
|
|
// A failing `--prepare` command aborts the benchmark with its own error message.
#[test]
fn fails_for_unknown_prepare_command() {
    let mut cmd = hyperfine();
    cmd.args([
        "--prepare=some-nonexisting-program-b5d9574198b7e4b12a71fa4747c0a577",
        "echo test",
    ]);
    cmd.assert().failure().stderr(predicate::str::contains(
        "The preparation command terminated with a non-zero exit code.",
    ));
}
|
|
|
|
|
|
2022-02-06 15:48:39 +03:00
|
|
|
|
// A failing command is an error by default, but is tolerated when
// `--ignore-failure` is given.
#[cfg(unix)]
#[test]
fn can_run_failing_commands_with_ignore_failure_option() {
    // Default behavior: `false` exits non-zero, benchmark fails.
    let mut strict = hyperfine();
    strict.arg("false");
    strict.assert().failure().stderr(predicate::str::contains(
        "Command terminated with non-zero exit code",
    ));

    // With --ignore-failure the same command is accepted.
    let mut lenient = hyperfine();
    lenient.args(["--runs=1", "--ignore-failure", "false"]);
    lenient.assert().success();
}
|
2022-02-21 10:59:57 +03:00
|
|
|
|
|
2022-02-22 13:57:05 +03:00
|
|
|
|
// With `--show-output`, the command's stdout appears once per run
// (two runs -> two occurrences of the marker).
#[test]
fn shows_output_of_benchmarked_command() {
    let mut cmd = hyperfine();
    cmd.args([
        "--runs=2",
        "--command-name=dummy",
        "--show-output",
        "echo 4fd47015",
    ]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("4fd47015").count(2));
}
|
|
|
|
|
|
2022-02-21 10:59:57 +03:00
|
|
|
|
// A custom `--shell` (with its own arguments) is used to launch the
// benchmarked command; the shell flag differs between Unix (-c) and Windows (/C).
#[test]
fn runs_commands_using_user_defined_shell() {
    let unix_style = predicate::str::contains("custom_shell --shell-arg -c echo benchmark");
    let windows_style = predicate::str::contains("custom_shell --shell-arg /C echo benchmark");

    let mut cmd = hyperfine();
    cmd.args([
        "--runs=1",
        "--show-output",
        "--shell",
        "echo 'custom_shell' '--shell-arg'",
        "echo benchmark",
    ]);
    cmd.assert().success().stdout(unix_style.or(windows_style));
}
|
2022-02-22 10:36:56 +03:00
|
|
|
|
|
2022-09-12 00:20:22 +03:00
|
|
|
|
// `--input=<file>` feeds the file's contents to the command's stdin.
#[test]
fn can_pass_input_to_command_from_a_file() {
    let mut cmd = hyperfine();
    cmd.args([
        "--runs=1",
        "--input=example_input_file.txt",
        "--show-output",
        "cat",
    ]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("This text is part of a file"));
}
|
|
|
|
|
|
|
|
|
|
// A nonexistent `--input` file must be reported up front.
#[test]
fn fails_if_invalid_stdin_data_file_provided() {
    let mut cmd = hyperfine();
    cmd.args([
        "--runs=1",
        "--input=example_non_existent_file.txt",
        "--show-output",
        "cat",
    ]);
    cmd.assert().failure().stderr(predicate::str::contains(
        "The file 'example_non_existent_file.txt' specified as '--input' does not exist",
    ));
}
|
|
|
|
|
|
2022-02-22 10:36:56 +03:00
|
|
|
|
// The mean time is printed in seconds for slow commands, milliseconds for
// fast ones, and always in ms when `--time-unit=millisecond` is forced.
#[test]
fn returns_mean_time_in_correct_unit() {
    // >= 1 s: reported in seconds by default.
    let mut seconds = hyperfine_debug();
    seconds.arg("sleep 1.234");
    seconds
        .assert()
        .success()
        .stdout(predicate::str::contains("Time (mean ± σ): 1.234 s ±"));

    // < 1 s: reported in milliseconds by default.
    let mut millis = hyperfine_debug();
    millis.arg("sleep 0.123");
    millis
        .assert()
        .success()
        .stdout(predicate::str::contains("Time (mean ± σ): 123.0 ms ±"));

    // Explicit unit overrides the automatic choice.
    let mut forced = hyperfine_debug();
    forced.args(["--time-unit=millisecond", "sleep 1.234"]);
    forced
        .assert()
        .success()
        .stdout(predicate::str::contains("Time (mean ± σ): 1234.0 ms ±"));
}
|
|
|
|
|
|
|
|
|
|
// Commands too slow for the time-based budget fall back to the minimum
// of ten runs.
#[test]
fn performs_ten_runs_for_slow_commands() {
    let mut cmd = hyperfine_debug();
    cmd.arg("sleep 0.5");
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("10 runs"));
}
|
|
|
|
|
|
|
|
|
|
// Fast commands are repeated until ~3 s of benchmarking is reached
// (3 s / 0.01 s per run = 300 runs).
#[test]
fn performs_three_seconds_of_benchmarking_for_fast_commands() {
    let mut cmd = hyperfine_debug();
    cmd.arg("sleep 0.01");
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("300 runs"));
}
|
|
|
|
|
|
|
|
|
|
// Shell spawning overhead counts toward the per-run time
// (0.02 s shell + 0.01 s command = 0.03 s -> 100 runs in 3 s).
#[test]
fn takes_shell_spawning_time_into_account_for_computing_number_of_runs() {
    let mut cmd = hyperfine_debug();
    cmd.args(["--shell=sleep 0.02", "sleep 0.01"]);
    cmd.assert()
        .success()
        .stdout(predicate::str::contains("100 runs"));
}
|
|
|
|
|
|
|
|
|
|
// The `--prepare` command's runtime counts toward the per-run budget.
#[test]
fn takes_preparation_command_into_account_for_computing_number_of_runs() {
    // prepare (0.02 s) + command (0.01 s) = 0.03 s per run -> 100 runs.
    let mut without_shell_overhead = hyperfine_debug();
    without_shell_overhead.args(["--prepare=sleep 0.02", "sleep 0.01"]);
    without_shell_overhead
        .assert()
        .success()
        .stdout(predicate::str::contains("100 runs"));

    // Shell overhead applies to both the prepare command and the actual
    // command: prepare + shell + cmd + shell = 0.03 + 0.01 + 0.05 + 0.01
    // = 0.1 s per run -> 30 runs.
    let mut with_shell_overhead = hyperfine_debug();
    with_shell_overhead.args(["--shell=sleep 0.01", "--prepare=sleep 0.03", "sleep 0.05"]);
    with_shell_overhead
        .assert()
        .success()
        .stdout(predicate::str::contains("30 runs"));
}
|
|
|
|
|
|
|
|
|
|
// The summary reports relative speed factors against the fastest command.
#[test]
fn shows_benchmark_comparison_with_relative_times() {
    let twice = predicate::str::contains("2.00 ± 0.00 times faster");
    let thrice = predicate::str::contains("3.00 ± 0.00 times faster");

    let mut cmd = hyperfine_debug();
    cmd.args(["sleep 1.0", "sleep 2.0", "sleep 3.0"]);
    cmd.assert().success().stdout(twice.and(thrice));
}
|
|
|
|
|
|
|
|
|
|
// A parameter scan from 30 to 45 with step 5 runs exactly four benchmarks
// (30, 35, 40, 45) and never goes past the upper bound.
#[test]
fn performs_all_benchmarks_in_parameter_scan() {
    let mut cmd = hyperfine_debug();
    cmd.args([
        "--parameter-scan",
        "time",
        "30",
        "45",
        "--parameter-step-size",
        "5",
        "sleep {time}",
    ]);
    cmd.assert().success().stdout(
        predicate::str::contains("Benchmark 1: sleep 30")
            .and(predicate::str::contains("Benchmark 2: sleep 35"))
            .and(predicate::str::contains("Benchmark 3: sleep 40"))
            .and(predicate::str::contains("Benchmark 4: sleep 45"))
            .and(predicate::str::contains("Benchmark 5: sleep 50").not()),
    );
}
|
2023-04-20 13:49:53 +03:00
|
|
|
|
|
|
|
|
|
// When exporting Markdown to stdout ("-"), only the final export may be
// printed — each command name must appear exactly once, not once per
// intermediate update.
#[test]
fn intermediate_results_are_not_exported_to_stdout() {
    let mut cmd = hyperfine_debug();
    // Suppress progress output so stdout contains only the Markdown export.
    cmd.args(["--style=none", "--export-markdown", "-", "sleep 1", "sleep 2"]);
    cmd.assert().success().stdout(
        (predicate::str::contains("sleep 1").count(1))
            .and(predicate::str::contains("sleep 2").count(1)),
    );
}
|
2023-04-20 14:06:36 +03:00
|
|
|
|
|
|
|
|
|
// Even when a later benchmark fails, results gathered so far must already
// have been written to the export file.
#[test]
#[cfg(unix)]
fn exports_intermediate_results_to_file() {
    use tempfile::tempdir;

    let dir = tempdir().unwrap();
    let results_path = dir.path().join("results.md");

    // `true` succeeds, `false` then aborts the whole run.
    let mut cmd = hyperfine();
    cmd.args(["--runs=1", "--export-markdown"]);
    cmd.arg(&results_path);
    cmd.args(["true", "false"]);
    cmd.assert().failure();

    // The first benchmark's entry must be present in the export.
    let markdown = std::fs::read_to_string(results_path).unwrap();
    assert!(markdown.contains("true"));
}
|
2023-04-20 22:32:06 +03:00
|
|
|
|
|
|
|
|
|
// A parameter that does not occur in the command line is still appended
// to the benchmark name so the runs can be told apart.
#[test]
fn unused_parameters_are_shown_in_benchmark_name() {
    let master_name = predicate::str::contains("echo test (branch = master)");
    let feature_name = predicate::str::contains("echo test (branch = feature)");

    let mut cmd = hyperfine();
    cmd.args([
        "--runs=2",
        "--parameter-list",
        "branch",
        "master,feature",
        "echo test",
    ]);
    cmd.assert().success().stdout(master_name.and(feature_name));
}
|