hyperfine/tests/integration_tests.rs
David Peter d9418e0731 Use raw executor by default, skip intermediate shell
This adds a new "raw executor" (next to the "shell executor") that
allows hyperfine to execute commands directly without any intermediate
shell.

The command line is split into tokens using the `shell-words` crate,
according to POSIX rules. The first token is taken as the executable,
and the rest as arguments.

The new executor is enabled by default. In order to select the shell
executor, users will have to pass `--shell=default`.

This allows us to reduce measurement noise and to benchmark very quick
commands. It also decreases the time to run benchmarks, as we don't need
the calibration phase.

Also, it allows one to make sure that the executed command is not
implemented as a shell builtin. For example `hyperfine true`
and `hyperfine --shell=default true` return different times due
to the fact that `bash` executes `true` as a NOP.

Co-authored-by: Ciprian Dorin Craciun <ciprian@volution.ro>
2022-02-22 22:51:09 +01:00

295 lines
7.5 KiB
Rust
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

mod common;
use common::{hyperfine, hyperfine_shell};
use predicates::prelude::*;
/// Returns a `hyperfine` command pre-configured with the `--debug-mode` flag,
/// used by the tests below that check simulated timing output.
pub fn hyperfine_debug() -> assert_cmd::Command {
    let mut command = hyperfine();
    command.arg("--debug-mode");
    command
}
#[test]
fn hyperfine_runs_successfully() {
    // A plain two-run benchmark of a trivial command must exit successfully.
    let mut bench = hyperfine();
    bench.args(["--runs=2", "echo dummy benchmark"]);
    bench.assert().success();
}
#[test]
fn one_run_is_supported() {
    // A single run (--runs=1) is a valid configuration.
    let mut bench = hyperfine();
    bench.args(["--runs=1", "echo dummy benchmark"]);
    bench.assert().success();
}
#[test]
fn fails_with_wrong_number_of_command_name_arguments() {
    // Two --command-name options for a single benchmarked command are rejected.
    let mut bench = hyperfine();
    bench.args(["--command-name=a", "--command-name=b", "echo a"]);
    bench
        .assert()
        .failure()
        .stderr(predicate::str::contains("Too many --command-name options"));
}
#[test]
fn fails_with_wrong_number_of_prepare_options() {
    // One --prepare option per benchmarked command is accepted.
    let mut matching = hyperfine();
    matching.args([
        "--runs=1",
        "--prepare=echo a",
        "--prepare=echo b",
        "echo a",
        "echo b",
    ]);
    matching.assert().success();

    // Two --prepare options for three commands must be rejected.
    let mut mismatched = hyperfine();
    mismatched.args([
        "--runs=1",
        "--prepare=echo a",
        "--prepare=echo b",
        "echo a",
        "echo b",
        "echo c",
    ]);
    mismatched
        .assert()
        .failure()
        .stderr(predicate::str::contains(
            "The '--prepare' option has to be provided",
        ));
}
#[test]
fn fails_with_duplicate_parameter_names() {
    // Declaring the parameter name `x` twice via --parameter-list is an error.
    let mut bench = hyperfine();
    bench.args([
        "--parameter-list",
        "x",
        "1,2,3",
        "--parameter-list",
        "x",
        "a,b,c",
        "echo test",
    ]);
    bench
        .assert()
        .failure()
        .stderr(predicate::str::contains("Duplicate parameter names: x"));
}
#[test]
fn fails_for_unknown_command() {
    // With the default (raw) executor, a nonexistent executable is reported
    // as a failure to run the command itself.
    let mut bench = hyperfine();
    bench.args([
        "--runs=1",
        "some-nonexisting-program-b5d9574198b7e4b12a71fa4747c0a577",
    ]);
    bench.assert().failure().stderr(predicate::str::contains(
        "Failed to run command 'some-nonexisting-program-b5d9574198b7e4b12a71fa4747c0a577'",
    ));
}
#[test]
fn fails_for_unknown_shell_command() {
    // With the shell executor, the shell itself starts fine and reports the
    // missing program through a non-zero exit code instead.
    let mut bench = hyperfine_shell();
    bench.args([
        "--runs=1",
        "some-nonexisting-program-b5d9574198b7e4b12a71fa4747c0a577",
    ]);
    bench.assert().failure().stderr(predicate::str::contains(
        "Command terminated with non-zero exit code",
    ));
}
#[test]
fn fails_for_unknown_setup_command() {
    // A failing --setup command aborts the benchmark with a dedicated error.
    let mut bench = hyperfine();
    bench.args([
        "--runs=1",
        "--setup=some-nonexisting-program-b5d9574198b7e4b12a71fa4747c0a577",
        "echo test",
    ]);
    bench.assert().failure().stderr(predicate::str::contains(
        "The setup command terminated with a non-zero exit code.",
    ));
}
#[test]
fn fails_for_unknown_cleanup_command() {
    // A failing --cleanup command is reported with a dedicated error.
    let mut bench = hyperfine();
    bench.args([
        "--runs=1",
        "--cleanup=some-nonexisting-program-b5d9574198b7e4b12a71fa4747c0a577",
        "echo test",
    ]);
    bench.assert().failure().stderr(predicate::str::contains(
        "The cleanup command terminated with a non-zero exit code.",
    ));
}
#[test]
fn fails_for_unknown_prepare_command() {
    // A failing --prepare command is reported with a dedicated error.
    let mut bench = hyperfine();
    bench.args([
        "--prepare=some-nonexisting-program-b5d9574198b7e4b12a71fa4747c0a577",
        "echo test",
    ]);
    bench.assert().failure().stderr(predicate::str::contains(
        "The preparation command terminated with a non-zero exit code.",
    ));
}
#[cfg(unix)]
#[test]
fn can_run_failing_commands_with_ignore_failure_option() {
    // `false` exits non-zero, so benchmarking it fails by default ...
    let mut failing = hyperfine();
    failing.arg("false");
    failing.assert().failure().stderr(predicate::str::contains(
        "Command terminated with non-zero exit code",
    ));

    // ... but succeeds when --ignore-failure is given.
    let mut ignored = hyperfine();
    ignored.args(["--runs=1", "--ignore-failure", "false"]);
    ignored.assert().success();
}
#[test]
fn shows_output_of_benchmarked_command() {
    // With --show-output, the command's stdout appears once per run
    // (two runs => the marker string shows up exactly twice).
    let mut bench = hyperfine();
    bench.args([
        "--runs=2",
        "--command-name=dummy",
        "--show-output",
        "echo 4fd47015",
    ]);
    bench
        .assert()
        .success()
        .stdout(predicate::str::contains("4fd47015").count(2));
}
#[test]
fn runs_commands_using_user_defined_shell() {
    // The fake shell (an `echo` invocation) prints its own argv, letting us
    // verify the shell invocation uses `-c` (POSIX) or `/C` (Windows).
    let expected = predicate::str::contains("custom_shell --shell-arg -c echo benchmark").or(
        predicate::str::contains("custom_shell --shell-arg /C echo benchmark"),
    );
    let mut bench = hyperfine();
    bench.args([
        "--runs=1",
        "--show-output",
        "--shell",
        "echo 'custom_shell' '--shell-arg'",
        "echo benchmark",
    ]);
    bench.assert().success().stdout(expected);
}
#[test]
fn returns_mean_time_in_correct_unit() {
    // Durations of one second or more are shown in seconds.
    let mut secs = hyperfine_debug();
    secs.arg("sleep 1.234");
    secs.assert()
        .success()
        .stdout(predicate::str::contains("Time (mean ± σ): 1.234 s ±"));

    // Sub-second durations are shown in milliseconds.
    let mut millis = hyperfine_debug();
    millis.arg("sleep 0.123");
    millis
        .assert()
        .success()
        .stdout(predicate::str::contains("Time (mean ± σ): 123.0 ms ±"));

    // An explicit --time-unit overrides the automatic choice.
    let mut forced = hyperfine_debug();
    forced.args(["--time-unit=millisecond", "sleep 1.234"]);
    forced
        .assert()
        .success()
        .stdout(predicate::str::contains("Time (mean ± σ): 1234.0 ms ±"));
}
#[test]
fn performs_ten_runs_for_slow_commands() {
    // A simulated 0.5 s command falls back to the minimum of ten runs.
    let mut bench = hyperfine_debug();
    bench.arg("sleep 0.5");
    bench
        .assert()
        .success()
        .stdout(predicate::str::contains("10 runs"));
}
#[test]
fn performs_three_seconds_of_benchmarking_for_fast_commands() {
    // A simulated 10 ms command is repeated to fill the time budget:
    // 300 runs × 0.01 s = 3 s.
    let mut bench = hyperfine_debug();
    bench.arg("sleep 0.01");
    bench
        .assert()
        .success()
        .stdout(predicate::str::contains("300 runs"));
}
#[test]
fn takes_shell_spawning_time_into_account_for_computing_number_of_runs() {
    // Per run: shell (0.02 s) + command (0.01 s) = 0.03 s, so the run count
    // drops from 300 to 100 compared to the no-shell case.
    let mut bench = hyperfine_debug();
    bench.args(["--shell=sleep 0.02", "sleep 0.01"]);
    bench
        .assert()
        .success()
        .stdout(predicate::str::contains("100 runs"));
}
#[test]
fn takes_preparation_command_into_account_for_computing_number_of_runs() {
    // Per run: prepare (0.02 s) + command (0.01 s) = 0.03 s -> 100 runs.
    let mut with_prepare = hyperfine_debug();
    with_prepare.args(["--prepare=sleep 0.02", "sleep 0.01"]);
    with_prepare
        .assert()
        .success()
        .stdout(predicate::str::contains("100 runs"));

    // Shell overhead needs to be added to both the prepare command and the
    // actual command, leading to a total benchmark time of
    // (prepare + shell + cmd + shell = 0.1 s) per run -> 30 runs.
    let mut with_shell = hyperfine_debug();
    with_shell.args(["--shell=sleep 0.01", "--prepare=sleep 0.03", "sleep 0.05"]);
    with_shell
        .assert()
        .success()
        .stdout(predicate::str::contains("30 runs"));
}
#[test]
fn shows_benchmark_comparison_with_relative_times() {
    // The summary compares the slower commands against the 1 s baseline
    // with exact 2.00× and 3.00× factors (debug mode => zero noise).
    let expected = predicate::str::contains("2.00 ± 0.00 times faster")
        .and(predicate::str::contains("3.00 ± 0.00 times faster"));
    let mut bench = hyperfine_debug();
    bench.args(["sleep 1.0", "sleep 2.0", "sleep 3.0"]);
    bench.assert().success().stdout(expected);
}
#[test]
fn performs_all_benchmarks_in_parameter_scan() {
    // Scanning {time} from 30 to 45 with step 5 yields exactly four
    // benchmarks; a fifth one (sleep 50) must not be generated.
    let expected = predicate::str::contains("Benchmark 1: sleep 30")
        .and(predicate::str::contains("Benchmark 2: sleep 35"))
        .and(predicate::str::contains("Benchmark 3: sleep 40"))
        .and(predicate::str::contains("Benchmark 4: sleep 45"))
        .and(predicate::str::contains("Benchmark 5: sleep 50").not());
    let mut bench = hyperfine_debug();
    bench.args([
        "--parameter-scan",
        "time",
        "30",
        "45",
        "--parameter-step-size",
        "5",
        "sleep {time}",
    ]);
    bench.assert().success().stdout(expected);
}