Add iteration information to failure error message

Authored by David Peter on 2024-11-10 22:21:22 +01:00, committed by David Peter
parent f096c266f0
commit 83f04cdd31
2 changed files with 67 additions and 11 deletions
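
In short: this change introduces a BenchmarkIteration enum (NonBenchmarkRun, Warmup(u64), Benchmark(u64)), threads it through every Executor::run_command_and_measure call, exposes the current iteration to the benchmarked command via the HYPERFINE_ITERATION environment variable, and names the failing iteration in the error message. Below is a minimal standalone sketch of the env-var mapping: the enum and method are copied from the diff, while the main() demo is illustrative only.

// Standalone sketch: the enum and to_env_var_value() are taken from the
// diff below; main() only demonstrates the resulting env-var values.
pub enum BenchmarkIteration {
    NonBenchmarkRun,
    Warmup(u64),
    Benchmark(u64),
}

impl BenchmarkIteration {
    pub fn to_env_var_value(&self) -> Option<String> {
        match self {
            BenchmarkIteration::NonBenchmarkRun => None,
            BenchmarkIteration::Warmup(i) => Some(format!("warmup-{}", i)),
            BenchmarkIteration::Benchmark(i) => Some(format!("{}", i)),
        }
    }
}

fn main() {
    // HYPERFINE_ITERATION is unset for non-benchmark runs, "warmup-0",
    // "warmup-1", ... during warmup, and "0", "1", ... during measurement.
    assert_eq!(BenchmarkIteration::NonBenchmarkRun.to_env_var_value(), None);
    assert_eq!(
        BenchmarkIteration::Warmup(0).to_env_var_value(),
        Some("warmup-0".to_string())
    );
    assert_eq!(
        BenchmarkIteration::Benchmark(3).to_env_var_value(),
        Some("3".to_string())
    );
}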

Changed file 1 of 2

@@ -16,11 +16,28 @@ use super::timing_result::TimingResult;
 use anyhow::{bail, Context, Result};
 use statistical::mean;
 
+pub enum BenchmarkIteration {
+    NonBenchmarkRun,
+    Warmup(u64),
+    Benchmark(u64),
+}
+
+impl BenchmarkIteration {
+    pub fn to_env_var_value(&self) -> Option<String> {
+        match self {
+            BenchmarkIteration::NonBenchmarkRun => None,
+            BenchmarkIteration::Warmup(i) => Some(format!("warmup-{}", i)),
+            BenchmarkIteration::Benchmark(i) => Some(format!("{}", i)),
+        }
+    }
+}
+
 pub trait Executor {
     /// Run the given command and measure the execution time
     fn run_command_and_measure(
         &self,
         command: &Command<'_>,
+        iteration: BenchmarkIteration,
         command_failure_action: Option<CmdFailureAction>,
     ) -> Result<(TimingResult, ExitStatus)>;
@@ -39,6 +56,7 @@ pub trait Executor {
 fn run_command_and_measure_common(
     mut command: std::process::Command,
+    iteration: BenchmarkIteration,
     command_failure_action: CmdFailureAction,
     command_input_policy: &CommandInputPolicy,
     command_output_policy: &CommandOutputPolicy,
@@ -53,17 +71,29 @@ fn run_command_and_measure_common(
         randomized_environment_offset::value(),
     );
 
+    if let Some(value) = iteration.to_env_var_value() {
+        command.env("HYPERFINE_ITERATION", value);
+    }
+
     let result = execute_and_measure(command)
         .with_context(|| format!("Failed to run command '{command_name}'"))?;
 
     if command_failure_action == CmdFailureAction::RaiseError && !result.status.success() {
+        let when = match iteration {
+            BenchmarkIteration::NonBenchmarkRun => "a non-benchmark run".to_string(),
+            BenchmarkIteration::Warmup(0) => "the first warmup run".to_string(),
+            BenchmarkIteration::Warmup(i) => format!("warmup iteration {i}"),
+            BenchmarkIteration::Benchmark(0) => "the first benchmark run".to_string(),
+            BenchmarkIteration::Benchmark(i) => format!("benchmark iteration {i}"),
+        };
         bail!(
-            "{}. Use the '-i'/'--ignore-failure' option if you want to ignore this. \
+            "{cause} in {when}. Use the '-i'/'--ignore-failure' option if you want to ignore this. \
             Alternatively, use the '--show-output' option to debug what went wrong.",
-            result.status.code().map_or(
-                "The process has been terminated by a signal".into(),
-                |c| format!("Command terminated with non-zero exit code: {c}")
-            )
+            cause = result.status.code().map_or(
+                "The process has been terminated by a signal".into(),
+                |c| format!("Command terminated with non-zero exit code {c}")
+            ),
         );
     }
@@ -84,10 +114,12 @@ impl<'a> Executor for RawExecutor<'a> {
     fn run_command_and_measure(
         &self,
         command: &Command<'_>,
+        iteration: BenchmarkIteration,
         command_failure_action: Option<CmdFailureAction>,
     ) -> Result<(TimingResult, ExitStatus)> {
         let result = run_command_and_measure_common(
             command.get_command()?,
+            iteration,
             command_failure_action.unwrap_or(self.options.command_failure_action),
             &self.options.command_input_policy,
             &self.options.command_output_policy,
@@ -133,6 +165,7 @@ impl<'a> Executor for ShellExecutor<'a> {
     fn run_command_and_measure(
         &self,
         command: &Command<'_>,
+        iteration: BenchmarkIteration,
         command_failure_action: Option<CmdFailureAction>,
     ) -> Result<(TimingResult, ExitStatus)> {
         let on_windows_cmd = cfg!(windows) && *self.shell == Shell::Default("cmd.exe");
@@ -149,6 +182,7 @@ impl<'a> Executor for ShellExecutor<'a> {
         let mut result = run_command_and_measure_common(
             command_builder,
+            iteration,
             command_failure_action.unwrap_or(self.options.command_failure_action),
             &self.options.command_input_policy,
             &self.options.command_output_policy,
@@ -191,7 +225,11 @@ impl<'a> Executor for ShellExecutor<'a> {
         for _ in 0..COUNT {
             // Just run the shell without any command
-            let res = self.run_command_and_measure(&Command::new(None, ""), None);
+            let res = self.run_command_and_measure(
+                &Command::new(None, ""),
+                BenchmarkIteration::NonBenchmarkRun,
+                None,
+            );
 
             match res {
                 Err(_) => {
@@ -260,6 +298,7 @@ impl Executor for MockExecutor {
     fn run_command_and_measure(
         &self,
         command: &Command<'_>,
+        _iteration: BenchmarkIteration,
         _command_failure_action: Option<CmdFailureAction>,
     ) -> Result<(TimingResult, ExitStatus)> {
         #[cfg(unix)]
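
Since run_command_and_measure_common now sets HYPERFINE_ITERATION in the child's environment, a program under benchmark can detect which iteration it is running in; the variable is absent for non-benchmark runs such as the shell spawning-time calibration above. A minimal sketch of a consumer (illustrative, not part of the commit):

use std::env;

fn main() {
    // Set by hyperfine (with this change) for warmup and measured runs;
    // unset for non-benchmark runs.
    match env::var("HYPERFINE_ITERATION") {
        Ok(iteration) => println!("running under hyperfine, iteration {iteration}"),
        Err(_) => println!("HYPERFINE_ITERATION is not set"),
    }
}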

Changed file 2 of 2

@@ -6,6 +6,7 @@ pub mod timing_result;
 use std::cmp;
 
+use crate::benchmark::executor::BenchmarkIteration;
 use crate::command::Command;
 use crate::options::{CmdFailureAction, ExecutorKind, Options, OutputStyleOption};
 use crate::outlier_detection::{modified_zscores, OUTLIER_THRESHOLD};
@@ -57,7 +58,11 @@ impl<'a> Benchmark<'a> {
         error_output: &'static str,
     ) -> Result<TimingResult> {
         self.executor
-            .run_command_and_measure(command, Some(CmdFailureAction::RaiseError))
+            .run_command_and_measure(
+                command,
+                executor::BenchmarkIteration::NonBenchmarkRun,
+                Some(CmdFailureAction::RaiseError),
+            )
             .map(|r| r.0)
             .map_err(|_| anyhow!(error_output))
     }
@@ -187,9 +192,13 @@ impl<'a> Benchmark<'a> {
             None
         };
 
-        for _ in 0..self.options.warmup_count {
+        for i in 0..self.options.warmup_count {
             let _ = run_preparation_command()?;
-            let _ = self.executor.run_command_and_measure(self.command, None)?;
+            let _ = self.executor.run_command_and_measure(
+                self.command,
+                BenchmarkIteration::Warmup(i),
+                None,
+            )?;
             let _ = run_conclusion_command()?;
             if let Some(bar) = progress_bar.as_ref() {
                 bar.inc(1)
@@ -216,7 +225,11 @@ impl<'a> Benchmark<'a> {
             preparation_result.map_or(0.0, |res| res.time_real + self.executor.time_overhead());
 
         // Initial timing run
-        let (res, status) = self.executor.run_command_and_measure(self.command, None)?;
+        let (res, status) = self.executor.run_command_and_measure(
+            self.command,
+            BenchmarkIteration::Benchmark(0),
+            None,
+        )?;
         let success = status.success();
         let conclusion_result = run_conclusion_command()?;
@@ -260,7 +273,7 @@ impl<'a> Benchmark<'a> {
         }
 
         // Gather statistics (perform the actual benchmark)
-        for _ in 0..count_remaining {
+        for i in 0..count_remaining {
             run_preparation_command()?;
 
             let msg = {
@@ -272,7 +285,11 @@ impl<'a> Benchmark<'a> {
                 bar.set_message(msg.to_owned())
             }
 
-            let (res, status) = self.executor.run_command_and_measure(self.command, None)?;
+            let (res, status) = self.executor.run_command_and_measure(
+                self.command,
+                BenchmarkIteration::Benchmark(i + 1),
+                None,
+            )?;
             let success = status.success();
 
             times_real.push(res.time_real);
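
Taken together, the iteration numbering works out as follows: warmup runs are Warmup(0), Warmup(1), ...; the initial timing run is Benchmark(0); and the statistics loop above continues at Benchmark(i + 1). A failure in the fourth measured run would therefore report something like the following (illustrative, assuming exit code 1):

Command terminated with non-zero exit code 1 in benchmark iteration 3. Use the '-i'/'--ignore-failure' option if you want to ignore this. Alternatively, use the '--show-output' option to debug what went wrong.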