Mirror of https://github.com/sharkdp/hyperfine.git (synced 2024-11-25 00:30:29 +03:00)
Add iteration information to failure error message

commit 83f04cdd31 (parent f096c266f0)
src/benchmark/executor.rs:

@@ -16,11 +16,28 @@ use super::timing_result::TimingResult;
 use anyhow::{bail, Context, Result};
 use statistical::mean;
 
+pub enum BenchmarkIteration {
+    NonBenchmarkRun,
+    Warmup(u64),
+    Benchmark(u64),
+}
+
+impl BenchmarkIteration {
+    pub fn to_env_var_value(&self) -> Option<String> {
+        match self {
+            BenchmarkIteration::NonBenchmarkRun => None,
+            BenchmarkIteration::Warmup(i) => Some(format!("warmup-{}", i)),
+            BenchmarkIteration::Benchmark(i) => Some(format!("{}", i)),
+        }
+    }
+}
+
 pub trait Executor {
     /// Run the given command and measure the execution time
     fn run_command_and_measure(
         &self,
         command: &Command<'_>,
+        iteration: BenchmarkIteration,
         command_failure_action: Option<CmdFailureAction>,
     ) -> Result<(TimingResult, ExitStatus)>;
@@ -39,6 +56,7 @@ pub trait Executor {
 
 fn run_command_and_measure_common(
     mut command: std::process::Command,
+    iteration: BenchmarkIteration,
     command_failure_action: CmdFailureAction,
     command_input_policy: &CommandInputPolicy,
     command_output_policy: &CommandOutputPolicy,
@@ -53,17 +71,29 @@ fn run_command_and_measure_common(
         randomized_environment_offset::value(),
     );
 
+    if let Some(value) = iteration.to_env_var_value() {
+        command.env("HYPERFINE_ITERATION", value);
+    }
+
     let result = execute_and_measure(command)
         .with_context(|| format!("Failed to run command '{command_name}'"))?;
 
     if command_failure_action == CmdFailureAction::RaiseError && !result.status.success() {
+        let when = match iteration {
+            BenchmarkIteration::NonBenchmarkRun => "a non-benchmark run".to_string(),
+            BenchmarkIteration::Warmup(0) => "the first warmup run".to_string(),
+            BenchmarkIteration::Warmup(i) => format!("warmup iteration {i}"),
+            BenchmarkIteration::Benchmark(0) => "the first benchmark run".to_string(),
+            BenchmarkIteration::Benchmark(i) => format!("benchmark iteration {i}"),
+        };
         bail!(
-            "{}. Use the '-i'/'--ignore-failure' option if you want to ignore this. \
+            "{cause} in {when}. Use the '-i'/'--ignore-failure' option if you want to ignore this. \
             Alternatively, use the '--show-output' option to debug what went wrong.",
-            result.status.code().map_or(
+            cause = result.status.code().map_or(
                 "The process has been terminated by a signal".into(),
-                |c| format!("Command terminated with non-zero exit code: {c}")
-            )
+                |c| format!("Command terminated with non-zero exit code {c}")
+            ),
         );
     }
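With this change, a failure is attributed to the iteration in which it occurred. An illustrative message assembled from the format strings above (not captured program output):

    Command terminated with non-zero exit code 1 in benchmark iteration 3. Use the '-i'/'--ignore-failure' option if you want to ignore this. Alternatively, use the '--show-output' option to debug what went wrong.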
@@ -84,10 +114,12 @@ impl<'a> Executor for RawExecutor<'a> {
     fn run_command_and_measure(
         &self,
         command: &Command<'_>,
+        iteration: BenchmarkIteration,
         command_failure_action: Option<CmdFailureAction>,
     ) -> Result<(TimingResult, ExitStatus)> {
         let result = run_command_and_measure_common(
             command.get_command()?,
+            iteration,
             command_failure_action.unwrap_or(self.options.command_failure_action),
             &self.options.command_input_policy,
             &self.options.command_output_policy,
@@ -133,6 +165,7 @@ impl<'a> Executor for ShellExecutor<'a> {
     fn run_command_and_measure(
         &self,
         command: &Command<'_>,
+        iteration: BenchmarkIteration,
         command_failure_action: Option<CmdFailureAction>,
     ) -> Result<(TimingResult, ExitStatus)> {
         let on_windows_cmd = cfg!(windows) && *self.shell == Shell::Default("cmd.exe");
@@ -149,6 +182,7 @@ impl<'a> Executor for ShellExecutor<'a> {
 
         let mut result = run_command_and_measure_common(
             command_builder,
+            iteration,
             command_failure_action.unwrap_or(self.options.command_failure_action),
             &self.options.command_input_policy,
             &self.options.command_output_policy,
@@ -191,7 +225,11 @@ impl<'a> Executor for ShellExecutor<'a> {
 
         for _ in 0..COUNT {
             // Just run the shell without any command
-            let res = self.run_command_and_measure(&Command::new(None, ""), None);
+            let res = self.run_command_and_measure(
+                &Command::new(None, ""),
+                BenchmarkIteration::NonBenchmarkRun,
+                None,
+            );
 
             match res {
                 Err(_) => {
@@ -260,6 +298,7 @@ impl Executor for MockExecutor {
     fn run_command_and_measure(
         &self,
         command: &Command<'_>,
+        _iteration: BenchmarkIteration,
         _command_failure_action: Option<CmdFailureAction>,
     ) -> Result<(TimingResult, ExitStatus)> {
         #[cfg(unix)]
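The iteration-to-environment-variable mapping introduced above is easiest to see by example; a minimal sketch (assumes BenchmarkIteration is in scope; not part of the commit):

    // No variable is set outside of warmup/benchmark iterations.
    assert_eq!(BenchmarkIteration::NonBenchmarkRun.to_env_var_value(), None);
    // Warmup iterations carry a "warmup-" prefix.
    assert_eq!(
        BenchmarkIteration::Warmup(0).to_env_var_value(),
        Some("warmup-0".to_string())
    );
    // Measured iterations use the bare index.
    assert_eq!(
        BenchmarkIteration::Benchmark(3).to_env_var_value(),
        Some("3".to_string())
    );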
src/benchmark/mod.rs:

@@ -6,6 +6,7 @@ pub mod timing_result;
 
 use std::cmp;
 
+use crate::benchmark::executor::BenchmarkIteration;
 use crate::command::Command;
 use crate::options::{CmdFailureAction, ExecutorKind, Options, OutputStyleOption};
 use crate::outlier_detection::{modified_zscores, OUTLIER_THRESHOLD};
@@ -57,7 +58,11 @@ impl<'a> Benchmark<'a> {
         error_output: &'static str,
     ) -> Result<TimingResult> {
         self.executor
-            .run_command_and_measure(command, Some(CmdFailureAction::RaiseError))
+            .run_command_and_measure(
+                command,
+                executor::BenchmarkIteration::NonBenchmarkRun,
+                Some(CmdFailureAction::RaiseError),
+            )
             .map(|r| r.0)
             .map_err(|_| anyhow!(error_output))
     }
@@ -187,9 +192,13 @@ impl<'a> Benchmark<'a> {
             None
         };
 
-        for _ in 0..self.options.warmup_count {
+        for i in 0..self.options.warmup_count {
             let _ = run_preparation_command()?;
-            let _ = self.executor.run_command_and_measure(self.command, None)?;
+            let _ = self.executor.run_command_and_measure(
+                self.command,
+                BenchmarkIteration::Warmup(i),
+                None,
+            )?;
             let _ = run_conclusion_command()?;
             if let Some(bar) = progress_bar.as_ref() {
                 bar.inc(1)
@@ -216,7 +225,11 @@ impl<'a> Benchmark<'a> {
             preparation_result.map_or(0.0, |res| res.time_real + self.executor.time_overhead());
 
         // Initial timing run
-        let (res, status) = self.executor.run_command_and_measure(self.command, None)?;
+        let (res, status) = self.executor.run_command_and_measure(
+            self.command,
+            BenchmarkIteration::Benchmark(0),
+            None,
+        )?;
         let success = status.success();
 
         let conclusion_result = run_conclusion_command()?;
@@ -260,7 +273,7 @@ impl<'a> Benchmark<'a> {
         }
 
         // Gather statistics (perform the actual benchmark)
-        for _ in 0..count_remaining {
+        for i in 0..count_remaining {
             run_preparation_command()?;
 
             let msg = {
@@ -272,7 +285,11 @@ impl<'a> Benchmark<'a> {
                 bar.set_message(msg.to_owned())
             }
 
-            let (res, status) = self.executor.run_command_and_measure(self.command, None)?;
+            let (res, status) = self.executor.run_command_and_measure(
+                self.command,
+                BenchmarkIteration::Benchmark(i + 1),
+                None,
+            )?;
             let success = status.success();
 
             times_real.push(res.time_real);
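Since run_command_and_measure_common now exports HYPERFINE_ITERATION, the benchmarked command can inspect it; a minimal sketch of a consumer (illustrative only, not part of this commit):

    use std::env;

    fn main() {
        // HYPERFINE_ITERATION is unset for non-benchmark runs (e.g. shell
        // calibration), "warmup-<i>" during warmup, and "<i>" during measured
        // benchmark runs, as defined by to_env_var_value() above.
        match env::var("HYPERFINE_ITERATION") {
            Err(_) => eprintln!("not running inside a benchmark iteration"),
            Ok(v) if v.starts_with("warmup-") => eprintln!("warmup run: {v}"),
            Ok(v) => eprintln!("benchmark run: {v}"),
        }
    }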