mirror of https://github.com/sharkdp/hyperfine.git (synced 2024-11-29 06:56:57 +03:00)
parent 314e4e3387
commit b2a246c195
@@ -24,6 +24,8 @@
 ## Bugfixes
 
 - Fix a bug in the outlier detection which would only detect "slow outliers" but not the fast ones (runs that are much faster than the rest of the benchmarking runs), see #329
+- Better error messages for very fast commands that would lead to inf/nan results in the relative
+  speed comparison, see #319
 - Keep output colorized when the output is not interactive and `--style=full` or `--style=color` is used.
 
 ## Other
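For context on the #319 entry above: when the mean run time of the fastest command comes out as zero, the relative-speed ratio is computed with ordinary f64 division, so the result is inf (or NaN for 0/0) rather than an error. The following standalone sketch, with made-up values that are not part of this commit, shows that failure mode; the code changes below avoid it by returning None before dividing.

    // Standalone sketch (hypothetical values, not from the commit): dividing by a
    // zero mean silently produces inf/NaN instead of an error.
    fn main() {
        let fastest_mean = 0.0_f64; // a command "faster than a few milliseconds"
        let other_mean = 0.004_f64;

        assert!((other_mean / fastest_mean).is_infinite()); // 0.004 / 0.0 == inf
        assert!((fastest_mean / fastest_mean).is_nan()); // 0.0 / 0.0 == NaN
    }
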
@@ -5,7 +5,7 @@ use crate::hyperfine::internal::{compute_relative_speed, BenchmarkResultWithRelativeSpeed};
 use crate::hyperfine::types::BenchmarkResult;
 use crate::hyperfine::units::Unit;
 
-use std::io::Result;
+use std::io::{Error, ErrorKind, Result};
 
 #[derive(Default)]
 pub struct MarkdownExporter {}
@@ -23,15 +23,20 @@ impl Exporter for MarkdownExporter {
             Unit::Second
         };
 
-        let annotated_results = compute_relative_speed(results);
-
-        let mut destination = start_table(unit);
-
-        for result in annotated_results {
-            add_table_row(&mut destination, &result, unit);
-        }
-
-        Ok(destination)
+        if let Some(annotated_results) = compute_relative_speed(results) {
+            let mut destination = start_table(unit);
+
+            for result in annotated_results {
+                add_table_row(&mut destination, &result, unit);
+            }
+
+            Ok(destination)
+        } else {
+            Err(Error::new(
+                ErrorKind::Other,
+                "Relative speed comparison is not available for Markdown export.",
+            ))
+        }
     }
 }
 
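With this change, a missing comparison (None from compute_relative_speed) is turned into an io::Error, so the Markdown export fails with a message instead of writing inf/NaN ratios into the table. The commit spells out the if let/else; an equivalent Option-to-Result conversion could be sketched as below, using a hypothetical helper name that is not part of the exporter:

    use std::io::{Error, ErrorKind, Result};

    // Hypothetical helper (sketch only): map an absent comparison to an io::Error,
    // the same pattern the exporter uses above.
    fn require_comparison<T>(value: Option<T>) -> Result<T> {
        value.ok_or_else(|| {
            Error::new(
                ErrorKind::Other,
                "Relative speed comparison is not available for Markdown export.",
            )
        })
    }

    fn main() -> Result<()> {
        let table = require_comparison(Some("| Command | Mean |\n".to_string()))?;
        print!("{}", table);
        Ok(())
    }
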
@@ -52,6 +52,7 @@ pub fn min(vals: &[f64]) -> f64 {
         .unwrap()
 }
 
+#[derive(Debug)]
 pub struct BenchmarkResultWithRelativeSpeed<'a> {
     pub result: &'a BenchmarkResult,
     pub relative_speed: Scalar,
@@ -65,31 +66,38 @@ fn compare_mean_time(l: &BenchmarkResult, r: &BenchmarkResult) -> Ordering {
 
 pub fn compute_relative_speed<'a>(
     results: &'a [BenchmarkResult],
-) -> Vec<BenchmarkResultWithRelativeSpeed<'a>> {
+) -> Option<Vec<BenchmarkResultWithRelativeSpeed<'a>>> {
     let fastest: &BenchmarkResult = results
         .iter()
         .min_by(|&l, &r| compare_mean_time(l, r))
         .expect("at least one benchmark result");
 
-    results
-        .iter()
-        .map(|result| {
-            let ratio = result.mean / fastest.mean;
-
-            // https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas
-            // Covariance asssumed to be 0, i.e. variables are assumed to be independent
-            let ratio_stddev = ratio
-                * ((result.stddev / result.mean).powi(2) + (fastest.stddev / fastest.mean).powi(2))
-                    .sqrt();
-
-            BenchmarkResultWithRelativeSpeed {
-                result,
-                relative_speed: ratio,
-                relative_speed_stddev: ratio_stddev,
-                is_fastest: result == fastest,
-            }
-        })
-        .collect()
+    if fastest.mean == 0.0 {
+        return None;
+    }
+
+    Some(
+        results
+            .iter()
+            .map(|result| {
+                let ratio = result.mean / fastest.mean;
+
+                // https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas
+                // Covariance asssumed to be 0, i.e. variables are assumed to be independent
+                let ratio_stddev = ratio
+                    * ((result.stddev / result.mean).powi(2)
+                        + (fastest.stddev / fastest.mean).powi(2))
+                    .sqrt();
+
+                BenchmarkResultWithRelativeSpeed {
+                    result,
+                    relative_speed: ratio,
+                    relative_speed_stddev: ratio_stddev,
+                    is_fastest: result == fastest,
+                }
+            })
+            .collect(),
+    )
 }
 
 pub fn write_benchmark_comparison(results: &[BenchmarkResult]) {
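The linked Wikipedia page gives the standard propagation-of-uncertainty formula for a quotient of independent quantities: for r = m / m_fastest, the standard deviation is approximately r · sqrt((σ/m)² + (σ_fastest/m_fastest)²). A self-contained sketch with invented numbers (not taken from any real benchmark) reproduces the computation in the closure above:

    // Sketch with invented numbers, mirroring the ratio_stddev computation above
    // (covariance between the two measurements assumed to be zero).
    fn main() {
        let (mean, stddev) = (3.0_f64, 0.3_f64); // some command: 3.0 s ± 0.3 s
        let (fastest_mean, fastest_stddev) = (2.0_f64, 0.1_f64); // fastest: 2.0 s ± 0.1 s

        let ratio = mean / fastest_mean; // 1.5
        let ratio_stddev =
            ratio * ((stddev / mean).powi(2) + (fastest_stddev / fastest_mean).powi(2)).sqrt();

        println!("{:.2} ± {:.2}", ratio, ratio_stddev); // prints "1.50 ± 0.17"
    }

With a zero fastest mean, both the ratio and the (σ/m) term for the fastest command are undefined, which is why the new early `return None` sits before this block.
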
@@ -97,21 +105,31 @@ pub fn write_benchmark_comparison(results: &[BenchmarkResult]) {
         return;
     }
 
-    let mut annotated_results = compute_relative_speed(&results);
-    annotated_results.sort_by(|l, r| compare_mean_time(l.result, r.result));
+    if let Some(mut annotated_results) = compute_relative_speed(&results) {
+        annotated_results.sort_by(|l, r| compare_mean_time(l.result, r.result));
 
-    let fastest = &annotated_results[0];
-    let others = &annotated_results[1..];
+        let fastest = &annotated_results[0];
+        let others = &annotated_results[1..];
 
-    println!("{}", "Summary".bold());
-    println!("  '{}' ran", fastest.result.command.cyan());
+        println!("{}", "Summary".bold());
+        println!("  '{}' ran", fastest.result.command.cyan());
 
-    for item in others {
-        println!(
-            "{} ± {} times faster than '{}'",
-            format!("{:8.2}", item.relative_speed).bold().green(),
-            format!("{:.2}", item.relative_speed_stddev).green(),
-            &item.result.command.magenta()
-        );
-    }
+        for item in others {
+            println!(
+                "{} ± {} times faster than '{}'",
+                format!("{:8.2}", item.relative_speed).bold().green(),
+                format!("{:.2}", item.relative_speed_stddev).green(),
+                &item.result.command.magenta()
+            );
+        }
+    } else {
+        eprintln!(
+            "{}: The benchmark comparison could not be computed as some benchmark times are zero. \
+             This could be caused by background interference during the initial calibration phase \
+             of hyperfine, in combination with very fast commands (faster than a few milliseconds). \
+             Try to re-run the benchmark on a quiet system. If it does not help, you command is \
+             most likely too fast to be accurately benchmarked by hyperfine.",
+            "Note".bold().red()
+        );
+    }
 }
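The note's wording about the "initial calibration phase" refers to the shell spawning time that hyperfine measures up front (mean_shell_spawning_time in main.rs below) and subtracts from each run. If that calibration is inflated by background load, a command that itself takes only a few milliseconds can end up with a corrected mean of zero. A rough sketch with invented numbers, assuming corrected times are clamped at zero:

    // Rough sketch (invented numbers, clamping assumed): when the calibrated shell
    // spawn time is as large as the measured time, the corrected run time is zero.
    fn main() {
        let measured = 0.0031_f64; // wall-clock time of "shell + command"
        let shell_spawning_time = 0.0033_f64; // calibration result, slightly inflated

        let corrected = (measured - shell_spawning_time).max(0.0);
        assert_eq!(corrected, 0.0); // a zero mean makes the relative speed undefined
    }
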
@@ -125,12 +143,11 @@ fn test_max() {
     assert_eq!(1.0, max(&[-1.0, 1.0, 0.0]));
 }
 
-#[test]
-fn test_compute_relative_speed() {
-    use approx::assert_relative_eq;
+#[cfg(test)]
+fn create_result(name: &str, mean: Scalar) -> BenchmarkResult {
     use std::collections::BTreeMap;
 
-    let create_result = |name: &str, mean| BenchmarkResult {
+    BenchmarkResult {
         command: name.into(),
         mean,
         stddev: 1.0,
@@ -141,7 +158,12 @@ fn test_compute_relative_speed() {
         max: mean,
         times: None,
         parameters: BTreeMap::new(),
-    };
+    }
+}
+
+#[test]
+fn test_compute_relative_speed() {
+    use approx::assert_relative_eq;
 
     let results = vec![
         create_result("cmd1", 3.0),
@@ -149,13 +171,22 @@ fn test_compute_relative_speed() {
         create_result("cmd3", 5.0),
     ];
 
-    let annotated_results = compute_relative_speed(&results);
+    let annotated_results = compute_relative_speed(&results).unwrap();
 
     assert_relative_eq!(1.5, annotated_results[0].relative_speed);
     assert_relative_eq!(1.0, annotated_results[1].relative_speed);
     assert_relative_eq!(2.5, annotated_results[2].relative_speed);
 }
 
+#[test]
+fn test_compute_relative_speed_for_zero_times() {
+    let results = vec![create_result("cmd1", 1.0), create_result("cmd2", 0.0)];
+
+    let annotated_results = compute_relative_speed(&results);
+
+    assert!(annotated_results.is_none());
+}
+
 pub fn tokenize<'a>(values: &'a str) -> Vec<String> {
     let mut tokens = vec![];
     let mut buf = String::new();
src/main.rs
@@ -16,7 +16,7 @@ use crate::hyperfine::export::{ExportManager, ExportType};
 use crate::hyperfine::internal::{tokenize, write_benchmark_comparison};
 use crate::hyperfine::parameter_range::get_parameterized_commands;
 use crate::hyperfine::types::{
-    BenchmarkResult, CmdFailureAction, Command, HyperfineOptions, OutputStyleOption, ParameterValue,
+    CmdFailureAction, Command, HyperfineOptions, OutputStyleOption, ParameterValue,
 };
 use crate::hyperfine::units::Unit;
 
@@ -27,7 +27,11 @@ pub fn error(message: &str) -> ! {
 }
 
 /// Runs the benchmark for the given commands
-fn run(commands: &[Command<'_>], options: &HyperfineOptions) -> io::Result<Vec<BenchmarkResult>> {
+fn run(
+    commands: &[Command<'_>],
+    options: &HyperfineOptions,
+    export_manager: &ExportManager,
+) -> io::Result<()> {
     let shell_spawning_time =
         mean_shell_spawning_time(&options.shell, options.output_style, options.show_output)?;
 
@@ -47,7 +51,21 @@ fn run(commands: &[Command<'_>], options: &HyperfineOptions) -> io::Result<Vec<BenchmarkResult>> {
         timing_results.push(run_benchmark(num, cmd, shell_spawning_time, options)?);
     }
 
-    Ok(timing_results)
+    // Print relative speed comparison
+    if options.output_style != OutputStyleOption::Disabled {
+        write_benchmark_comparison(&timing_results);
+    }
+
+    // Export results
+    let ans = export_manager.write_results(timing_results, options.time_unit);
+    if let Err(e) = ans {
+        error(&format!(
+            "The following error occurred while exporting: {}",
+            e
+        ));
+    }
+
+    Ok(())
 }
 
 fn main() {
@@ -57,26 +75,12 @@ fn main() {
     let commands = build_commands(&matches);
 
     let res = match options {
-        Ok(ref opts) => run(&commands, &opts),
+        Ok(ref opts) => run(&commands, &opts, &export_manager),
         Err(ref e) => error(&e.to_string()),
     };
 
     match res {
-        Ok(timing_results) => {
-            let options = options.unwrap();
-
-            if options.output_style != OutputStyleOption::Disabled {
-                write_benchmark_comparison(&timing_results);
-            }
-
-            let ans = export_manager.write_results(timing_results, options.time_unit);
-            if let Err(e) = ans {
-                error(&format!(
-                    "The following error occurred while exporting: {}",
-                    e
-                ));
-            }
-        }
+        Ok(_) => {}
         Err(e) => error(&e.to_string()),
     }
 }