Move functions into Scheduler

David Peter 2022-02-20 14:04:37 +01:00 committed by David Peter
parent dcfeede18e
commit 39c7736095
3 changed files with 378 additions and 383 deletions

View File

@@ -25,89 +25,7 @@ use crate::util::units::Second;
use anyhow::{bail, Result};
pub struct Scheduler<'a> {
commands: &'a Commands<'a>,
options: &'a Options,
export_manager: &'a ExportManager,
results: Vec<BenchmarkResult>,
}
impl<'a> Scheduler<'a> {
pub fn new(
commands: &'a Commands,
options: &'a Options,
export_manager: &'a ExportManager,
) -> Self {
Self {
commands,
options,
export_manager,
results: vec![],
}
}
pub fn run_benchmarks(&mut self) -> Result<()> {
let shell_spawning_time = mean_shell_spawning_time(
&self.options.shell,
self.options.output_style,
self.options.command_output_policy,
)?;
for (num, cmd) in self.commands.iter().enumerate() {
self.results
.push(run_benchmark(num, cmd, shell_spawning_time, self.options)?);
// We export (all results so far) after each individual benchmark, because
// otherwise we would risk losing all results if a later benchmark fails.
self.export_manager
.write_results(&self.results, self.options.time_unit)?;
}
Ok(())
}
pub fn print_relative_speed_comparison(&self) {
if self.options.output_style == OutputStyleOption::Disabled {
return;
}
if self.results.len() < 2 {
return;
}
if let Some(mut annotated_results) = relative_speed::compute(&self.results) {
annotated_results.sort_by(|l, r| relative_speed::compare_mean_time(l.result, r.result));
let fastest = &annotated_results[0];
let others = &annotated_results[1..];
println!("{}", "Summary".bold());
println!(" '{}' ran", fastest.result.command.cyan());
for item in others {
println!(
"{}{} times faster than '{}'",
format!("{:8.2}", item.relative_speed).bold().green(),
if let Some(stddev) = item.relative_speed_stddev {
format!(" ± {}", format!("{:.2}", stddev).green())
} else {
"".into()
},
&item.result.command.magenta()
);
}
} else {
eprintln!(
"{}: The benchmark comparison could not be computed as some benchmark times are zero. \
This could be caused by background interference during the initial calibration phase \
of hyperfine, in combination with very fast commands (faster than a few milliseconds). \
Try to re-run the benchmark on a quiet system. If it does not help, your command is
most likely too fast to be accurately benchmarked by hyperfine.",
"Note".bold().red()
);
}
}
}
///////////////
/// Correct for shell spawning time
fn subtract_shell_spawning_time(time: Second, shell_spawning_time: Second) -> Second {
@@ -232,211 +150,226 @@ pub fn mean_shell_spawning_time(
time_system: mean(&times_system),
})
}
///////////////
fn run_intermediate_command(
shell: &Shell,
command: &Option<Command<'_>>,
command_output_policy: CommandOutputPolicy,
error_output: &'static str,
) -> Result<TimingResult> {
if let Some(ref cmd) = command {
let res = time_shell_command(
shell,
cmd,
command_output_policy,
CmdFailureAction::RaiseError,
None,
);
if res.is_err() {
bail!(error_output);
}
return res.map(|r| r.0);
}
Ok(TimingResult {
..Default::default()
})
}
pub struct Scheduler<'a> {
commands: &'a Commands<'a>,
options: &'a Options,
export_manager: &'a ExportManager,
results: Vec<BenchmarkResult>,
}
impl<'a> Scheduler<'a> {
pub fn new(
commands: &'a Commands,
options: &'a Options,
export_manager: &'a ExportManager,
) -> Self {
Self {
commands,
options,
export_manager,
results: vec![],
}
}
/// Run the command specified by `--setup`.
fn run_setup_command(
shell: &Shell,
command: &Option<Command<'_>>,
output_policy: CommandOutputPolicy,
) -> Result<TimingResult> {
let error_output = "The setup command terminated with a non-zero exit code. \
Append ' || true' to the command if you are sure that this can be ignored.";
run_intermediate_command(shell, command, output_policy, error_output)
}
/// Run the command specified by `--prepare`.
fn run_preparation_command(
shell: &Shell,
command: &Option<Command<'_>>,
output_policy: CommandOutputPolicy,
) -> Result<TimingResult> {
let error_output = "The preparation command terminated with a non-zero exit code. \
Append ' || true' to the command if you are sure that this can be ignored.";
run_intermediate_command(shell, command, output_policy, error_output)
}
/// Run the command specified by `--cleanup`.
fn run_cleanup_command(
shell: &Shell,
command: &Option<Command<'_>>,
output_policy: CommandOutputPolicy,
) -> Result<TimingResult> {
let error_output = "The cleanup command terminated with a non-zero exit code. \
Append ' || true' to the command if you are sure that this can be ignored.";
run_intermediate_command(shell, command, output_policy, error_output)
}
/// Run the benchmark for a single shell command
pub fn run_benchmark(
num: usize,
cmd: &Command<'_>,
shell_spawning_time: TimingResult,
options: &Options,
) -> Result<BenchmarkResult> {
let command_name = cmd.get_name();
if options.output_style != OutputStyleOption::Disabled {
println!(
"{}{}: {}",
"Benchmark ".bold(),
(num + 1).to_string().bold(),
command_name,
);
}
let mut times_real: Vec<Second> = vec![];
let mut times_user: Vec<Second> = vec![];
let mut times_system: Vec<Second> = vec![];
let mut exit_codes: Vec<Option<i32>> = vec![];
let mut all_succeeded = true;
// Run init command
let prepare_cmd = options.preparation_command.as_ref().map(|values| {
let preparation_command = if values.len() == 1 {
&values[0]
} else {
&values[num]
};
Command::new_parametrized(None, preparation_command, cmd.get_parameters().clone())
});
// Run setup command
let setup_cmd = options.setup_command.as_ref().map(|setup_command| {
Command::new_parametrized(None, setup_command, cmd.get_parameters().clone())
});
run_setup_command(&options.shell, &setup_cmd, options.command_output_policy)?;
pub fn run_benchmarks(&mut self) -> Result<()> {
let shell_spawning_time = mean_shell_spawning_time(
&self.options.shell,
self.options.output_style,
self.options.command_output_policy,
)?;
for (num, cmd) in self.commands.iter().enumerate() {
self.results
.push(self.run_benchmark(num, cmd, shell_spawning_time, self.options)?);
// We export (all results so far) after each individual benchmark, because
// otherwise we would risk losing all results if a later benchmark fails.
self.export_manager
.write_results(&self.results, self.options.time_unit)?;
}
Ok(())
}
pub fn print_relative_speed_comparison(&self) {
if self.options.output_style == OutputStyleOption::Disabled {
return;
}
if self.results.len() < 2 {
return;
}
if let Some(mut annotated_results) = relative_speed::compute(&self.results) {
annotated_results.sort_by(|l, r| relative_speed::compare_mean_time(l.result, r.result));
let fastest = &annotated_results[0];
let others = &annotated_results[1..];
println!("{}", "Summary".bold());
println!(" '{}' ran", fastest.result.command.cyan());
for item in others {
println!(
"{}{} times faster than '{}'",
format!("{:8.2}", item.relative_speed).bold().green(),
if let Some(stddev) = item.relative_speed_stddev {
format!(" ± {}", format!("{:.2}", stddev).green())
} else {
"".into()
},
&item.result.command.magenta()
);
}
} else {
eprintln!(
"{}: The benchmark comparison could not be computed as some benchmark times are zero. \
This could be caused by background interference during the initial calibration phase \
of hyperfine, in combination with very fast commands (faster than a few milliseconds). \
Try to re-run the benchmark on a quiet system. If it does not help, your command is \
most likely too fast to be accurately benchmarked by hyperfine.",
"Note".bold().red()
);
}
}
fn run_intermediate_command(
&self,
command: &Option<Command<'_>>,
error_output: &'static str,
) -> Result<TimingResult> {
if let Some(ref cmd) = command {
let res = time_shell_command(
&self.options.shell,
cmd,
self.options.command_output_policy,
CmdFailureAction::RaiseError,
None,
);
if res.is_err() {
bail!(error_output);
}
return res.map(|r| r.0);
}
Ok(TimingResult {
..Default::default()
})
}
/// Run the command specified by `--setup`.
fn run_setup_command(&self, command: &Option<Command<'_>>) -> Result<TimingResult> {
let error_output = "The setup command terminated with a non-zero exit code. \
Append ' || true' to the command if you are sure that this can be ignored.";
self.run_intermediate_command(command, error_output)
}
/// Run the command specified by `--prepare`.
fn run_preparation_command(&self, command: &Option<Command<'_>>) -> Result<TimingResult> {
let error_output = "The preparation command terminated with a non-zero exit code. \
Append ' || true' to the command if you are sure that this can be ignored.";
self.run_intermediate_command(command, error_output)
}
/// Run the command specified by `--cleanup`.
fn run_cleanup_command(&self, command: &Option<Command<'_>>) -> Result<TimingResult> {
let error_output = "The cleanup command terminated with a non-zero exit code. \
Append ' || true' to the command if you are sure that this can be ignored.";
self.run_intermediate_command(command, error_output)
}
/// Run the benchmark for a single shell command
pub fn run_benchmark(
&self,
num: usize,
cmd: &Command<'_>,
shell_spawning_time: TimingResult,
options: &Options,
) -> Result<BenchmarkResult> {
let command_name = cmd.get_name();
if options.output_style != OutputStyleOption::Disabled {
println!(
"{}{}: {}",
"Benchmark ".bold(),
(num + 1).to_string().bold(),
command_name,
);
}
let mut times_real: Vec<Second> = vec![];
let mut times_user: Vec<Second> = vec![];
let mut times_system: Vec<Second> = vec![];
let mut exit_codes: Vec<Option<i32>> = vec![];
let mut all_succeeded = true;
// Run init command
let prepare_cmd = options.preparation_command.as_ref().map(|values| {
let preparation_command = if values.len() == 1 {
&values[0]
} else {
&values[num]
};
Command::new_parametrized(
None,
preparation_command,
cmd.get_parameters().iter().cloned(),
)
});
// Run setup command
let setup_cmd = options.setup_command.as_ref().map(|setup_command| {
Command::new_parametrized(None, setup_command, cmd.get_parameters().iter().cloned())
});
self.run_setup_command(&setup_cmd)?;
// Warmup phase
if options.warmup_count > 0 {
let progress_bar = if options.output_style != OutputStyleOption::Disabled {
Some(get_progress_bar(
options.warmup_count,
"Performing warmup runs",
options.output_style,
))
} else {
None
};
for _ in 0..options.warmup_count {
let _ = self.run_preparation_command(&prepare_cmd)?;
let _ = time_shell_command(
&options.shell,
cmd,
options.command_output_policy,
options.command_failure_action,
None,
)?;
if let Some(bar) = progress_bar.as_ref() {
bar.inc(1)
}
}
if let Some(bar) = progress_bar.as_ref() {
bar.finish_and_clear()
}
}
// Warmup phase
if options.warmup_count > 0 {
let progress_bar = if options.output_style != OutputStyleOption::Disabled {
Some(get_progress_bar(
options.warmup_count,
"Performing warmup runs",
options.output_style,
))
} else {
None
};
for _ in 0..options.warmup_count {
let _ = run_preparation_command(
&options.shell,
&prepare_cmd,
options.command_output_policy,
)?;
let _ = time_shell_command(
&options.shell,
cmd,
options.command_output_policy,
options.command_failure_action,
None,
)?;
if let Some(bar) = progress_bar.as_ref() {
bar.inc(1)
}
}
if let Some(bar) = progress_bar.as_ref() {
bar.finish_and_clear()
}
}
// Set up progress bar (and spinner for initial measurement)
let progress_bar = if options.output_style != OutputStyleOption::Disabled {
Some(get_progress_bar(
options.run_bounds.min,
"Initial time measurement",
options.output_style,
))
} else {
None
};
let prepare_result =
run_preparation_command(&options.shell, &prepare_cmd, options.command_output_policy)?;
// Initial timing run
let (res, status) = time_shell_command(
&options.shell,
cmd,
options.command_output_policy,
options.command_failure_action,
Some(shell_spawning_time),
)?;
let success = status.success();
// Determine number of benchmark runs
let runs_in_min_time = (options.min_benchmarking_time
/ (res.time_real + prepare_result.time_real + shell_spawning_time.time_real))
as u64;
let count = {
let min = cmp::max(runs_in_min_time, options.run_bounds.min);
options
.run_bounds
.max
.as_ref()
.map(|max| cmp::min(min, *max))
.unwrap_or(min)
};
let count_remaining = count - 1;
// Save the first result
times_real.push(res.time_real);
times_user.push(res.time_user);
times_system.push(res.time_system);
exit_codes.push(extract_exit_code(status));
all_succeeded = all_succeeded && success;
// Re-configure the progress bar
if let Some(bar) = progress_bar.as_ref() {
bar.set_length(count)
}
if let Some(bar) = progress_bar.as_ref() {
bar.inc(1)
}
// Gather statistics
for _ in 0..count_remaining {
run_preparation_command(&options.shell, &prepare_cmd, options.command_output_policy)?;
let msg = {
let mean = format_duration(mean(&times_real), options.time_unit);
format!("Current estimate: {}", mean.to_string().green())
};
if let Some(bar) = progress_bar.as_ref() {
bar.set_message(msg.to_owned())
}
let (res, status) = time_shell_command(
&options.shell,
cmd,
options.command_output_policy,
options.command_failure_action,
Some(shell_spawning_time),
)?;
let success = status.success();
times_real.push(res.time_real);
times_user.push(res.time_user);
times_system.push(res.time_system);
exit_codes.push(extract_exit_code(status));
all_succeeded = all_succeeded && success;
if let Some(bar) = progress_bar.as_ref() {
bar.inc(1)
}
}
if let Some(bar) = progress_bar.as_ref() {
bar.finish_and_clear()
}
// Compute statistical quantities
let t_num = times_real.len();
let t_mean = mean(&times_real);
let t_stddev = if times_real.len() > 1 {
Some(standard_deviation(&times_real, Some(t_mean)))
} else {
None
};
let t_median = median(&times_real);
let t_min = min(&times_real);
let t_max = max(&times_real);
let user_mean = mean(&times_user);
let system_mean = mean(&times_system);
// Formatting and console output
let (mean_str, time_unit) = format_duration_unit(t_mean, options.time_unit);
let min_str = format_duration(t_min, Some(time_unit));
let max_str = format_duration(t_max, Some(time_unit));
let num_str = format!("{} runs", t_num);
let user_str = format_duration(user_mean, Some(time_unit));
let system_str = format_duration(system_mean, Some(time_unit));
if options.output_style != OutputStyleOption::Disabled {
if times_real.len() == 1 {
println!(
" Time ({} ≡): {:>8} {:>8} [User: {}, System: {}]",
"abs".green().bold(),
mean_str.green().bold(),
" ".to_string(), // alignment
user_str.blue(),
system_str.blue()
);
} else {
let stddev_str = format_duration(t_stddev.unwrap(), Some(time_unit));
println!(
" Time ({} ± {}): {:>8} ± {:>8} [User: {}, System: {}]",
"mean".green().bold(),
"σ".green(),
mean_str.green().bold(),
stddev_str.green(),
user_str.blue(),
system_str.blue()
);
println!(
" Range ({} … {}): {:>8} … {:>8} {}",
"min".cyan(),
"max".purple(),
min_str.cyan(),
max_str.purple(),
num_str.dimmed()
);
}
}
// Warnings
let mut warnings = vec![];
// Check execution time
if times_real.iter().any(|&t| t < MIN_EXECUTION_TIME) {
warnings.push(Warnings::FastExecutionTime);
}
// Check program exit codes
if !all_succeeded {
warnings.push(Warnings::NonZeroExitCode);
}
// Run outlier detection
let scores = modified_zscores(&times_real);
if scores[0] > OUTLIER_THRESHOLD {
warnings.push(Warnings::SlowInitialRun(times_real[0]));
} else if scores.iter().any(|&s| s.abs() > OUTLIER_THRESHOLD) {
warnings.push(Warnings::OutliersDetected);
}
if !warnings.is_empty() {
eprintln!(" ");
for warning in &warnings {
eprintln!(" {}: {}", "Warning".yellow(), warning);
}
}
if options.output_style != OutputStyleOption::Disabled {
println!(" ");
}
// Run cleanup command
let cleanup_cmd = options.cleanup_command.as_ref().map(|cleanup_command| {
Command::new_parametrized(None, cleanup_command, cmd.get_parameters().clone())
});
run_cleanup_command(&options.shell, &cleanup_cmd, options.command_output_policy)?;
Ok(BenchmarkResult {
command: command_name,
mean: t_mean,
stddev: t_stddev,
median: t_median,
user: user_mean,
system: system_mean,
min: t_min,
max: t_max,
times: Some(times_real),
exit_codes,
parameters: cmd
.get_parameters()
.iter()
.map(|(name, value)| ((*name).to_string(), value.to_string()))
.collect(),
})
}
// Set up progress bar (and spinner for initial measurement)
let progress_bar = if options.output_style != OutputStyleOption::Disabled {
Some(get_progress_bar(
options.run_bounds.min,
"Initial time measurement",
options.output_style,
))
} else {
None
};
let prepare_result = self.run_preparation_command(&prepare_cmd)?;
// Initial timing run
let (res, status) = time_shell_command(
&options.shell,
cmd,
options.command_output_policy,
options.command_failure_action,
Some(shell_spawning_time),
)?;
let success = status.success();
// Determine number of benchmark runs
let runs_in_min_time = (options.min_benchmarking_time
/ (res.time_real + prepare_result.time_real + shell_spawning_time.time_real))
as u64;
let count = {
let min = cmp::max(runs_in_min_time, options.run_bounds.min);
options
.run_bounds
.max
.as_ref()
.map(|max| cmp::min(min, *max))
.unwrap_or(min)
};
let count_remaining = count - 1;
// Save the first result
times_real.push(res.time_real);
times_user.push(res.time_user);
times_system.push(res.time_system);
exit_codes.push(extract_exit_code(status));
all_succeeded = all_succeeded && success;
// Re-configure the progress bar
if let Some(bar) = progress_bar.as_ref() {
bar.set_length(count)
}
if let Some(bar) = progress_bar.as_ref() {
bar.inc(1)
}
// Gather statistics
for _ in 0..count_remaining {
self.run_preparation_command(&prepare_cmd)?;
let msg = {
let mean = format_duration(mean(&times_real), options.time_unit);
format!("Current estimate: {}", mean.to_string().green())
};
if let Some(bar) = progress_bar.as_ref() {
bar.set_message(msg.to_owned())
}
let (res, status) = time_shell_command(
&options.shell,
cmd,
options.command_output_policy,
options.command_failure_action,
Some(shell_spawning_time),
)?;
let success = status.success();
times_real.push(res.time_real);
times_user.push(res.time_user);
times_system.push(res.time_system);
exit_codes.push(extract_exit_code(status));
all_succeeded = all_succeeded && success;
if let Some(bar) = progress_bar.as_ref() {
bar.inc(1)
}
}
if let Some(bar) = progress_bar.as_ref() {
bar.finish_and_clear()
}
// Compute statistical quantities
let t_num = times_real.len();
let t_mean = mean(&times_real);
let t_stddev = if times_real.len() > 1 {
Some(standard_deviation(&times_real, Some(t_mean)))
} else {
None
};
let t_median = median(&times_real);
let t_min = min(&times_real);
let t_max = max(&times_real);
let user_mean = mean(&times_user);
let system_mean = mean(&times_system);
// Formatting and console output
let (mean_str, time_unit) = format_duration_unit(t_mean, options.time_unit);
let min_str = format_duration(t_min, Some(time_unit));
let max_str = format_duration(t_max, Some(time_unit));
let num_str = format!("{} runs", t_num);
let user_str = format_duration(user_mean, Some(time_unit));
let system_str = format_duration(system_mean, Some(time_unit));
if options.output_style != OutputStyleOption::Disabled {
if times_real.len() == 1 {
println!(
" Time ({} ≡): {:>8} {:>8} [User: {}, System: {}]",
"abs".green().bold(),
mean_str.green().bold(),
" ".to_string(), // alignment
user_str.blue(),
system_str.blue()
);
} else {
let stddev_str = format_duration(t_stddev.unwrap(), Some(time_unit));
println!(
" Time ({} ± {}): {:>8} ± {:>8} [User: {}, System: {}]",
"mean".green().bold(),
"σ".green(),
mean_str.green().bold(),
stddev_str.green(),
user_str.blue(),
system_str.blue()
);
println!(
" Range ({} … {}): {:>8} … {:>8} {}",
"min".cyan(),
"max".purple(),
min_str.cyan(),
max_str.purple(),
num_str.dimmed()
);
}
}
// Warnings
let mut warnings = vec![];
// Check execution time
if times_real.iter().any(|&t| t < MIN_EXECUTION_TIME) {
warnings.push(Warnings::FastExecutionTime);
}
// Check program exit codes
if !all_succeeded {
warnings.push(Warnings::NonZeroExitCode);
}
// Run outlier detection
let scores = modified_zscores(&times_real);
if scores[0] > OUTLIER_THRESHOLD {
warnings.push(Warnings::SlowInitialRun(times_real[0]));
} else if scores.iter().any(|&s| s.abs() > OUTLIER_THRESHOLD) {
warnings.push(Warnings::OutliersDetected);
}
if !warnings.is_empty() {
eprintln!(" ");
for warning in &warnings {
eprintln!(" {}: {}", "Warning".yellow(), warning);
}
}
if options.output_style != OutputStyleOption::Disabled {
println!(" ");
}
// Run cleanup command
let cleanup_cmd = options.cleanup_command.as_ref().map(|cleanup_command| {
Command::new_parametrized(None, cleanup_command, cmd.get_parameters().iter().cloned())
});
self.run_cleanup_command(&cleanup_cmd)?;
Ok(BenchmarkResult {
command: command_name,
mean: t_mean,
stddev: t_stddev,
median: t_median,
user: user_mean,
system: system_mean,
min: t_min,
max: t_max,
times: Some(times_real),
exit_codes,
parameters: cmd
.get_parameters()
.iter()
.map(|(name, value)| ((*name).to_string(), value.to_string()))
.collect(),
})
}
}
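For orientation, here is a minimal sketch of how a call site might drive the refactored Scheduler after this commit. It only uses the public methods visible in the diff above (new, run_benchmarks, print_relative_speed_comparison); the wrapper function run_all and the way its three arguments are obtained are hypothetical.

// Hypothetical call site; `commands`, `options` and `export_manager` are
// assumed to be constructed by the CLI layer, as before this commit.
fn run_all(
    commands: &Commands,
    options: &Options,
    export_manager: &ExportManager,
) -> anyhow::Result<()> {
    let mut scheduler = Scheduler::new(commands, options, export_manager);
    // Runs all benchmarks; results are exported after each one so that a
    // failure in a later benchmark cannot lose earlier measurements.
    scheduler.run_benchmarks()?;
    scheduler.print_relative_speed_comparison();
    Ok(())
}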

View File

@@ -4,7 +4,10 @@ use std::str::FromStr;
use crate::{
error::{OptionsError, ParameterScanError},
parameter::range_step::{Numeric, RangeStep},
parameter::{
range_step::{Numeric, RangeStep},
ParameterNameAndValue,
},
};
use clap::ArgMatches;
@@ -26,7 +29,7 @@ pub struct Command<'a> {
expression: &'a str,
/// Zero or more parameter values.
parameters: Vec<(&'a str, ParameterValue)>,
parameters: Vec<ParameterNameAndValue<'a>>,
}
impl<'a> Command<'a> {
@@ -41,12 +44,12 @@ impl<'a> Command<'a> {
pub fn new_parametrized(
name: Option<&'a str>,
expression: &'a str,
parameters: Vec<(&'a str, ParameterValue)>,
parameters: impl IntoIterator<Item = ParameterNameAndValue<'a>>,
) -> Command<'a> {
Command {
name,
expression,
parameters,
parameters: parameters.into_iter().collect(),
}
}
@@ -61,7 +64,7 @@ impl<'a> Command<'a> {
self.replace_parameters_in(self.expression)
}
pub fn get_parameters(&self) -> &Vec<(&'a str, ParameterValue)> {
pub fn get_parameters(&self) -> &[(&'a str, ParameterValue)] {
&self.parameters
}
@@ -166,7 +169,7 @@ impl<'a> Commands<'a> {
i += 1;
let (command_index, params_indices) = index.split_first().unwrap();
let parameters = param_names_and_values
let parameters: Vec<_> = param_names_and_values
.iter()
.zip(params_indices)
.map(|((name, values), i)| (*name, ParameterValue::Text(values[*i].clone())))
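The widened new_parametrized signature is what allows both call styles that appear in the first file: passing an owned Vec (as the parameter-scan code above does) and passing a cloning iterator over a borrowed slice (as run_benchmark now does). A hedged sketch, assuming ParameterValue implements Clone as the iter().cloned() calls in the diff imply; the parameter name and command string are made up:

// Illustrative only, not part of the commit.
let params = vec![("branch", ParameterValue::Text("main".to_string()))];

// An owned Vec satisfies `impl IntoIterator<Item = ParameterNameAndValue>`:
let checkout = Command::new_parametrized(None, "git checkout {branch}", params.clone());

// So does a cloning iterator over a borrowed slice, which is what
// `cmd.get_parameters().iter().cloned()` produces in the benchmark code:
let checkout_again = Command::new_parametrized(None, "git checkout {branch}", params.iter().cloned());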

View File

@@ -17,3 +17,5 @@ impl<'a> ToString for ParameterValue {
}
}
}
pub type ParameterNameAndValue<'a> = (&'a str, ParameterValue);
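The alias is purely a readability device: (&'a str, ParameterValue) and ParameterNameAndValue<'a> are interchangeable. A small hypothetical helper showing it in a signature, relying on the ToString impl from this file:

// Hypothetical helper, not part of the commit.
fn describe_parameters(params: &[ParameterNameAndValue<'_>]) -> String {
    params
        .iter()
        .map(|(name, value)| format!("{}={}", name, value.to_string()))
        .collect::<Vec<_>>()
        .join(", ")
}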