fix(bench): Result interpretation problems (#5798)

Co-authored-by: Lucas Nogueira <lucas@tauri.studio>
This commit is contained in:
Robin van Boven 2022-12-14 16:44:05 +01:00 committed by GitHub
parent bca09f7f5f
commit f7a080a121
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 49 additions and 37 deletions

View File

@ -10,6 +10,7 @@ on:
env: env:
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
CARGO_PROFILE_DEV_DEBUG: 0 # This would add unnecessary bloat to the target folder, decreasing cache efficiency. CARGO_PROFILE_DEV_DEBUG: 0 # This would add unnecessary bloat to the target folder, decreasing cache efficiency.
LC_ALL: en_US.UTF-8 # This prevents strace from changing its number format to use commas.
concurrency: concurrency:
group: ${{ github.workflow }}-${{ github.ref }} group: ${{ github.workflow }}-${{ github.ref }}

View File

@ -22,7 +22,7 @@ fn main() {
// all data // all data
let all_data_buffer = let all_data_buffer =
BufReader::new(File::open(&tauri_data).expect("Unable to read all data file")); BufReader::new(File::open(tauri_data).expect("Unable to read all data file"));
let mut all_data: Vec<utils::BenchResult> = let mut all_data: Vec<utils::BenchResult> =
serde_json::from_reader(all_data_buffer).expect("Unable to read all data buffer"); serde_json::from_reader(all_data_buffer).expect("Unable to read all data buffer");
@ -30,12 +30,11 @@ fn main() {
all_data.push(current_data); all_data.push(current_data);
// use only the latest 20 elements from all data // use only the latest 20 elements from all data
let recent: Vec<utils::BenchResult>; let recent: Vec<utils::BenchResult> = if all_data.len() > 20 {
if all_data.len() > 20 { all_data[all_data.len() - 20..].to_vec()
recent = all_data[all_data.len() - 20..].to_vec();
} else { } else {
recent = all_data.clone(); all_data.clone()
} };
// write JSON files // write JSON files
utils::write_json( utils::write_json(
@ -44,7 +43,7 @@ fn main() {
.expect("Something wrong with tauri_data"), .expect("Something wrong with tauri_data"),
&serde_json::to_value(&all_data).expect("Unable to build final json (alls)"), &serde_json::to_value(&all_data).expect("Unable to build final json (alls)"),
) )
.expect(format!("Unable to write {:?}", tauri_data).as_str()); .unwrap_or_else(|_| panic!("Unable to write {:?}", tauri_data));
utils::write_json( utils::write_json(
tauri_recent tauri_recent
@ -52,5 +51,5 @@ fn main() {
.expect("Something wrong with tauri_recent"), .expect("Something wrong with tauri_recent"),
&serde_json::to_value(&recent).expect("Unable to build final json (recent)"), &serde_json::to_value(&recent).expect("Unable to build final json (recent)"),
) )
.expect(format!("Unable to write {:?}", tauri_recent).as_str()); .unwrap_or_else(|_| panic!("Unable to write {:?}", tauri_recent));
} }

View File

@ -49,7 +49,7 @@ fn run_strace_benchmarks(new_data: &mut utils::BenchResult) -> Result<()> {
let mut file = tempfile::NamedTempFile::new()?; let mut file = tempfile::NamedTempFile::new()?;
Command::new("strace") Command::new("strace")
.args(&[ .args([
"-c", "-c",
"-f", "-f",
"-o", "-o",
@ -64,7 +64,10 @@ fn run_strace_benchmarks(new_data: &mut utils::BenchResult) -> Result<()> {
file.as_file_mut().read_to_string(&mut output)?; file.as_file_mut().read_to_string(&mut output)?;
let strace_result = utils::parse_strace_output(&output); let strace_result = utils::parse_strace_output(&output);
let clone = strace_result.get("clone").map(|d| d.calls).unwrap_or(0) + 1; // Note, we always have 1 thread. Use cloneX calls as a counter for additional threads created.
let clone = 1
+ strace_result.get("clone").map(|d| d.calls).unwrap_or(0)
+ strace_result.get("clone3").map(|d| d.calls).unwrap_or(0);
let total = strace_result.get("total").unwrap().calls; let total = strace_result.get("total").unwrap().calls;
thread_count.insert(name.to_string(), clone); thread_count.insert(name.to_string(), clone);
syscall_count.insert(name.to_string(), total); syscall_count.insert(name.to_string(), total);
@ -84,7 +87,7 @@ fn run_max_mem_benchmark() -> Result<HashMap<String, u64>> {
let benchmark_file = benchmark_file.to_str().unwrap(); let benchmark_file = benchmark_file.to_str().unwrap();
let proc = Command::new("mprof") let proc = Command::new("mprof")
.args(&[ .args([
"run", "run",
"-C", "-C",
"-o", "-o",
@ -99,7 +102,7 @@ fn run_max_mem_benchmark() -> Result<HashMap<String, u64>> {
println!("{:?}", proc_result); println!("{:?}", proc_result);
results.insert( results.insert(
name.to_string(), name.to_string(),
utils::parse_max_mem(&benchmark_file).unwrap(), utils::parse_max_mem(benchmark_file).unwrap(),
); );
} }
@ -132,7 +135,7 @@ fn rlib_size(target_dir: &std::path::Path, prefix: &str) -> u64 {
fn get_binary_sizes(target_dir: &Path) -> Result<HashMap<String, u64>> { fn get_binary_sizes(target_dir: &Path) -> Result<HashMap<String, u64>> {
let mut sizes = HashMap::<String, u64>::new(); let mut sizes = HashMap::<String, u64>::new();
let wry_size = rlib_size(&target_dir, "libwry"); let wry_size = rlib_size(target_dir, "libwry");
println!("wry {} bytes", wry_size); println!("wry {} bytes", wry_size);
sizes.insert("wry_rlib".to_string(), wry_size); sizes.insert("wry_rlib".to_string(), wry_size);
@ -174,9 +177,9 @@ fn cargo_deps() -> HashMap<String, usize> {
let mut cmd = Command::new("cargo"); let mut cmd = Command::new("cargo");
cmd.arg("tree"); cmd.arg("tree");
cmd.arg("--no-dedupe"); cmd.arg("--no-dedupe");
cmd.args(&["--edges", "normal"]); cmd.args(["--edges", "normal"]);
cmd.args(&["--prefix", "none"]); cmd.args(["--prefix", "none"]);
cmd.args(&["--target", target]); cmd.args(["--target", target]);
cmd.current_dir(&utils::tauri_root_path()); cmd.current_dir(&utils::tauri_root_path());
let full_deps = cmd.output().expect("failed to run cargo tree").stdout; let full_deps = cmd.output().expect("failed to run cargo tree").stdout;
@ -268,7 +271,7 @@ fn main() -> Result<()> {
time::format_description::parse("[year]-[month]-[day]T[hour]:[minute]:[second]Z").unwrap(); time::format_description::parse("[year]-[month]-[day]T[hour]:[minute]:[second]Z").unwrap();
let now = time::OffsetDateTime::now_utc(); let now = time::OffsetDateTime::now_utc();
let mut new_data = utils::BenchResult { let mut new_data = utils::BenchResult {
created_at: format!("{}", now.format(&format).unwrap()), created_at: now.format(&format).unwrap(),
sha1: utils::run_collect(&["git", "rev-parse", "HEAD"]) sha1: utils::run_collect(&["git", "rev-parse", "HEAD"])
.0 .0
.trim() .trim()

View File

@ -45,12 +45,11 @@ pub fn get_target() -> &'static str {
} }
pub fn target_dir() -> PathBuf { pub fn target_dir() -> PathBuf {
let target_dir = bench_root_path() bench_root_path()
.join("tests") .join("tests")
.join("target") .join("target")
.join(get_target()) .join(get_target())
.join("release"); .join("release")
target_dir.into()
} }
pub fn bench_root_path() -> PathBuf { pub fn bench_root_path() -> PathBuf {
@ -105,16 +104,14 @@ pub fn parse_max_mem(file_path: &str) -> Option<u64> {
let output = BufReader::new(file); let output = BufReader::new(file);
let mut highest: u64 = 0; let mut highest: u64 = 0;
// MEM 203.437500 1621617192.4123 // MEM 203.437500 1621617192.4123
for line in output.lines() { for line in output.lines().flatten() {
if let Ok(line) = line { // split line by space
// split line by space let split = line.split(' ').collect::<Vec<_>>();
let split = line.split(" ").collect::<Vec<_>>(); if split.len() == 3 {
if split.len() == 3 { // mprof generate result in MB
// mprof generate result in MB let current_bytes = str::parse::<f64>(split[1]).unwrap() as u64 * 1024 * 1024;
let current_bytes = str::parse::<f64>(split[1]).unwrap() as u64 * 1024 * 1024; if current_bytes > highest {
if current_bytes > highest { highest = current_bytes;
highest = current_bytes;
}
} }
} }
} }
@ -169,14 +166,26 @@ pub fn parse_strace_output(output: &str) -> HashMap<String, StraceOutput> {
} }
let total_fields = total_line.split_whitespace().collect::<Vec<_>>(); let total_fields = total_line.split_whitespace().collect::<Vec<_>>();
summary.insert( summary.insert(
"total".to_string(), "total".to_string(),
StraceOutput { match total_fields.len() {
percent_time: str::parse::<f64>(total_fields[0]).unwrap(), // Old format, has no usecs/call
seconds: str::parse::<f64>(total_fields[1]).unwrap(), 5 => StraceOutput {
usecs_per_call: None, percent_time: str::parse::<f64>(total_fields[0]).unwrap(),
calls: str::parse::<u64>(total_fields[2]).unwrap(), seconds: str::parse::<f64>(total_fields[1]).unwrap(),
errors: str::parse::<u64>(total_fields[3]).unwrap(), usecs_per_call: None,
calls: str::parse::<u64>(total_fields[2]).unwrap(),
errors: str::parse::<u64>(total_fields[3]).unwrap(),
},
6 => StraceOutput {
percent_time: str::parse::<f64>(total_fields[0]).unwrap(),
seconds: str::parse::<f64>(total_fields[1]).unwrap(),
usecs_per_call: Some(str::parse::<u64>(total_fields[2]).unwrap()),
calls: str::parse::<u64>(total_fields[3]).unwrap(),
errors: str::parse::<u64>(total_fields[4]).unwrap(),
},
_ => panic!("Unexpected total field count: {}", total_fields.len()),
}, },
); );
@ -222,7 +231,7 @@ pub fn download_file(url: &str, filename: PathBuf) {
.arg("-s") .arg("-s")
.arg("-o") .arg("-o")
.arg(&filename) .arg(&filename)
.arg(&url) .arg(url)
.status() .status()
.unwrap(); .unwrap();