fix benchmark symbol table, some more clean up

gluax 2022-06-09 11:43:10 -07:00
parent 32d07583c5
commit 267ab16810
3 changed files with 84 additions and 83 deletions

View File

@@ -41,7 +41,7 @@ impl Emitter for BufEmitter {
     fn emit_err(&mut self, _: leo_errors::LeoError) {}
 
     fn last_emitted_err_code(&self) -> Option<i32> {
-        Some(0)
+        None
     }
 
     fn emit_warning(&mut self, _: leo_errors::LeoWarning) {}
@@ -197,7 +197,7 @@ bench!(bench_full, BenchMode::Full);
 
 criterion_group!(
     name = benches;
-    config = Criterion::default().sample_size(200).measurement_time(Duration::from_secs(5)).nresamples(200_000);
+    config = Criterion::default().sample_size(200).measurement_time(Duration::from_secs(10)).nresamples(200_000);
     targets =
         bench_parse,
         bench_symbol,
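
Two harness fixes land in this first file: the buffering emitter's last_emitted_err_code now answers None (no error was emitted) rather than a spurious Some(0), and the Criterion measurement window grows from 5 to 10 seconds. Below is a minimal, self-contained sketch of the resulting benchmark configuration; bench_noop is a made-up placeholder target, where the real group targets are bench_parse, bench_symbol, and friends.

// Sketch only: `bench_noop` stands in for the real benchmark targets.
use std::time::Duration;

use criterion::{black_box, criterion_group, criterion_main, Criterion};

fn bench_noop(c: &mut Criterion) {
    // black_box keeps the trivial closure from being optimized away.
    c.bench_function("noop", |b| b.iter(|| black_box(1 + 1)));
}

criterion_group!(
    name = benches;
    // Mirrors the updated config: 200 samples per benchmark, a 10-second
    // measurement window, and 200_000 bootstrap resamples.
    config = Criterion::default()
        .sample_size(200)
        .measurement_time(Duration::from_secs(10))
        .nresamples(200_000);
    targets = bench_noop
);
criterion_main!(benches);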

View File

@@ -14,11 +14,14 @@
 // You should have received a copy of the GNU General Public License
 // along with the Leo library. If not, see <https://www.gnu.org/licenses/>.
 
-use std::{fs, path::Path};
+use std::{
+    fs,
+    path::{Path, PathBuf},
+};
 
 use walkdir::WalkDir;
 
-pub fn find_tests<T: AsRef<Path> + Copy>(path: T) -> Vec<(String, String)> {
+pub fn find_tests<T: AsRef<Path> + Copy>(path: T) -> Vec<(PathBuf, String)> {
     WalkDir::new(path)
         .into_iter()
         .flatten()
@@ -26,12 +29,12 @@ pub fn find_tests<T: AsRef<Path> + Copy>(path: T) -> Vec<(PathBuf, String)> {
             let path = f.path();
             if matches!(path.extension(), Some(s) if s == "leo") {
                 let content = fs::read_to_string(path).expect("failed to read test");
-                Some((path.to_str().unwrap_or_default().to_string(), content))
+                Some((path.to_path_buf(), content))
             } else {
                 None
             }
         })
-        .collect::<Vec<(String, String)>>()
+        .collect::<Vec<(PathBuf, String)>>()
 }
 
 pub fn split_tests_one_line(source: &str) -> Vec<&str> {
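
The driver for the rest of the commit is this signature change: find_tests now returns a lossless PathBuf for each test instead of a String built with to_str().unwrap_or_default(), which silently collapsed non-UTF-8 paths into empty strings. A usage sketch, assuming a hypothetical print_test_names helper that is not part of the repository:

use std::path::PathBuf;

// Hypothetical helper: with a PathBuf in hand, callers use Path methods
// directly instead of re-wrapping a String with Path::new.
fn print_test_names(tests: &[(PathBuf, String)]) {
    for (path, content) in tests {
        let name = path.file_stem().expect("no file name for test");
        println!("{}: {} bytes", name.to_string_lossy(), content.len());
    }
}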

View File

@@ -82,7 +82,7 @@ fn take_hook(
 }
 
 pub struct TestCases {
-    tests: Vec<(String, String)>,
+    tests: Vec<(PathBuf, String)>,
     path_prefix: PathBuf,
     fail_categories: Vec<TestFailure>,
 }
@@ -117,7 +117,7 @@ impl TestCases {
         let config = match extract_test_config(content) {
             None => {
                 self.fail_categories.push(TestFailure {
-                    path: path.to_string(),
+                    path: path.to_str().unwrap_or("").to_string(),
                     errors: vec![TestError::MissingTestConfig],
                 });
                 return true;
@@ -143,8 +143,6 @@ impl TestCases {
         let mut output = Vec::new();
 
         for ((path, content), config) in self.tests.clone().iter().zip(configs.into_iter()) {
-            let path = Path::new(&path);
-
             let test_name = path
                 .file_stem()
                 .expect("no file name for test")
@@ -196,9 +194,10 @@ impl TestCases {
             (expectation_path, None)
         }
     }
+}
 
 pub fn run_tests<T: Runner>(runner: &T, expectation_category: &str) {
-    let (mut cases, configs) = Self::new(expectation_category, |_| true);
+    let (mut cases, configs) = TestCases::new(expectation_category, |_| true);
 
     let mut pass_categories = 0;
     let mut pass_tests = 0;
@ -278,7 +277,6 @@ impl TestCases {
}) })
} }
}); });
}
} }
/// returns (name, content) for all benchmark samples /// returns (name, content) for all benchmark samples
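
The third file threads PathBuf through the test runner: TestCases stores Vec<(PathBuf, String)>, the let path = Path::new(&path); re-wrap inside the test loop disappears, and the lossy String conversion happens only at the failure-reporting boundary. The brace shuffle, together with Self::new becoming TestCases::new, indicates that run_tests moved out of the impl TestCases block into a free function. A minimal sketch of the boundary conversion, with report_path as a hypothetical name:

use std::path::Path;

// Hypothetical helper mirroring the diff's `to_str().unwrap_or("")` pattern:
// a non-UTF-8 path degrades to an empty string instead of panicking.
fn report_path(path: &Path) -> String {
    path.to_str().unwrap_or("").to_string()
}

An alternative would be path.to_string_lossy().into_owned(), which keeps the readable parts of a mixed-encoding path; the commit opts for the simpler empty-string fallback.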