fix benchmark symbol table, some more clean up

gluax 2022-06-09 11:43:10 -07:00
parent 32d07583c5
commit 267ab16810
3 changed files with 84 additions and 83 deletions

View File

@@ -41,7 +41,7 @@ impl Emitter for BufEmitter {
     fn emit_err(&mut self, _: leo_errors::LeoError) {}
 
     fn last_emitted_err_code(&self) -> Option<i32> {
-        Some(0)
+        None
     }
 
     fn emit_warning(&mut self, _: leo_errors::LeoWarning) {}
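A minimal, self-contained sketch (simplified stand-in names, not the real leo_errors trait) of the distinction the one-line change above relies on: None reads as "nothing emitted", while Some(0) reads as "an error was already emitted", which a caller checking last_emitted_err_code().is_some() would presumably treat as a failed pass.

// Sketch only: a simplified stand-in trait to illustrate the None vs Some(0) distinction.
trait MockEmitter {
    fn emit_err(&mut self, _msg: &str) {}
    // None = nothing emitted yet; Some(code) = an error with that exit code was emitted.
    fn last_emitted_err_code(&self) -> Option<i32>;
}

struct SilentEmitter;

impl MockEmitter for SilentEmitter {
    fn last_emitted_err_code(&self) -> Option<i32> {
        None // report a clean run so downstream "did an error occur?" checks pass
    }
}

fn main() {
    let emitter = SilentEmitter;
    assert!(emitter.last_emitted_err_code().is_none());
}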
@@ -197,7 +197,7 @@ bench!(bench_full, BenchMode::Full);
 
 criterion_group!(
     name = benches;
-    config = Criterion::default().sample_size(200).measurement_time(Duration::from_secs(5)).nresamples(200_000);
+    config = Criterion::default().sample_size(200).measurement_time(Duration::from_secs(10)).nresamples(200_000);
     targets =
         bench_parse,
         bench_symbol,
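The only change above is the Criterion measurement window (5 s to 10 s per benchmark). For reference, the same configuration expressed as a standalone function using the criterion builder methods shown in the diff:

use std::time::Duration;

use criterion::Criterion;

// Mirrors the new benchmark configuration: 200 samples, a 10-second
// measurement window per benchmark, and 200_000 bootstrap resamples.
fn bench_config() -> Criterion {
    Criterion::default()
        .sample_size(200)
        .measurement_time(Duration::from_secs(10))
        .nresamples(200_000)
}

criterion_group! accepts exactly such an expression through its config = field, as in the hunk above.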

View File

@@ -14,11 +14,14 @@
 // You should have received a copy of the GNU General Public License
 // along with the Leo library. If not, see <https://www.gnu.org/licenses/>.
 
-use std::{fs, path::Path};
+use std::{
+    fs,
+    path::{Path, PathBuf},
+};
 
 use walkdir::WalkDir;
 
-pub fn find_tests<T: AsRef<Path> + Copy>(path: T) -> Vec<(String, String)> {
+pub fn find_tests<T: AsRef<Path> + Copy>(path: T) -> Vec<(PathBuf, String)> {
     WalkDir::new(path)
         .into_iter()
         .flatten()
@@ -26,12 +29,12 @@ pub fn find_tests<T: AsRef<Path> + Copy>(path: T) -> Vec<(String, String)> {
             let path = f.path();
             if matches!(path.extension(), Some(s) if s == "leo") {
                 let content = fs::read_to_string(path).expect("failed to read test");
-                Some((path.to_str().unwrap_or_default().to_string(), content))
+                Some((path.to_path_buf(), content))
             } else {
                 None
             }
         })
-        .collect::<Vec<(String, String)>>()
+        .collect::<Vec<(PathBuf, String)>>()
 }
 
 pub fn split_tests_one_line(source: &str) -> Vec<&str> {
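Returning PathBuf instead of String is what lets the runner below drop its `let path = Path::new(&path);` re-wrapping. A small hypothetical caller (names here are illustrative, not the crate's) showing the direct Path-method access this enables:

use std::path::PathBuf;

// Hypothetical helper: with (PathBuf, String) pairs the file stem is available
// directly; the old (String, String) form needed Path::new(&path) first.
fn test_names(tests: &[(PathBuf, String)]) -> Vec<String> {
    tests
        .iter()
        .map(|(path, _content)| {
            path.file_stem()
                .expect("no file name for test")
                .to_string_lossy()
                .into_owned()
        })
        .collect()
}

fn main() {
    let tests = vec![(PathBuf::from("tests/parser/simple.leo"), String::new())];
    assert_eq!(test_names(&tests), vec!["simple".to_string()]);
}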

View File

@@ -82,7 +82,7 @@ fn take_hook(
 }
 
 pub struct TestCases {
-    tests: Vec<(String, String)>,
+    tests: Vec<(PathBuf, String)>,
     path_prefix: PathBuf,
     fail_categories: Vec<TestFailure>,
 }
@@ -117,7 +117,7 @@ impl TestCases {
         let config = match extract_test_config(content) {
             None => {
                 self.fail_categories.push(TestFailure {
-                    path: path.to_string(),
+                    path: path.to_str().unwrap_or("").to_string(),
                     errors: vec![TestError::MissingTestConfig],
                 });
                 return true;
@@ -143,8 +143,6 @@ impl TestCases {
 
         let mut output = Vec::new();
         for ((path, content), config) in self.tests.clone().iter().zip(configs.into_iter()) {
-            let path = Path::new(&path);
-
             let test_name = path
                 .file_stem()
                 .expect("no file name for test")
@@ -196,89 +194,89 @@ impl TestCases {
             (expectation_path, None)
         }
     }
-
-    pub fn run_tests<T: Runner>(runner: &T, expectation_category: &str) {
-        let (mut cases, configs) = Self::new(expectation_category, |_| true);
-
-        let mut pass_categories = 0;
-        let mut pass_tests = 0;
-        let mut fail_tests = 0;
-
-        let mut outputs = vec![];
-        cases.process_tests(configs, |cases, (path, content, test_name, config)| {
-            let namespace = match runner.resolve_namespace(&config.namespace) {
-                Some(ns) => ns,
-                None => return,
-            };
-
-            let (expectation_path, expectations) = cases.clear_expectations(path, expectation_category);
-
-            let tests = match namespace.parse_type() {
-                ParseType::Line => crate::fetch::split_tests_one_line(content)
-                    .into_iter()
-                    .map(|x| x.to_string())
-                    .collect(),
-                ParseType::ContinuousLines => crate::fetch::split_tests_two_line(content),
-                ParseType::Whole => vec![content.to_string()],
-            };
-
-            let mut errors = vec![];
-            if let Some(expectations) = expectations.as_ref() {
-                if tests.len() != expectations.outputs.len() {
-                    errors.push(TestError::MismatchedTestExpectationLength);
-                }
-            }
-
-            let mut new_outputs = vec![];
-            let mut expected_output = expectations.as_ref().map(|x| x.outputs.iter());
-            for (i, test) in tests.into_iter().enumerate() {
-                let expected_output = expected_output.as_mut().and_then(|x| x.next()).cloned();
-                println!("running test {} @ '{}'", test_name, path.to_str().unwrap());
-                let panic_buf = set_hook();
-                let leo_output = panic::catch_unwind(|| {
-                    namespace.run_test(Test {
-                        name: test_name.to_string(),
-                        content: test.clone(),
-                        path: path.into(),
-                        config: config.extra.clone(),
-                    })
-                });
-                let output = take_hook(leo_output, panic_buf);
-                if let Some(error) = emit_errors(&test, &output, &config.expectation, expected_output, i) {
-                    fail_tests += 1;
-                    errors.push(error);
-                } else {
-                    pass_tests += 1;
-                    new_outputs.push(
-                        output
-                            .unwrap()
-                            .as_ref()
-                            .map(|x| serde_yaml::to_value(x).expect("serialization failed"))
-                            .unwrap_or_else(|e| Value::String(e.clone())),
-                    );
-                }
-            }
-            if errors.is_empty() {
-                if expectations.is_none() {
-                    outputs.push((
-                        expectation_path,
-                        TestExpectation {
-                            namespace: config.namespace,
-                            expectation: config.expectation,
-                            outputs: new_outputs,
-                        },
-                    ));
-                }
-                pass_categories += 1;
-            } else {
-                cases.fail_categories.push(TestFailure {
-                    path: path.to_str().unwrap().to_string(),
-                    errors,
-                })
-            }
-        });
-    }
-}
+}
+
+pub fn run_tests<T: Runner>(runner: &T, expectation_category: &str) {
+    let (mut cases, configs) = TestCases::new(expectation_category, |_| true);
+
+    let mut pass_categories = 0;
+    let mut pass_tests = 0;
+    let mut fail_tests = 0;
+
+    let mut outputs = vec![];
+    cases.process_tests(configs, |cases, (path, content, test_name, config)| {
+        let namespace = match runner.resolve_namespace(&config.namespace) {
+            Some(ns) => ns,
+            None => return,
+        };
+
+        let (expectation_path, expectations) = cases.clear_expectations(path, expectation_category);
+
+        let tests = match namespace.parse_type() {
+            ParseType::Line => crate::fetch::split_tests_one_line(content)
+                .into_iter()
+                .map(|x| x.to_string())
+                .collect(),
+            ParseType::ContinuousLines => crate::fetch::split_tests_two_line(content),
+            ParseType::Whole => vec![content.to_string()],
+        };
+
+        let mut errors = vec![];
+        if let Some(expectations) = expectations.as_ref() {
+            if tests.len() != expectations.outputs.len() {
+                errors.push(TestError::MismatchedTestExpectationLength);
+            }
+        }
+
+        let mut new_outputs = vec![];
+        let mut expected_output = expectations.as_ref().map(|x| x.outputs.iter());
+        for (i, test) in tests.into_iter().enumerate() {
+            let expected_output = expected_output.as_mut().and_then(|x| x.next()).cloned();
+            println!("running test {} @ '{}'", test_name, path.to_str().unwrap());
+            let panic_buf = set_hook();
+            let leo_output = panic::catch_unwind(|| {
+                namespace.run_test(Test {
+                    name: test_name.to_string(),
+                    content: test.clone(),
+                    path: path.into(),
+                    config: config.extra.clone(),
+                })
+            });
+            let output = take_hook(leo_output, panic_buf);
+            if let Some(error) = emit_errors(&test, &output, &config.expectation, expected_output, i) {
+                fail_tests += 1;
+                errors.push(error);
+            } else {
+                pass_tests += 1;
+                new_outputs.push(
+                    output
+                        .unwrap()
+                        .as_ref()
+                        .map(|x| serde_yaml::to_value(x).expect("serialization failed"))
+                        .unwrap_or_else(|e| Value::String(e.clone())),
+                );
+            }
+        }
+        if errors.is_empty() {
+            if expectations.is_none() {
+                outputs.push((
+                    expectation_path,
+                    TestExpectation {
+                        namespace: config.namespace,
+                        expectation: config.expectation,
+                        outputs: new_outputs,
+                    },
+                ));
+            }
+            pass_categories += 1;
+        } else {
+            cases.fail_categories.push(TestFailure {
+                path: path.to_str().unwrap().to_string(),
+                errors,
+            })
+        }
+    });
+}
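run_tests isolates each test with std::panic::catch_unwind plus a swapped panic hook (the set_hook/take_hook helpers above). A self-contained sketch of that capture pattern, independent of this framework's helpers and with hypothetical names:

use std::panic;
use std::sync::{Arc, Mutex};

// Install a hook that records the panic message, run the closure under
// catch_unwind, then restore the default hook; mirrors set_hook/take_hook in spirit only.
fn run_captured<F>(f: F) -> Result<i32, String>
where
    F: FnOnce() -> i32 + panic::UnwindSafe,
{
    let buf = Arc::new(Mutex::new(String::new()));
    let sink = buf.clone();
    panic::set_hook(Box::new(move |info| {
        sink.lock().unwrap().push_str(&info.to_string());
    }));
    let result = panic::catch_unwind(f);
    let _ = panic::take_hook(); // drop our hook, restoring the default one
    result.map_err(|_| buf.lock().unwrap().clone())
}

fn main() {
    assert_eq!(run_captured(|| 42), Ok(42));
    assert!(run_captured(|| panic!("boom")).is_err());
}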
/// returns (name, content) for all benchmark samples