Dry-run Enso benchmarks when checking engine in CI. (#4001)

This commit is contained in:
Michał Wawrzyniec Urbańczyk 2022-12-21 03:05:21 +01:00 committed by GitHub
parent e47d22321c
commit 9a173cb8d6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 54 additions and 10 deletions

View File

@ -73,7 +73,7 @@ pub async fn download_project_templates(client: reqwest::Client, enso_root: Path
/// Describes which benchmarks should be run.
#[derive(Clone, Copy, Debug, Display, PartialEq, Eq, PartialOrd, Ord, clap::ArgEnum)]
pub enum Benchmarks {
/// Run all SBT-exposed benchmarks. Does *not* include pure Enso benchmarks.
/// Run all SBT-exposed benchmarks. Does *not* include pure [`Benchmarks::Enso`] benchmarks.
All,
/// Run the runtime benchmark (from `sbt`).
Runtime,
@ -98,6 +98,9 @@ impl Benchmarks {
}
}
/// Describes what should be done with the backend.
///
/// Basically a recipe of what to do with `sbt` and its artifacts.
#[derive(Clone, Debug)]
pub struct BuildConfigurationFlags {
/// If true, repository shall be cleaned at the build start.
@ -110,6 +113,11 @@ pub struct BuildConfigurationFlags {
///
/// Note that this does not run the benchmarks, only ensures that they are buildable.
pub build_benchmarks: bool,
/// Whether to check that the Enso-written benchmarks compile.
///
/// Note that this does not run the benchmarks; it only ensures that they are buildable.
/// Also, this does nothing if `execute_benchmarks` contains `Benchmarks::Enso`.
pub check_enso_benchmarks: bool,
/// Which benchmarks should be run.
pub execute_benchmarks: BTreeSet<Benchmarks>,
/// Used to check that benchmarks do not fail at runtime, rather than to obtain their results.
@ -149,18 +157,24 @@ impl BuildConfigurationResolved {
config.build_engine_package = true;
}
if config.test_standard_library {
// Check for components that require Enso Engine runner. Basically everything that needs to
// run pure Enso code.
if config.test_standard_library
|| config.execute_benchmarks.contains(&Benchmarks::Enso)
|| config.check_enso_benchmarks
{
config.build_engine_package = true;
}
// If we are about to run pure Enso benchmarks, there is no reason to try them in dry run.
if config.execute_benchmarks.contains(&Benchmarks::Enso) {
config.check_enso_benchmarks = false;
}
if config.test_java_generated_from_rust {
config.generate_java_from_rust = true;
}
if config.execute_benchmarks.contains(&Benchmarks::Enso) {
config.build_engine_package = true;
}
Self(config)
}
}
@ -188,6 +202,7 @@ impl Default for BuildConfigurationFlags {
test_scala: false,
test_standard_library: false,
build_benchmarks: false,
check_enso_benchmarks: false,
execute_benchmarks: default(),
execute_benchmarks_once: false,
build_js_parser: false,

View File

@ -14,6 +14,7 @@ use crate::engine::ReleaseCommand;
use crate::engine::ReleaseOperation;
use crate::engine::FLATC_VERSION;
use crate::engine::PARALLEL_ENSO_TESTS;
use crate::enso::BenchmarkOptions;
use crate::enso::BuiltEnso;
use crate::enso::IrCaches;
use crate::paths::cache_directory;
@ -301,7 +302,8 @@ impl RunContext {
debug!("No SBT tasks to run.");
}
} else {
// Compile
// If we are running on a weak machine (like a GH-hosted runner), we need to build things
// one by one.
sbt.call_arg("compile").await?;
// Build the Runner & Runtime Uberjars
@ -345,9 +347,12 @@ impl RunContext {
let enso = BuiltEnso { paths: self.paths.clone() };
if self.config.execute_benchmarks.contains(&Benchmarks::Enso) {
enso.run_benchmarks().await?;
enso.run_benchmarks(BenchmarkOptions { dry_run: false }).await?;
} else if self.config.check_enso_benchmarks {
enso.run_benchmarks(BenchmarkOptions { dry_run: true }).await?;
}
// If we were running any benchmarks, they are complete by now. Upload the report.
if is_in_env() {
let path = &self.paths.repo_root.engine.runtime.bench_report_xml;

View File

@ -12,8 +12,25 @@ use ide_ci::programs::docker::ContainerId;
/// A boolean flag intended to round-trip through text, e.g. environment variables.
///
/// The derived `strum::Display` / `strum::EnumString` serialize it as the literal
/// variant names `"True"` / `"False"`.
#[derive(Copy, Clone, Debug, strum::Display, strum::EnumString)]
pub enum Boolean {
// Deliberately capitalized: the textual form is the variant name itself.
True,
False,
}
/// Converts a native `bool` into its textual [`Boolean`] counterpart.
impl From<bool> for Boolean {
    fn from(value: bool) -> Self {
        match value {
            true => Self::True,
            false => Self::False,
        }
    }
}
ide_ci::define_env_var! {
ENSO_JVM_OPTS, String;
ENSO_BENCHMARK_TEST_DRY_RUN, Boolean;
}
#[derive(Copy, Clone, Debug)]
@ -37,6 +54,11 @@ impl AsRef<OsStr> for IrCaches {
}
}
/// Options controlling how the Enso benchmarks are invoked.
#[derive(Copy, Clone, Debug)]
pub struct BenchmarkOptions {
/// When set, the benchmarks are only checked to run, without obtaining measurements.
/// Forwarded to the benchmark runner via the `ENSO_BENCHMARK_TEST_DRY_RUN` environment
/// variable.
pub dry_run: bool,
}
#[derive(Clone, Debug)]
pub struct BuiltEnso {
pub paths: Paths,
@ -48,9 +70,10 @@ impl BuiltEnso {
self.paths.engine.dir.join_iter(["bin", &filename])
}
pub async fn run_benchmarks(&self) -> Result {
pub async fn run_benchmarks(&self, opt: BenchmarkOptions) -> Result {
self.cmd()?
.with_args(["--run", self.paths.repo_root.test.benchmarks.as_str()])
.set_env(ENSO_BENCHMARK_TEST_DRY_RUN, &Boolean::from(opt.dry_run))?
.run_ok()
.await
}

View File

@ -54,7 +54,7 @@ pub enum Command {
ide_option: Vec<String>,
},
/// Builds Project Manager and runs it in the background. Builds GUI and runs it using
/// webpack's dev server.
/// GUI dev server.
Watch {
#[clap(flatten)]
gui: WatchJob<Gui>,

View File

@ -424,6 +424,7 @@ impl Processor {
build_benchmarks: true,
execute_benchmarks: once(Benchmarks::Runtime).collect(),
execute_benchmarks_once: true,
check_enso_benchmarks: true,
build_js_parser: matches!(TARGET_OS, OS::Linux),
verify_packages: true,
generate_documentation: true,