move over code that tries to do the full specialization process in parallel

This commit is contained in:
Folkert 2020-10-10 20:40:55 +02:00
parent 37e0523557
commit 8eec622d23
3 changed files with 197 additions and 16 deletions

View File

@@ -33,13 +33,12 @@ pub fn build_file(
OptLevel::Normal => roc_builtins::std::standard_stdlib(),
OptLevel::Optimize => roc_builtins::unique::uniq_stdlib(),
};
let solve_types = roc_load::file::Phase::SolveTypes;
let loaded = roc_load::file::load(
let loaded = roc_load::file::load_and_monomorphize(
&arena,
filename.clone(),
stdlib,
src_dir.as_path(),
subs_by_module,
solve_types,
)?;
let dest_filename = filename.with_file_name("roc_app.o");
let buf = &mut String::with_capacity(1024);
@@ -67,7 +66,7 @@ pub fn build_file(
buf
);
program::gen(
program::gen_from_mono_module(
&arena,
loaded,
filename,

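Taken together, the two hunks above switch the CLI build path from the phase-parameterized loader to the new one-shot entry point. A minimal sketch of the resulting call site (not part of the diff; it assumes the surrounding build_file setup, with arena, filename, stdlib, src_dir and subs_by_module as before):

let loaded = roc_load::file::load_and_monomorphize(
    &arena,
    filename.clone(),
    stdlib,
    src_dir.as_path(),
    subs_by_module,
)?;

Because the loader now runs all the way through specialization, no Phase value is passed in, and the returned MonomorphizedModule goes straight to program::gen_from_mono_module instead of program::gen.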
View File

@@ -6,7 +6,7 @@ use inkwell::OptimizationLevel;
use roc_collections::all::default_hasher;
use roc_gen::layout_id::LayoutIds;
use roc_gen::llvm::build::{build_proc, build_proc_header, module_from_builtins, OptLevel};
use roc_load::file::LoadedModule;
use roc_load::file::{LoadedModule, MonomorphizedModule};
use roc_mono::ir::{Env, PartialProc, Procs};
use roc_mono::layout::{Layout, LayoutCache};
use std::collections::HashSet;
@@ -303,3 +303,127 @@ pub fn gen(
println!("\nSuccess! 🎉\n\n\t{}\n", dest_filename.display());
}
#[allow(clippy::cognitive_complexity)]
pub fn gen_from_mono_module(
arena: &Bump,
mut loaded: MonomorphizedModule,
filename: PathBuf,
target: Triple,
dest_filename: &Path,
opt_level: OptLevel,
) {
use roc_reporting::report::{can_problem, type_problem, RocDocAllocator, DEFAULT_PALETTE};
let src = loaded.src;
let home = loaded.module_id;
let src_lines: Vec<&str> = src.split('\n').collect();
let palette = DEFAULT_PALETTE;
// Report parsing and canonicalization problems
let alloc = RocDocAllocator::new(&src_lines, home, &loaded.interns);
for problem in loaded.can_problems.into_iter() {
let report = can_problem(&alloc, filename.clone(), problem);
let mut buf = String::new();
report.render_color_terminal(&mut buf, &alloc, &palette);
println!("\n{}\n", buf);
}
for problem in loaded.type_problems.into_iter() {
let report = type_problem(&alloc, filename.clone(), problem);
let mut buf = String::new();
report.render_color_terminal(&mut buf, &alloc, &palette);
println!("\n{}\n", buf);
}
// Generate the binary
let context = Context::create();
let module = arena.alloc(module_from_builtins(&context, "app"));
let builder = context.create_builder();
let (mpm, fpm) = roc_gen::llvm::build::construct_optimization_passes(module, opt_level);
let ptr_bytes = target.pointer_width().unwrap().bytes() as u32;
let mut exposed_to_host =
HashSet::with_capacity_and_hasher(loaded.exposed_vars_by_symbol.len(), default_hasher());
for (symbol, _) in loaded.exposed_vars_by_symbol {
exposed_to_host.insert(symbol);
}
// Compile and add all the Procs before adding main
let env = roc_gen::llvm::build::Env {
arena: &arena,
builder: &builder,
context: &context,
interns: loaded.interns,
module,
ptr_bytes,
leak: false,
exposed_to_host,
};
// Populate Procs further and get the low-level Expr from the canonical Expr
let mut headers = Vec::with_capacity(loaded.procedures.len());
// Add all the Proc headers to the module.
// We have to do this in a separate pass first,
// because their bodies may reference each other.
let mut layout_ids = LayoutIds::default();
for ((symbol, layout), proc) in loaded.procedures {
let fn_val = build_proc_header(&env, &mut layout_ids, symbol, &layout, &proc);
headers.push((proc, fn_val));
}
// Build each proc using its header info.
for (proc, fn_val) in headers {
// NOTE: This is here to be uncommented in case verification fails.
// (This approach means we don't have to defensively clone name here.)
//
// println!("\n\nBuilding and then verifying function {:?}\n\n", proc);
build_proc(&env, &mut layout_ids, proc, fn_val);
if fn_val.verify(true) {
fpm.run_on(&fn_val);
} else {
// NOTE: If this fails, uncomment the above println to debug.
panic!(
"Non-main function failed LLVM verification. Uncomment the above println to debug!"
);
}
}
// Uncomment this to see the module's optimized LLVM instruction output:
// env.module.print_to_stderr();
mpm.run_on(module);
// Verify the module
if let Err(errors) = env.module.verify() {
panic!("😱 LLVM errors when defining module: {:?}", errors);
}
// Uncomment this to see the module's optimized LLVM instruction output:
// env.module.print_to_stderr();
// Emit the .o file
let opt = OptimizationLevel::Aggressive;
let reloc = RelocMode::Default;
let model = CodeModel::Default;
let target_machine = target::target_machine(&target, opt, reloc, model).unwrap();
target_machine
.write_to_file(&env.module, FileType::Object, &dest_filename)
.expect("Writing .o file failed");
println!("\nSuccess! 🎉\n\n\t{}\n", dest_filename.display());
}
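A hedged usage sketch for gen_from_mono_module, assuming a caller like build_file that already holds the MonomorphizedModule from load_and_monomorphize plus a target Triple, an output path, and an OptLevel (the variable names are illustrative, mirroring the first file in this commit):

program::gen_from_mono_module(
    &arena,          // same bump arena used for loading
    loaded,          // MonomorphizedModule returned by load_and_monomorphize
    filename,        // PathBuf of the source file, used in error reports
    target,          // target Triple for object-file emission
    &dest_filename,  // e.g. the "roc_app.o" path built in build_file
    opt_level,       // OptLevel::Normal or OptLevel::Optimize
);

All specialization has already happened inside load_and_monomorphize, so this function only reports canonicalization and type problems and then drives LLVM code generation and object emission.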

View File

@@ -13,7 +13,7 @@ use roc_constrain::module::{
use roc_constrain::module::{constrain_module, ExposedModuleTypes, SubsByModule};
use roc_module::ident::{Ident, ModuleName};
use roc_module::symbol::{IdentIds, Interns, ModuleId, ModuleIds, Symbol};
use roc_mono::ir::{MonoProblem, PartialProc, PendingSpecialization, Procs};
use roc_mono::ir::{MonoProblem, PartialProc, PendingSpecialization, Proc, Procs};
use roc_mono::layout::{Layout, LayoutCache};
use roc_parse::ast::{self, Attempting, ExposesEntry, ImportsEntry};
use roc_parse::module::module_defs;
@@ -327,6 +327,20 @@ struct ConstrainedModule<'a> {
module_timing: ModuleTiming,
}
#[derive(Debug)]
pub struct MonomorphizedModule<'a> {
pub module_id: ModuleId,
pub interns: Interns,
pub subs: Subs,
pub can_problems: Vec<roc_problem::can::Problem>,
pub type_problems: Vec<solve::TypeError>,
pub mono_problems: Vec<roc_mono::ir::MonoProblem>,
pub procedures: MutMap<(Symbol, Layout<'a>), Proc<'a>>,
pub exposed_vars_by_symbol: Vec<(Symbol, Variable)>,
pub src: Box<str>,
pub timings: MutMap<ModuleId, ModuleTiming>,
}
#[derive(Debug)]
enum Msg<'a> {
Header(ModuleHeader<'a>),
@@ -581,6 +595,55 @@ fn enqueue_task<'a>(
Ok(())
}
pub fn load_and_typecheck(
arena: &Bump,
filename: PathBuf,
stdlib: StdLib,
src_dir: &Path,
exposed_types: SubsByModule,
) -> Result<LoadedModule, LoadingProblem> {
use LoadResult::*;
match load(
arena,
filename,
stdlib,
src_dir,
exposed_types,
Phase::SolveTypes,
)? {
Monomorphized(_) => unreachable!(""),
TypeChecked(module) => Ok(module),
}
}
pub fn load_and_monomorphize<'a>(
arena: &'a Bump,
filename: PathBuf,
stdlib: StdLib,
src_dir: &Path,
exposed_types: SubsByModule,
) -> Result<MonomorphizedModule<'a>, LoadingProblem> {
use LoadResult::*;
match load(
arena,
filename,
stdlib,
src_dir,
exposed_types,
Phase::MakeSpecializations,
)? {
Monomorphized(module) => Ok(module),
TypeChecked(_) => unreachable!(""),
}
}
enum LoadResult<'a> {
TypeChecked(LoadedModule),
Monomorphized(MonomorphizedModule<'a>),
}
/// The loading process works like this, starting from the given filename (e.g. "main.roc"):
///
/// 1. Open the file.
@@ -624,19 +687,14 @@ fn enqueue_task<'a>(
/// and then linking them together, and possibly caching them by the hash of their
/// specializations, so if none of their specializations changed, we don't even need
/// to rebuild the module and can link in the cached one directly.)
pub fn load(
fn load<'a>(
arena: &'a Bump,
filename: PathBuf,
stdlib: StdLib,
src_dir: &Path,
exposed_types: SubsByModule,
goal_phase: Phase,
) -> Result<LoadedModule, LoadingProblem> {
// Initialize the need to specialize based on whether we're going all the
// way to that phase. This is mut because we switch it off after we're
// done specializing, and that indicates that all the pending specializations
// have been at least enqueued (even if they haven't all been specialized yet.)
let arena = Bump::new();
) -> Result<LoadResult<'a>, LoadingProblem> {
// Reserve one CPU for the main thread, and let all the others be eligible
// to spawn workers.
let num_workers = num_cpus::get() - 1;
@@ -812,13 +870,13 @@ pub fn load(
.map_err(|_| LoadingProblem::MsgChannelDied)?;
}
return Ok(finish(
return Ok(LoadResult::TypeChecked(finish(
state,
solved_subs,
problems,
exposed_vars_by_symbol,
src,
));
)));
}
msg => {
// This is where most of the main thread's work gets done.