Merge remote-tracking branch 'origin/trunk' into hello-web

Folkert 2021-09-22 21:30:27 +02:00
commit e278b820ef
7 changed files with 491 additions and 181 deletions

View File

@ -2,10 +2,9 @@ use crate::{Backend, Env, Relocation};
use bumpalo::collections::Vec;
use roc_collections::all::{MutMap, MutSet};
use roc_module::symbol::Symbol;
use roc_mono::ir::{BranchInfo, Literal, Stmt};
use roc_mono::ir::{BranchInfo, JoinPointId, Literal, Param, SelfRecursive, Stmt};
use roc_mono::layout::{Builtin, Layout};
use std::marker::PhantomData;
use target_lexicon::Triple;
pub mod aarch64;
pub mod x86_64;
@ -211,12 +210,16 @@ pub struct Backend64Bit<
env: &'a Env<'a>,
buf: Vec<'a, u8>,
relocs: Vec<'a, Relocation>,
proc_name: Option<String>,
is_self_recursive: Option<SelfRecursive>,
last_seen_map: MutMap<Symbol, *const Stmt<'a>>,
layout_map: MutMap<Symbol, *const Layout<'a>>,
layout_map: MutMap<Symbol, Layout<'a>>,
free_map: MutMap<*const Stmt<'a>, Vec<'a, Symbol>>,
symbol_storage_map: MutMap<Symbol, SymbolStorage<GeneralReg, FloatReg>>,
literal_map: MutMap<Symbol, Literal<'a>>,
join_map: MutMap<JoinPointId, u64>,
// This should probably be smarter than a vec.
// There are certain registers we should always use first. With pushing and popping, this ordering could get mixed up.
@ -247,11 +250,13 @@ impl<
CC: CallConv<GeneralReg, FloatReg>,
> Backend<'a> for Backend64Bit<'a, GeneralReg, FloatReg, ASM, CC>
{
fn new(env: &'a Env, _target: &Triple) -> Result<Self, String> {
fn new(env: &'a Env) -> Result<Self, String> {
Ok(Backend64Bit {
phantom_asm: PhantomData,
phantom_cc: PhantomData,
env,
proc_name: None,
is_self_recursive: None,
buf: bumpalo::vec![in env.arena],
relocs: bumpalo::vec![in env.arena],
last_seen_map: MutMap::default(),
@ -259,6 +264,7 @@ impl<
free_map: MutMap::default(),
symbol_storage_map: MutMap::default(),
literal_map: MutMap::default(),
join_map: MutMap::default(),
general_free_regs: bumpalo::vec![in env.arena],
general_used_regs: bumpalo::vec![in env.arena],
general_used_callee_saved_regs: MutSet::default(),
@ -275,12 +281,15 @@ impl<
self.env
}
fn reset(&mut self) {
fn reset(&mut self, name: String, is_self_recursive: SelfRecursive) {
self.proc_name = Some(name);
self.is_self_recursive = Some(is_self_recursive);
self.stack_size = 0;
self.free_stack_chunks.clear();
self.fn_call_stack_size = 0;
self.last_seen_map.clear();
self.layout_map.clear();
self.join_map.clear();
self.free_map.clear();
self.symbol_storage_map.clear();
self.buf.clear();
@ -304,7 +313,7 @@ impl<
&mut self.last_seen_map
}
fn layout_map(&mut self) -> &mut MutMap<Symbol, *const Layout<'a>> {
fn layout_map(&mut self) -> &mut MutMap<Symbol, Layout<'a>> {
&mut self.layout_map
}
@ -330,8 +339,49 @@ impl<
)?;
let setup_offset = out.len();
// Deal with jumps to the return address.
let old_relocs = std::mem::replace(&mut self.relocs, bumpalo::vec![in self.env.arena]);
// Check if there is an unnecessary jump to the return right at the end of the function.
let mut end_jmp_size = 0;
for reloc in old_relocs
.iter()
.filter(|reloc| matches!(reloc, Relocation::JmpToReturn { .. }))
{
if let Relocation::JmpToReturn {
inst_loc,
inst_size,
..
} = reloc
{
if *inst_loc as usize + *inst_size as usize == self.buf.len() {
end_jmp_size = *inst_size as usize;
break;
}
}
}
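// Illustrative example: with a 5-byte `jmp rel32` recorded at inst_loc 35 and
// self.buf.len() == 40, the jump ends exactly where the body ends, so it would
// only jump to the very next instruction. Dropping those 5 bytes
// (end_jmp_size = 5) lets execution fall through to the epilogue instead.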
// Update jumps to returns.
let ret_offset = self.buf.len() - end_jmp_size;
let mut tmp = bumpalo::vec![in self.env.arena];
for reloc in old_relocs
.iter()
.filter(|reloc| matches!(reloc, Relocation::JmpToReturn { .. }))
{
if let Relocation::JmpToReturn {
inst_loc,
inst_size,
offset,
} = reloc
{
if *inst_loc as usize + *inst_size as usize != self.buf.len() {
self.update_jmp_imm32_offset(&mut tmp, *inst_loc, *offset, ret_offset as u64);
}
}
}
// Add function body.
out.extend(&self.buf);
out.extend(&self.buf[..self.buf.len() - end_jmp_size]);
// Cleanup stack.
CC::cleanup_stack(
@ -342,23 +392,28 @@ impl<
)?;
ASM::ret(&mut out);
// Update relocs to include stack setup offset.
// Update other relocs to include stack setup offset.
let mut out_relocs = bumpalo::vec![in self.env.arena];
let old_relocs = std::mem::replace(&mut self.relocs, bumpalo::vec![in self.env.arena]);
out_relocs.extend(old_relocs.into_iter().map(|reloc| match reloc {
Relocation::LocalData { offset, data } => Relocation::LocalData {
offset: offset + setup_offset as u64,
data,
},
Relocation::LinkedData { offset, name } => Relocation::LinkedData {
offset: offset + setup_offset as u64,
name,
},
Relocation::LinkedFunction { offset, name } => Relocation::LinkedFunction {
offset: offset + setup_offset as u64,
name,
},
}));
out_relocs.extend(
old_relocs
.into_iter()
.filter(|reloc| !matches!(reloc, Relocation::JmpToReturn { .. }))
.map(|reloc| match reloc {
Relocation::LocalData { offset, data } => Relocation::LocalData {
offset: offset + setup_offset as u64,
data,
},
Relocation::LinkedData { offset, name } => Relocation::LinkedData {
offset: offset + setup_offset as u64,
name,
},
Relocation::LinkedFunction { offset, name } => Relocation::LinkedFunction {
offset: offset + setup_offset as u64,
name,
},
Relocation::JmpToReturn { .. } => unreachable!(),
}),
);
Ok((out.into_bump_slice(), out_relocs.into_bump_slice()))
}
@ -401,29 +456,13 @@ impl<
arg_layouts: &[Layout<'a>],
ret_layout: &Layout<'a>,
) -> Result<(), String> {
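// Self-recursive calls whose join point has already been recorded are lowered
// to a jump back to that join point instead of an actual call, so the current
// stack frame is reused.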
if let Some(SelfRecursive::SelfRecursive(id)) = self.is_self_recursive {
if &fn_name == self.proc_name.as_ref().unwrap() && self.join_map.contains_key(&id) {
return self.build_jump(&id, args, arg_layouts, ret_layout);
}
}
// Save used caller saved regs.
let old_general_used_regs = std::mem::replace(
&mut self.general_used_regs,
bumpalo::vec![in self.env.arena],
);
for (reg, saved_sym) in old_general_used_regs.into_iter() {
if CC::general_caller_saved(&reg) {
self.general_free_regs.push(reg);
self.free_to_stack(&saved_sym)?;
} else {
self.general_used_regs.push((reg, saved_sym));
}
}
let old_float_used_regs =
std::mem::replace(&mut self.float_used_regs, bumpalo::vec![in self.env.arena]);
for (reg, saved_sym) in old_float_used_regs.into_iter() {
if CC::float_caller_saved(&reg) {
self.float_free_regs.push(reg);
self.free_to_stack(&saved_sym)?;
} else {
self.float_used_regs.push((reg, saved_sym));
}
}
self.push_used_caller_saved_regs_to_stack()?;
// Put values in param regs or on top of the stack.
let tmp_stack_size = CC::store_args(
@ -486,7 +525,7 @@ impl<
// Build unconditional jump to the end of this switch.
// Since we don't know the offset yet, emit a placeholder and overwrite it later.
let jmp_location = self.buf.len();
let jmp_offset = ASM::jmp_imm32(&mut self.buf, 0);
let jmp_offset = ASM::jmp_imm32(&mut self.buf, 0x1234_5678);
ret_jumps.push((jmp_location, jmp_offset));
// Overwrite the original jne with the correct offset.
@ -510,12 +549,12 @@ impl<
// Update all return jumps to jump past the default case.
let ret_offset = self.buf.len();
for (jmp_location, start_offset) in ret_jumps.into_iter() {
tmp.clear();
let jmp_offset = ret_offset - start_offset;
ASM::jmp_imm32(&mut tmp, jmp_offset as i32);
for (i, byte) in tmp.iter().enumerate() {
self.buf[jmp_location + i] = *byte;
}
self.update_jmp_imm32_offset(
&mut tmp,
jmp_location as u64,
start_offset as u64,
ret_offset as u64,
);
}
Ok(())
} else {
@ -526,6 +565,134 @@ impl<
}
}
fn build_join(
&mut self,
id: &JoinPointId,
parameters: &'a [Param<'a>],
body: &'a Stmt<'a>,
remainder: &'a Stmt<'a>,
ret_layout: &Layout<'a>,
) -> Result<(), String> {
// Create a jump to the remainder.
let jmp_location = self.buf.len();
let start_offset = ASM::jmp_imm32(&mut self.buf, 0x1234_5678);
// This section can essentially be seen as a sub function within the main function.
// Thus we build using a new backend with some minor extra synchronization.
let mut sub_backend = Self::new(self.env)?;
sub_backend.reset(
self.proc_name.as_ref().unwrap().clone(),
self.is_self_recursive.as_ref().unwrap().clone(),
);
// Sync static maps of important information.
sub_backend.last_seen_map = self.last_seen_map.clone();
sub_backend.layout_map = self.layout_map.clone();
sub_backend.free_map = self.free_map.clone();
// Setup join point.
sub_backend.join_map.insert(*id, 0);
self.join_map.insert(*id, self.buf.len() as u64);
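// Inside the sub backend the join point lives at offset 0, the start of its own
// buffer, while the parent records the current end of `buf`, which is exactly
// where the sub buffer gets spliced in below.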
// Sync stack size so the "sub function" doesn't mess up our stack.
sub_backend.stack_size = self.stack_size;
sub_backend.fn_call_stack_size = self.fn_call_stack_size;
// Load params as if they were args.
let mut args = bumpalo::vec![in self.env.arena];
for param in parameters {
args.push((param.layout, param.symbol));
}
sub_backend.load_args(args.into_bump_slice(), ret_layout)?;
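// Jumps to this join point place their arguments with CC::store_args (see
// build_jump), so loading the parameters as if they were function args keeps
// both sides agreeing on where each value lives.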
// Build all statements in body.
sub_backend.build_stmt(body, ret_layout)?;
// Merge the "sub function" into the main function.
let sub_func_offset = self.buf.len() as u64;
self.buf.extend_from_slice(&sub_backend.buf);
// Update stack based on how much was used by the sub function.
self.stack_size = sub_backend.stack_size;
self.fn_call_stack_size = sub_backend.fn_call_stack_size;
// Relocations must be shifted to be merged correctly.
self.relocs
.extend(sub_backend.relocs.into_iter().map(|reloc| match reloc {
Relocation::LocalData { offset, data } => Relocation::LocalData {
offset: offset + sub_func_offset,
data,
},
Relocation::LinkedData { offset, name } => Relocation::LinkedData {
offset: offset + sub_func_offset,
name,
},
Relocation::LinkedFunction { offset, name } => Relocation::LinkedFunction {
offset: offset + sub_func_offset,
name,
},
Relocation::JmpToReturn {
inst_loc,
inst_size,
offset,
} => Relocation::JmpToReturn {
inst_loc: inst_loc + sub_func_offset,
inst_size,
offset: offset + sub_func_offset,
},
}));
// Overwrite the original jump with the correct offset.
let mut tmp = bumpalo::vec![in self.env.arena];
self.update_jmp_imm32_offset(
&mut tmp,
jmp_location as u64,
start_offset as u64,
self.buf.len() as u64,
);
// Build remainder of function.
self.build_stmt(remainder, ret_layout)
}
fn build_jump(
&mut self,
id: &JoinPointId,
args: &'a [Symbol],
arg_layouts: &[Layout<'a>],
ret_layout: &Layout<'a>,
) -> Result<(), String> {
// Treat this like a function call, but with a jump instead of a call instruction at the end.
self.push_used_caller_saved_regs_to_stack()?;
let tmp_stack_size = CC::store_args(
&mut self.buf,
&self.symbol_storage_map,
args,
arg_layouts,
ret_layout,
)?;
self.fn_call_stack_size = std::cmp::max(self.fn_call_stack_size, tmp_stack_size);
let jmp_location = self.buf.len();
let start_offset = ASM::jmp_imm32(&mut self.buf, 0x1234_5678);
if let Some(offset) = self.join_map.get(id) {
let offset = *offset;
let mut tmp = bumpalo::vec![in self.env.arena];
self.update_jmp_imm32_offset(
&mut tmp,
jmp_location as u64,
start_offset as u64,
offset,
);
Ok(())
} else {
Err(format!(
"Jump: unknown point specified to jump to: {:?}",
id
))
}
}
fn build_num_abs(
&mut self,
dst: &Symbol,
@ -828,29 +995,26 @@ impl<
fn return_symbol(&mut self, sym: &Symbol, layout: &Layout<'a>) -> Result<(), String> {
let val = self.symbol_storage_map.get(sym);
match val {
Some(SymbolStorage::GeneralReg(reg)) if *reg == CC::GENERAL_RETURN_REGS[0] => Ok(()),
Some(SymbolStorage::GeneralReg(reg)) if *reg == CC::GENERAL_RETURN_REGS[0] => {}
Some(SymbolStorage::GeneralReg(reg)) => {
// If it fits in a general purpose register, just copy it over.
// Technically this can be optimized to produce shorter instructions if the value is less than 64 bits.
ASM::mov_reg64_reg64(&mut self.buf, CC::GENERAL_RETURN_REGS[0], *reg);
Ok(())
}
Some(SymbolStorage::FloatReg(reg)) if *reg == CC::FLOAT_RETURN_REGS[0] => Ok(()),
Some(SymbolStorage::FloatReg(reg)) if *reg == CC::FLOAT_RETURN_REGS[0] => {}
Some(SymbolStorage::FloatReg(reg)) => {
ASM::mov_freg64_freg64(&mut self.buf, CC::FLOAT_RETURN_REGS[0], *reg);
Ok(())
}
Some(SymbolStorage::Base { offset, size, .. }) => match layout {
Layout::Builtin(Builtin::Int64) => {
ASM::mov_reg64_base32(&mut self.buf, CC::GENERAL_RETURN_REGS[0], *offset);
Ok(())
}
Layout::Builtin(Builtin::Float64) => {
ASM::mov_freg64_base32(&mut self.buf, CC::FLOAT_RETURN_REGS[0], *offset);
Ok(())
}
Layout::Struct(field_layouts) => {
let (offset, size) = (*offset, *size);
// Nothing to do for empty struct
if size > 0 {
let ret_reg = if self.symbol_storage_map.contains_key(&Symbol::RET_POINTER)
{
@ -858,23 +1022,34 @@ impl<
} else {
None
};
CC::return_struct(&mut self.buf, offset, size, field_layouts, ret_reg)
} else {
// Nothing to do for empty struct
Ok(())
CC::return_struct(&mut self.buf, offset, size, field_layouts, ret_reg)?;
}
}
x => Err(format!(
"returning symbol with layout, {:?}, is not yet implemented",
x
)),
x => {
return Err(format!(
"returning symbol with layout, {:?}, is not yet implemented",
x
));
}
},
Some(x) => Err(format!(
"returning symbol storage, {:?}, is not yet implemented",
x
)),
None => Err(format!("Unknown return symbol: {}", sym)),
Some(x) => {
return Err(format!(
"returning symbol storage, {:?}, is not yet implemented",
x
));
}
None => {
return Err(format!("Unknown return symbol: {}", sym));
}
}
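// Emit a placeholder jump to the shared function epilogue. The target is not
// known yet, so record a JmpToReturn relocation; finalize() later retargets it
// to the end of the body (or drops it entirely if it already sits there).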
let inst_loc = self.buf.len() as u64;
let offset = ASM::jmp_imm32(&mut self.buf, 0x1234_5678) as u64;
self.relocs.push(Relocation::JmpToReturn {
inst_loc,
inst_size: self.buf.len() as u64 - inst_loc,
offset,
});
Ok(())
}
}
@ -1212,4 +1387,74 @@ impl<
)),
}
}
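// Spills every symbol currently held in a caller-saved register to the stack
// and marks those registers as free again; callee-saved registers are left
// untouched.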
fn push_used_caller_saved_regs_to_stack(&mut self) -> Result<(), String> {
let old_general_used_regs = std::mem::replace(
&mut self.general_used_regs,
bumpalo::vec![in self.env.arena],
);
for (reg, saved_sym) in old_general_used_regs.into_iter() {
if CC::general_caller_saved(&reg) {
self.general_free_regs.push(reg);
self.free_to_stack(&saved_sym)?;
} else {
self.general_used_regs.push((reg, saved_sym));
}
}
let old_float_used_regs =
std::mem::replace(&mut self.float_used_regs, bumpalo::vec![in self.env.arena]);
for (reg, saved_sym) in old_float_used_regs.into_iter() {
if CC::float_caller_saved(&reg) {
self.float_free_regs.push(reg);
self.free_to_stack(&saved_sym)?;
} else {
self.float_used_regs.push((reg, saved_sym));
}
}
Ok(())
}
// Updates a jump instruction at `jmp_location` so that it targets `target_offset`.
// The immediate is re-encoded as `target_offset - base_offset` and written over the old bytes.
fn update_jmp_imm32_offset(
&mut self,
tmp: &mut Vec<'a, u8>,
jmp_location: u64,
base_offset: u64,
target_offset: u64,
) {
tmp.clear();
let jmp_offset = target_offset as i32 - base_offset as i32;
ASM::jmp_imm32(tmp, jmp_offset);
for (i, byte) in tmp.iter().enumerate() {
self.buf[jmp_location as usize + i] = *byte;
}
}
}
#[macro_export]
macro_rules! single_register_integers {
() => {
Builtin::Int1
| Builtin::Int8
| Builtin::Int16
| Builtin::Int32
| Builtin::Int64
| Builtin::Usize
};
}
#[macro_export]
macro_rules! single_register_floats {
() => {
// Float16 is explicitly ignored because it is not supported by most hardware and may require special exceptions.
// Builtin::Float16 |
Builtin::Float32 | Builtin::Float64
};
}
#[macro_export]
macro_rules! single_register_builtins {
() => {
single_register_integers!() | single_register_floats!()
};
}
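// These macros expand to or-patterns, so a caller can match a whole group of
// builtins in a single arm. A minimal sketch (mirroring the x86_64 calling
// convention code below):
//
//     match layout {
//         Layout::Builtin(single_register_integers!()) => { /* fits in a general reg */ }
//         Layout::Builtin(single_register_floats!()) => { /* fits in a float reg */ }
//         _ => { /* handled elsewhere */ }
//     }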

View File

@ -1,5 +1,7 @@
use crate::generic64::{Assembler, CallConv, RegTrait, SymbolStorage, PTR_SIZE};
use crate::Relocation;
use crate::{
single_register_builtins, single_register_floats, single_register_integers, Relocation,
};
use bumpalo::collections::Vec;
use roc_collections::all::MutMap;
use roc_module::symbol::Symbol;
@ -191,7 +193,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64SystemV {
}
for (layout, sym) in args.iter() {
match layout {
Layout::Builtin(Builtin::Int64) => {
Layout::Builtin(single_register_integers!()) => {
if general_i < Self::GENERAL_PARAM_REGS.len() {
symbol_map.insert(
*sym,
@ -210,7 +212,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64SystemV {
);
}
}
Layout::Builtin(Builtin::Float64) => {
Layout::Builtin(single_register_floats!()) => {
if float_i < Self::FLOAT_PARAM_REGS.len() {
symbol_map.insert(
*sym,
@ -229,6 +231,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64SystemV {
);
}
}
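// An empty struct is zero-sized, so there is no value to load into a register
// or stack slot.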
Layout::Struct(&[]) => {}
x => {
return Err(format!(
"Loading args with layout {:?} not yet implementd",
@ -254,8 +257,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64SystemV {
// For most return layouts we will do nothing.
// In some cases, we need to put the return address as the first arg.
match ret_layout {
Layout::Builtin(Builtin::Int64) => {}
Layout::Builtin(Builtin::Float64) => {}
Layout::Builtin(single_register_builtins!()) => {}
x => {
return Err(format!(
"receiving return type, {:?}, is not yet implemented",
@ -265,7 +267,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64SystemV {
}
for (i, layout) in arg_layouts.iter().enumerate() {
match layout {
Layout::Builtin(Builtin::Int64) => {
Layout::Builtin(single_register_integers!()) => {
if general_i < Self::GENERAL_PARAM_REGS.len() {
// Load the value to the param reg.
let dst = Self::GENERAL_PARAM_REGS[general_i];
@ -319,7 +321,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64SystemV {
stack_offset += 8;
}
}
Layout::Builtin(Builtin::Float64) => {
Layout::Builtin(single_register_floats!()) => {
if float_i < Self::FLOAT_PARAM_REGS.len() {
// Load the value to the param reg.
let dst = Self::FLOAT_PARAM_REGS[float_i];
@ -371,6 +373,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64SystemV {
stack_offset += 8;
}
}
Layout::Struct(&[]) => {}
x => {
return Err(format!(
"calling with arg type, {:?}, is not yet implemented",
@ -529,13 +532,14 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64WindowsFastcall {
for (layout, sym) in args.iter() {
if i < Self::GENERAL_PARAM_REGS.len() {
match layout {
Layout::Builtin(Builtin::Int64) => {
Layout::Builtin(single_register_integers!()) => {
symbol_map
.insert(*sym, SymbolStorage::GeneralReg(Self::GENERAL_PARAM_REGS[i]));
}
Layout::Builtin(Builtin::Float64) => {
Layout::Builtin(single_register_floats!()) => {
symbol_map.insert(*sym, SymbolStorage::FloatReg(Self::FLOAT_PARAM_REGS[i]));
}
Layout::Struct(&[]) => {}
x => {
return Err(format!(
"Loading args with layout {:?} not yet implementd",
@ -546,8 +550,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64WindowsFastcall {
i += 1;
} else {
base_offset += match layout {
Layout::Builtin(Builtin::Int64) => 8,
Layout::Builtin(Builtin::Float64) => 8,
Layout::Builtin(single_register_builtins!()) => 8,
x => {
return Err(format!(
"Loading args with layout {:?} not yet implemented",
@ -581,8 +584,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64WindowsFastcall {
// For most return layouts we will do nothing.
// In some cases, we need to put the return address as the first arg.
match ret_layout {
Layout::Builtin(Builtin::Int64) => {}
Layout::Builtin(Builtin::Float64) => {}
Layout::Builtin(single_register_builtins!()) => {}
x => {
return Err(format!(
"receiving return type, {:?}, is not yet implemented",
@ -592,7 +594,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64WindowsFastcall {
}
for (i, layout) in arg_layouts.iter().enumerate() {
match layout {
Layout::Builtin(Builtin::Int64) => {
Layout::Builtin(single_register_integers!()) => {
if i < Self::GENERAL_PARAM_REGS.len() {
// Load the value to the param reg.
let dst = Self::GENERAL_PARAM_REGS[reg_i];
@ -646,7 +648,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64WindowsFastcall {
stack_offset += 8;
}
}
Layout::Builtin(Builtin::Float64) => {
Layout::Builtin(single_register_floats!()) => {
if i < Self::FLOAT_PARAM_REGS.len() {
// Load the value to the param reg.
let dst = Self::FLOAT_PARAM_REGS[reg_i];
@ -698,6 +700,7 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64WindowsFastcall {
stack_offset += 8;
}
}
Layout::Struct(&[]) => {}
x => {
return Err(format!(
"calling with arg type, {:?}, is not yet implemented",

View File

@ -9,10 +9,10 @@ use roc_module::ident::{ModuleName, TagName};
use roc_module::low_level::LowLevel;
use roc_module::symbol::{Interns, Symbol};
use roc_mono::ir::{
BranchInfo, CallType, Expr, JoinPointId, ListLiteralElement, Literal, Proc, Stmt,
BranchInfo, CallType, Expr, JoinPointId, ListLiteralElement, Literal, Param, Proc,
SelfRecursive, Stmt,
};
use roc_mono::layout::{Builtin, Layout, LayoutIds};
use target_lexicon::Triple;
mod generic64;
mod object_builder;
@ -46,6 +46,11 @@ pub enum Relocation {
offset: u64,
name: String,
},
JmpToReturn {
inst_loc: u64,
inst_size: u64,
offset: u64,
},
}
trait Backend<'a>
@ -53,12 +58,13 @@ where
Self: Sized,
{
/// new creates a new backend that will output to the specific Object.
fn new(env: &'a Env, target: &Triple) -> Result<Self, String>;
fn new(env: &'a Env) -> Result<Self, String>;
fn env(&self) -> &'a Env<'a>;
/// reset resets any registers or other values that may be occupied at the end of a procedure.
fn reset(&mut self);
/// It also passes basic procedure information to the builder for setup of the next function.
fn reset(&mut self, name: String, is_self_recursive: SelfRecursive);
/// finalize does any setup and cleanup that should happen around the procedure.
/// finalize does setup because things like stack size and jump locations are not known until the function is written.
@ -79,7 +85,10 @@ where
/// build_proc creates a procedure and outputs it to the wrapped object writer.
fn build_proc(&mut self, proc: Proc<'a>) -> Result<(&'a [u8], &[Relocation]), String> {
self.reset();
let proc_name = LayoutIds::default()
.get(proc.name, &proc.ret_layout)
.to_symbol_string(proc.name, &self.env().interns);
self.reset(proc_name, proc.is_self_recursive);
self.load_args(proc.args, &proc.ret_layout)?;
for (layout, sym) in proc.args {
self.set_layout_map(*sym, layout)?;
@ -128,6 +137,35 @@ where
self.free_symbols(stmt)?;
Ok(())
}
Stmt::Join {
id,
parameters,
body,
remainder,
} => {
for param in parameters.iter() {
self.set_layout_map(param.symbol, &param.layout)?;
}
self.build_join(id, parameters, body, remainder, ret_layout)?;
self.free_symbols(stmt)?;
Ok(())
}
Stmt::Jump(id, args) => {
let mut arg_layouts: bumpalo::collections::Vec<Layout<'a>> =
bumpalo::vec![in self.env().arena];
arg_layouts.reserve(args.len());
let layout_map = self.layout_map();
for arg in *args {
if let Some(layout) = layout_map.get(arg) {
arg_layouts.push(*layout);
} else {
return Err(format!("the argument, {:?}, has no know layout", arg));
}
}
self.build_jump(id, args, arg_layouts.into_bump_slice(), ret_layout)?;
self.free_symbols(stmt)?;
Ok(())
}
x => Err(format!("the statement, {:?}, is not yet implemented", x)),
}
}
@ -141,6 +179,25 @@ where
ret_layout: &Layout<'a>,
) -> Result<(), String>;
// build_join generates instructions for a join statement.
fn build_join(
&mut self,
id: &JoinPointId,
parameters: &'a [Param<'a>],
body: &'a Stmt<'a>,
remainder: &'a Stmt<'a>,
ret_layout: &Layout<'a>,
) -> Result<(), String>;
// build_jump generates instructions for a jump statement.
fn build_jump(
&mut self,
id: &JoinPointId,
args: &'a [Symbol],
arg_layouts: &[Layout<'a>],
ret_layout: &Layout<'a>,
) -> Result<(), String>;
/// build_expr builds the expressions for the specified symbol.
/// The builder must keep track of the symbol because it may be referred to later.
fn build_expr(
@ -263,8 +320,7 @@ where
let layout_map = self.layout_map();
for arg in *arguments {
if let Some(layout) = layout_map.get(arg) {
// This is safe because every value in the map is always set with a valid layout and cannot be null.
arg_layouts.push(unsafe { *(*layout) });
arg_layouts.push(*layout);
} else {
return Err(format!("the argument, {:?}, has no know layout", arg));
}
@ -507,7 +563,7 @@ where
/// load_literal sets a symbol to be equal to a literal.
fn load_literal(&mut self, sym: &Symbol, lit: &Literal<'a>) -> Result<(), String>;
/// return_symbol moves a symbol to the correct return location for the backend.
/// return_symbol moves a symbol to the correct return location for the backend and adds a jump to the end of the function.
fn return_symbol(&mut self, sym: &Symbol, layout: &Layout<'a>) -> Result<(), String>;
/// free_symbols will free all symbols for the given statement.
@ -542,12 +598,10 @@ where
/// set_layout_map sets the layout for a specific symbol.
fn set_layout_map(&mut self, sym: Symbol, layout: &Layout<'a>) -> Result<(), String> {
if let Some(x) = self.layout_map().insert(sym, layout) {
if let Some(old_layout) = self.layout_map().insert(sym, *layout) {
// Layout map already contains the symbol. We should never need to overwrite.
// If the layout is not the same, that is a bug.
// There is always an old layout value and this dereference is safe.
let old_layout = unsafe { *x };
if old_layout != *layout {
if &old_layout != layout {
Err(format!(
"Overwriting layout for symbol, {:?}. This should never happen. got {:?}, want {:?}",
sym, layout, old_layout
@ -561,7 +615,7 @@ where
}
/// layout_map gets the map from symbol to layout.
fn layout_map(&mut self) -> &mut MutMap<Symbol, *const Layout<'a>>;
fn layout_map(&mut self) -> &mut MutMap<Symbol, Layout<'a>>;
fn create_free_map(&mut self) {
let mut free_map = MutMap::default();

View File

@ -34,7 +34,7 @@ pub fn build_module<'a>(
x86_64::X86_64FloatReg,
x86_64::X86_64Assembler,
x86_64::X86_64SystemV,
> = Backend::new(env, target)?;
> = Backend::new(env)?;
build_object(
env,
procedures,
@ -52,7 +52,7 @@ pub fn build_module<'a>(
x86_64::X86_64FloatReg,
x86_64::X86_64Assembler,
x86_64::X86_64SystemV,
> = Backend::new(env, target)?;
> = Backend::new(env)?;
build_object(
env,
procedures,
@ -74,7 +74,7 @@ pub fn build_module<'a>(
aarch64::AArch64FloatReg,
aarch64::AArch64Assembler,
aarch64::AArch64Call,
> = Backend::new(env, target)?;
> = Backend::new(env)?;
build_object(
env,
procedures,
@ -304,6 +304,7 @@ fn build_object<'a, B: Backend<'a>>(
return Err(format!("failed to find fn symbol for {:?}", name));
}
}
Relocation::JmpToReturn { .. } => unreachable!(),
};
relocations.push((section_id, elfreloc));
}

View File

@ -281,6 +281,24 @@ mod dev_num {
);
}
#[test]
fn gen_fast_fib_fn() {
assert_evals_to!(
indoc!(
r#"
fib = \n, a, b ->
if n == 0 then
a
else
fib (n - 1) b (a + b)
fib 10 0 1
"#
),
55,
i64
);
}
#[test]
fn f64_abs() {
assert_evals_to!("Num.abs -4.7", 4.7, f64);
@ -580,18 +598,18 @@ mod dev_num {
// assert_evals_to!("0.0 >= 0.0", true, bool);
// }
// #[test]
// fn gen_order_of_arithmetic_ops() {
// assert_evals_to!(
// indoc!(
// r#"
// 1 + 3 * 7 - 2
// "#
// ),
// 20,
// i64
// );
// }
#[test]
fn gen_order_of_arithmetic_ops() {
assert_evals_to!(
indoc!(
r#"
1 + 3 * 7 - 2
"#
),
20,
i64
);
}
// #[test]
// fn gen_order_of_arithmetic_ops_complex_float() {
@ -606,59 +624,59 @@ mod dev_num {
// );
// }
// #[test]
// fn if_guard_bind_variable_false() {
// assert_evals_to!(
// indoc!(
// r#"
// wrapper = \{} ->
// when 10 is
// x if x == 5 -> 0
// _ -> 42
#[test]
fn if_guard_bind_variable_false() {
assert_evals_to!(
indoc!(
r#"
wrapper = \{} ->
when 10 is
x if x == 5 -> 0
_ -> 42
// wrapper {}
// "#
// ),
// 42,
// i64
// );
// }
wrapper {}
"#
),
42,
i64
);
}
// #[test]
// fn if_guard_bind_variable_true() {
// assert_evals_to!(
// indoc!(
// r#"
// wrapper = \{} ->
// when 10 is
// x if x == 10 -> 42
// _ -> 0
#[test]
fn if_guard_bind_variable_true() {
assert_evals_to!(
indoc!(
r#"
wrapper = \{} ->
when 10 is
x if x == 10 -> 42
_ -> 0
// wrapper {}
// "#
// ),
// 42,
// i64
// );
// }
wrapper {}
"#
),
42,
i64
);
}
// #[test]
// fn tail_call_elimination() {
// assert_evals_to!(
// indoc!(
// r#"
// sum = \n, accum ->
// when n is
// 0 -> accum
// _ -> sum (n - 1) (n + accum)
#[test]
fn tail_call_elimination() {
assert_evals_to!(
indoc!(
r#"
sum = \n, accum ->
when n is
0 -> accum
_ -> sum (n - 1) (n + accum)
// sum 1_000_000 0
// "#
// ),
// 500000500000,
// i64
// );
// }
sum 1_000_000 0
"#
),
500000500000,
i64
);
}
// #[test]
// fn int_negate() {

View File

@ -1 +1,2 @@
add
fib

View File

@ -3,25 +3,13 @@ app "fib"
imports []
provides [ main ] to base
main = \n -> fib n
main = \n -> fib n 0 1
fib = \n ->
if n == 0 then
0
else if n == 1 then
1
else
(fib (n - 1)) + (fib (n - 2))
# the clever implementation requires join points
# fib = \n, a, b ->
# if n == 0 then
# a
#
# else
# fib (n - 1) b (a + b)
#
# fib n 0 1
fib = \n, a, b ->
if n == 0 then
a
else
fib (n - 1) b (a + b)