Merge pull request #2097 from rtfeldman/wasm-empty-record

Wasm empty record
This commit is contained in:
Folkert de Vries 2021-11-29 13:15:40 +01:00 committed by GitHub
commit 167af34d4b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 352 additions and 190 deletions

View File

@ -7,7 +7,7 @@ use roc_module::symbol::Symbol;
use roc_mono::ir::{CallType, Expr, JoinPointId, Literal, Proc, Stmt};
use roc_mono::layout::{Builtin, Layout, LayoutIds};
use crate::layout::{StackMemoryFormat, WasmLayout};
use crate::layout::{CallConv, ReturnMethod, WasmLayout};
use crate::low_level::{decode_low_level, LowlevelBuildResult};
use crate::storage::{Storage, StoredValue, StoredValueKind};
use crate::wasm_module::linking::{
@ -177,16 +177,20 @@ impl<'a> WasmBackend<'a> {
fn start_proc(&mut self, proc: &Proc<'a>) {
let ret_layout = WasmLayout::new(&proc.ret_layout);
let ret_type = if ret_layout.is_stack_memory() {
self.storage.arg_types.push(PTR_TYPE);
self.start_block(BlockType::NoResult); // block to ensure all paths pop stack memory (if any)
None
} else {
let ty = ret_layout.value_type();
self.start_block(BlockType::Value(ty)); // block to ensure all paths pop stack memory (if any)
Some(ty)
let ret_type = match ret_layout.return_method() {
ReturnMethod::Primitive(ty) => Some(ty),
ReturnMethod::NoReturnValue => None,
ReturnMethod::WriteToPointerArg => {
self.storage.arg_types.push(PTR_TYPE);
None
}
};
// Create a block so we can exit the function without skipping stack frame "pop" code.
// We never use the `return` instruction. Instead, we break from this block.
self.start_block(BlockType::from(ret_type));
for (layout, symbol) in proc.args {
let arg_layout = WasmLayout::new(layout);
self.storage
@ -219,10 +223,9 @@ impl<'a> WasmBackend<'a> {
***********************************************************/
/// start a loop that leaves a value on the stack
fn start_loop_with_return(&mut self, value_type: ValueType) {
fn start_loop(&mut self, block_type: BlockType) {
self.block_depth += 1;
self.code_builder.loop_(BlockType::Value(value_type));
self.code_builder.loop_(block_type);
}
fn start_block(&mut self, block_type: BlockType) {
@ -335,7 +338,7 @@ impl<'a> WasmBackend<'a> {
}
let is_bool = matches!(cond_layout, Layout::Builtin(Builtin::Bool));
let cond_type = WasmLayout::new(cond_layout).value_type();
let cond_type = WasmLayout::new(cond_layout).arg_types(CallConv::C)[0];
// then, we jump whenever the value under scrutiny is equal to the value of a branch
for (i, (value, _, _)) in branches.iter().enumerate() {
@ -419,10 +422,14 @@ impl<'a> WasmBackend<'a> {
self.end_block();
// A `return` inside of a `loop` seems to make it so that the `loop` itself
// also "returns" (so, leaves on the stack) a value of the return type.
let return_wasm_layout = WasmLayout::new(ret_layout);
self.start_loop_with_return(return_wasm_layout.value_type());
// A loop (or any block) needs to declare the type of the value it leaves on the stack on exit.
// The runtime needs this to statically validate the program before running it.
let loop_block_type = match WasmLayout::new(ret_layout).return_method() {
ReturnMethod::Primitive(ty) => BlockType::Value(ty),
ReturnMethod::WriteToPointerArg => BlockType::NoResult,
ReturnMethod::NoReturnValue => BlockType::NoResult,
};
self.start_loop(loop_block_type);
self.build_stmt(body, ret_layout)?;
@ -488,19 +495,14 @@ impl<'a> WasmBackend<'a> {
return self.build_low_level(lowlevel, arguments, *sym, wasm_layout);
}
let mut wasm_args_tmp: Vec<Symbol>;
let (wasm_args, has_return_val) = match wasm_layout {
WasmLayout::StackMemory { .. } => {
wasm_args_tmp =
Vec::with_capacity_in(arguments.len() + 1, self.env.arena);
wasm_args_tmp.push(*sym);
wasm_args_tmp.extend_from_slice(*arguments);
(wasm_args_tmp.as_slice(), false)
}
_ => (*arguments, true),
};
self.storage.load_symbols(&mut self.code_builder, wasm_args);
let (param_types, ret_type) = self.storage.load_symbols_for_call(
self.env.arena,
&mut self.code_builder,
arguments,
*sym,
&wasm_layout,
CallConv::C,
);
// Index of the called function in the code section. Assumes all functions end up in the binary.
// (We may decide to keep all procs even if calls are inlined, in case platform calls them)
@ -519,12 +521,10 @@ impl<'a> WasmBackend<'a> {
// Same as the function index since those are the first symbols we add
let symbol_index = func_index;
self.code_builder.call(
func_index,
symbol_index,
wasm_args.len(),
has_return_val,
);
let num_wasm_args = param_types.len();
let has_return_val = ret_type.is_some();
self.code_builder
.call(func_index, symbol_index, num_wasm_args, has_return_val);
Ok(())
}
@ -572,14 +572,13 @@ impl<'a> WasmBackend<'a> {
return_sym: Symbol,
return_layout: WasmLayout,
) -> Result<(), String> {
// Load symbols using the "fast calling convention" that Zig uses instead of the C ABI we normally use.
// It's only different from the C ABI for small structs, and we are using Zig for all of those cases.
// This is a workaround for a bug in Zig. If later versions fix it, we can change to the C ABI.
self.storage.load_symbols_fastcc(
let (param_types, ret_type) = self.storage.load_symbols_for_call(
self.env.arena,
&mut self.code_builder,
arguments,
return_sym,
&return_layout,
CallConv::Zig,
);
let build_result = decode_low_level(
@ -594,7 +593,7 @@ impl<'a> WasmBackend<'a> {
match build_result {
Done => Ok(()),
BuiltinCall(name) => {
self.call_zig_builtin(name, arguments, &return_layout);
self.call_zig_builtin(name, param_types, ret_type);
Ok(())
}
NotImplemented => Err(format!(
@ -767,7 +766,9 @@ impl<'a> WasmBackend<'a> {
);
}
} else {
return Err(format!("Not supported yet: zero-size struct at {:?}", sym));
// Zero-size struct. No code to emit.
// These values are purely conceptual, they only exist internally in the compiler
return Ok(());
}
}
_ => {
@ -789,7 +790,15 @@ impl<'a> WasmBackend<'a> {
/// Generate a call instruction to a Zig builtin function.
/// And if we haven't seen it before, add an Import and linker data for it.
/// Zig calls use LLVM's "fast" calling convention rather than our usual C ABI.
fn call_zig_builtin(&mut self, name: &'a str, arguments: &[Symbol], ret_layout: &WasmLayout) {
fn call_zig_builtin(
&mut self,
name: &'a str,
param_types: Vec<'a, ValueType>,
ret_type: Option<ValueType>,
) {
let num_wasm_args = param_types.len();
let has_return_val = ret_type.is_some();
let (fn_index, linker_symbol_index) = match self.builtin_sym_index_map.get(name) {
Some(sym_idx) => match &self.linker_symbols[*sym_idx] {
SymInfo::Function(WasmObjectSymbol::Imported { index, .. }) => {
@ -799,51 +808,13 @@ impl<'a> WasmBackend<'a> {
},
None => {
let mut param_types = Vec::with_capacity_in(1 + arguments.len(), self.env.arena);
let ret_type = if ret_layout.is_stack_memory() {
param_types.push(ValueType::I32);
None
} else {
Some(ret_layout.value_type())
};
for arg in arguments {
match self.storage.get(arg) {
StoredValue::StackMemory { size, format, .. } => {
use StackMemoryFormat::*;
match format {
Aggregate => {
// Zig's "fast calling convention" packs structs into CPU registers
// (stack machine slots) if possible. If they're small enough they
// can go into an I32 or I64. If they're big, they're pointers (I32).
if *size > 4 && *size <= 8 {
param_types.push(ValueType::I64)
} else {
// either
//
// - this is a small value, that fits in an i32
// - this is a big value, we pass a memory address
param_types.push(ValueType::I32)
}
}
Int128 | Float128 | Decimal => {
// these types are passed as 2 i64s
param_types.push(ValueType::I64);
param_types.push(ValueType::I64);
}
}
}
stored => param_types.push(stored.value_type()),
}
}
// Wasm function signature
let signature_index = self.module.types.insert(Signature {
param_types,
ret_type,
});
// Declare it as an import since it comes from a different .o file
let import_index = self.module.import.entries.len() as u32;
let import = Import {
module: BUILTINS_IMPORT_MODULE_NAME,
@ -852,22 +823,22 @@ impl<'a> WasmBackend<'a> {
};
self.module.import.entries.push(import);
// Provide symbol information for the linker
let sym_idx = self.linker_symbols.len();
let sym_info = SymInfo::Function(WasmObjectSymbol::Imported {
flags: WASM_SYM_UNDEFINED,
index: import_index,
});
self.linker_symbols.push(sym_info);
// Remember that we have created all of this data, and don't need to do it again
self.builtin_sym_index_map.insert(name, sym_idx);
(import_index, sym_idx as u32)
}
};
self.code_builder.call(
fn_index,
linker_symbol_index,
arguments.len(),
true, // TODO: handle builtins with no return value
);
self.code_builder
.call(fn_index, linker_symbol_index, num_wasm_args, has_return_val);
}
}

View File

@ -1,12 +1,26 @@
use roc_builtins::bitcode::{FloatWidth, IntWidth};
use roc_mono::layout::{Layout, UnionLayout};
use crate::{wasm_module::ValueType, PTR_SIZE, PTR_TYPE};
use crate::wasm_module::ValueType;
use crate::{PTR_SIZE, PTR_TYPE};
/// Manually keep up to date with the Zig version we are using for builtins
pub const BUILTINS_ZIG_VERSION: ZigVersion = ZigVersion::Zig8;
/// How a Wasm function returns a value of a given Roc layout to its caller.
/// Decided per-layout by `WasmLayout::return_method`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ReturnMethod {
/// This layout is returned from a Wasm function "normally" as a Primitive
Primitive(ValueType),
/// This layout is returned by writing to a pointer passed as the first argument
WriteToPointerArg,
/// This layout is empty and requires no return value or argument (e.g. refcount helpers)
NoReturnValue,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum StackMemoryFormat {
/// Record, Str, List, Dict, etc.
Aggregate,
DataStructure,
Int128,
Float128,
Decimal,
@ -82,7 +96,7 @@ impl WasmLayout {
| Layout::Union(NonRecursive(_)) => Self::StackMemory {
size,
alignment_bytes,
format: StackMemoryFormat::Aggregate,
format: StackMemoryFormat::DataStructure,
},
Layout::Union(
@ -95,10 +109,35 @@ impl WasmLayout {
}
}
pub fn value_type(&self) -> ValueType {
/// The `ValueType`s to use for this layout when calling a Wasm function
/// One Roc argument can become 0, 1, or 2 Wasm arguments
pub fn arg_types(&self, conv: CallConv) -> &'static [ValueType] {
use ValueType::*;
match self {
Self::Primitive(type_, _) => *type_,
_ => PTR_TYPE,
// 1 Roc argument => 1 Wasm argument (same for all calling conventions)
Self::Primitive(I32, _) => &[I32],
Self::Primitive(I64, _) => &[I64],
Self::Primitive(F32, _) => &[F32],
Self::Primitive(F64, _) => &[F64],
Self::HeapMemory => &[I32],
// 1 Roc argument => 0-2 Wasm arguments (depending on size and calling convention)
Self::StackMemory { size, format, .. } => conv.stack_memory_arg_types(*size, *format),
}
}
/// How a value of this layout is returned from a Wasm function.
pub fn return_method(&self) -> ReturnMethod {
    match self {
        // Primitives come back as a normal Wasm return value;
        // heap values return their address, which is pointer-sized.
        Self::Primitive(ty, _) => ReturnMethod::Primitive(*ty),
        Self::HeapMemory => ReturnMethod::Primitive(PTR_TYPE),
        // A zero-sized stack value (e.g. an empty record) has nothing to return.
        Self::StackMemory { size: 0, .. } => ReturnMethod::NoReturnValue,
        // Any other stack-memory value is written through a pointer
        // that the caller passes as the first argument.
        Self::StackMemory { .. } => ReturnMethod::WriteToPointerArg,
    }
}
@ -109,8 +148,62 @@ impl WasmLayout {
Self::HeapMemory => PTR_SIZE,
}
}
}
pub fn is_stack_memory(&self) -> bool {
matches!(self, Self::StackMemory { .. })
/// The Zig compiler versions we support for building the bitcode builtins.
/// Zig 0.8 and 0.9 pack medium-sized structs into Wasm arguments differently
/// (see `CallConv::stack_memory_arg_types`), so codegen must know which
/// version produced the builtins object file.
// Derives match the sibling enums (`ReturnMethod`, `CallConv`): `Debug` for
// diagnostics, `Copy`/`Clone` because it is a trivial two-variant tag.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ZigVersion {
    Zig8,
    Zig9,
}
/// The calling conventions we can generate Wasm calls for.
/// Affects how stack-memory values (structs, 128-bit numbers) are passed;
/// see `stack_memory_arg_types`.
#[derive(Debug, Clone, Copy)]
pub enum CallConv {
/// The C calling convention, as defined here:
/// https://github.com/WebAssembly/tool-conventions/blob/main/BasicCABI.md
C,
/// The calling convention that Zig 0.8 or 0.9 generates for Wasm when we *ask* it
/// for the .C calling convention, due to bugs in both versions of the Zig compiler.
/// Unlike the C ABI, it packs small structs into one or two scalar Wasm arguments.
Zig,
}
impl CallConv {
    /// The Wasm argument types used to pass one stack-memory Roc value
    /// (a struct or a 128-bit number) of the given size and format.
    /// One Roc value can become zero, one, or two Wasm arguments.
    pub fn stack_memory_arg_types(
        &self,
        size: u32,
        format: StackMemoryFormat,
    ) -> &'static [ValueType] {
        use StackMemoryFormat::*;
        use ValueType::*;

        // 128-bit numbers are passed by value as two i64 halves,
        // regardless of calling convention.
        if !matches!(format, DataStructure) {
            return &[I64, I64];
        }

        // Zero-size Roc values like `{}` => no Wasm arguments
        if size == 0 {
            return &[];
        }

        match self {
            // C ABI: always pass structs by reference (pointer to stack memory)
            CallConv::C => &[I32],
            // Zig packs small structs into scalar Wasm values where possible
            CallConv::Zig => match size {
                1..=4 => &[I32],  // small struct: pass by value
                5..=8 => &[I64],  // small struct: pass by value
                // medium struct: pass by value, as two Wasm arguments
                9..=12 if BUILTINS_ZIG_VERSION == ZigVersion::Zig9 => &[I64, I32],
                _ if size <= 16 => &[I64, I64],
                // large struct: pass by reference
                _ => &[I32],
            },
        }
    }
}

View File

@ -118,6 +118,9 @@ pub fn copy_memory(code_builder: &mut CodeBuilder, config: CopyMemoryConfig) {
if config.from_ptr == config.to_ptr && config.from_offset == config.to_offset {
return;
}
if config.size == 0 {
return;
}
let alignment = Align::from(config.alignment_bytes);
let mut i = 0;

View File

@ -2,7 +2,7 @@ use roc_builtins::bitcode::{self, FloatWidth};
use roc_module::low_level::{LowLevel, LowLevel::*};
use roc_module::symbol::Symbol;
use crate::layout::{StackMemoryFormat, WasmLayout};
use crate::layout::{CallConv, StackMemoryFormat, WasmLayout};
use crate::storage::Storage;
use crate::wasm_module::{
CodeBuilder,
@ -71,14 +71,14 @@ pub fn decode_low_level<'a>(
F64 => code_builder.f64_add(),
},
WasmLayout::StackMemory { format, .. } => match format {
StackMemoryFormat::Aggregate => return NotImplemented,
StackMemoryFormat::DataStructure => return NotImplemented,
StackMemoryFormat::Int128 => return NotImplemented,
StackMemoryFormat::Float128 => return NotImplemented,
StackMemoryFormat::Decimal => return BuiltinCall(bitcode::DEC_ADD_WITH_OVERFLOW),
},
WasmLayout::HeapMemory { .. } => return NotImplemented,
},
NumAddWrap => match ret_layout.value_type() {
NumAddWrap => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => {
code_builder.i32_add();
wrap_i32(code_builder, ret_layout.size());
@ -88,13 +88,13 @@ pub fn decode_low_level<'a>(
F64 => code_builder.f64_add(),
},
NumAddChecked => return NotImplemented,
NumSub => match ret_layout.value_type() {
NumSub => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_sub(),
I64 => code_builder.i64_sub(),
F32 => code_builder.f32_sub(),
F64 => code_builder.f64_sub(),
},
NumSubWrap => match ret_layout.value_type() {
NumSubWrap => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => {
code_builder.i32_sub();
wrap_i32(code_builder, ret_layout.size());
@ -104,13 +104,13 @@ pub fn decode_low_level<'a>(
F64 => code_builder.f64_sub(),
},
NumSubChecked => return NotImplemented,
NumMul => match ret_layout.value_type() {
NumMul => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_mul(),
I64 => code_builder.i64_mul(),
F32 => code_builder.f32_mul(),
F64 => code_builder.f64_mul(),
},
NumMulWrap => match ret_layout.value_type() {
NumMulWrap => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => {
code_builder.i32_mul();
wrap_i32(code_builder, ret_layout.size());
@ -120,46 +120,46 @@ pub fn decode_low_level<'a>(
F64 => code_builder.f64_mul(),
},
NumMulChecked => return NotImplemented,
NumGt => match storage.get(&args[0]).value_type() {
NumGt => match storage.get(&args[0]).arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_gt_s(),
I64 => code_builder.i64_gt_s(),
F32 => code_builder.f32_gt(),
F64 => code_builder.f64_gt(),
},
NumGte => match storage.get(&args[0]).value_type() {
NumGte => match storage.get(&args[0]).arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_ge_s(),
I64 => code_builder.i64_ge_s(),
F32 => code_builder.f32_ge(),
F64 => code_builder.f64_ge(),
},
NumLt => match storage.get(&args[0]).value_type() {
NumLt => match storage.get(&args[0]).arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_lt_s(),
I64 => code_builder.i64_lt_s(),
F32 => code_builder.f32_lt(),
F64 => code_builder.f64_lt(),
},
NumLte => match storage.get(&args[0]).value_type() {
NumLte => match storage.get(&args[0]).arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_le_s(),
I64 => code_builder.i64_le_s(),
F32 => code_builder.f32_le(),
F64 => code_builder.f64_le(),
},
NumCompare => return NotImplemented,
NumDivUnchecked => match ret_layout.value_type() {
NumDivUnchecked => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_div_s(),
I64 => code_builder.i64_div_s(),
F32 => code_builder.f32_div(),
F64 => code_builder.f64_div(),
},
NumDivCeilUnchecked => return NotImplemented,
NumRemUnchecked => match ret_layout.value_type() {
NumRemUnchecked => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_rem_s(),
I64 => code_builder.i64_rem_s(),
F32 => return NotImplemented,
F64 => return NotImplemented,
},
NumIsMultipleOf => return NotImplemented,
NumAbs => match ret_layout.value_type() {
NumAbs => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => {
let arg_storage = storage.get(&args[0]).to_owned();
storage.ensure_value_has_local(code_builder, args[0], arg_storage);
@ -188,7 +188,7 @@ pub fn decode_low_level<'a>(
F64 => code_builder.f64_abs(),
},
NumNeg => {
match ret_layout.value_type() {
match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => {
// Unfortunate local.set/local.get
code_builder.i32_const(0);
@ -219,7 +219,10 @@ pub fn decode_low_level<'a>(
let width = float_width_from_layout(ret_layout);
return BuiltinCall(&bitcode::NUM_ROUND[width]);
}
NumToFloat => match (ret_layout.value_type(), storage.get(&args[0]).value_type()) {
NumToFloat => match (
ret_layout.arg_types(CallConv::Zig)[0],
storage.get(&args[0]).arg_types(CallConv::Zig)[0],
) {
(F32, I32) => code_builder.f32_convert_s_i32(),
(F32, I64) => code_builder.f32_convert_s_i64(),
(F32, F32) => {}
@ -231,7 +234,7 @@ pub fn decode_low_level<'a>(
_ => panic_ret_type(),
},
NumPow => return NotImplemented,
NumCeiling => match ret_layout.value_type() {
NumCeiling => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => {
code_builder.f32_ceil();
code_builder.i32_trunc_s_f32()
@ -243,7 +246,7 @@ pub fn decode_low_level<'a>(
_ => panic_ret_type(),
},
NumPowInt => return NotImplemented,
NumFloor => match ret_layout.value_type() {
NumFloor => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => {
code_builder.f32_floor();
code_builder.i32_trunc_s_f32()
@ -254,7 +257,7 @@ pub fn decode_low_level<'a>(
}
_ => panic_ret_type(),
},
NumIsFinite => match ret_layout.value_type() {
NumIsFinite => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_const(1),
I64 => code_builder.i32_const(1),
F32 => {
@ -286,17 +289,17 @@ pub fn decode_low_level<'a>(
}
NumBytesToU16 => return NotImplemented,
NumBytesToU32 => return NotImplemented,
NumBitwiseAnd => match ret_layout.value_type() {
NumBitwiseAnd => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_and(),
I64 => code_builder.i64_and(),
_ => panic_ret_type(),
},
NumBitwiseXor => match ret_layout.value_type() {
NumBitwiseXor => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_xor(),
I64 => code_builder.i64_xor(),
_ => panic_ret_type(),
},
NumBitwiseOr => match ret_layout.value_type() {
NumBitwiseOr => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_or(),
I64 => code_builder.i64_or(),
_ => panic_ret_type(),
@ -304,23 +307,26 @@ pub fn decode_low_level<'a>(
NumShiftLeftBy => {
// Unfortunate local.set/local.get
storage.load_symbols(code_builder, &[args[1], args[0]]);
match ret_layout.value_type() {
match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_shl(),
I64 => code_builder.i64_shl(),
_ => panic_ret_type(),
}
}
NumShiftRightBy => match ret_layout.value_type() {
NumShiftRightBy => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_shr_s(),
I64 => code_builder.i64_shr_s(),
_ => panic_ret_type(),
},
NumShiftRightZfBy => match ret_layout.value_type() {
NumShiftRightZfBy => match ret_layout.arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_shr_u(),
I64 => code_builder.i64_shr_u(),
_ => panic_ret_type(),
},
NumIntCast => match (ret_layout.value_type(), storage.get(&args[0]).value_type()) {
NumIntCast => match (
ret_layout.arg_types(CallConv::Zig)[0],
storage.get(&args[0]).arg_types(CallConv::Zig)[0],
) {
(I32, I32) => {}
(I32, I64) => code_builder.i32_wrap_i64(),
(I32, F32) => code_builder.i32_trunc_s_f32(),
@ -343,7 +349,7 @@ pub fn decode_low_level<'a>(
},
Eq => {
// TODO: For non-number types, this will implement pointer equality, which is wrong
match storage.get(&args[0]).value_type() {
match storage.get(&args[0]).arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_eq(),
I64 => code_builder.i64_eq(),
F32 => code_builder.f32_eq(),
@ -352,7 +358,7 @@ pub fn decode_low_level<'a>(
}
NotEq => {
// TODO: For non-number types, this will implement pointer inequality, which is wrong
match storage.get(&args[0]).value_type() {
match storage.get(&args[0]).arg_types(CallConv::Zig)[0] {
I32 => code_builder.i32_ne(),
I64 => code_builder.i64_ne(),
F32 => code_builder.f32_ne(),
@ -390,7 +396,7 @@ fn wrap_i32(code_builder: &mut CodeBuilder, size: u32) {
}
fn float_width_from_layout(wasm_layout: &WasmLayout) -> FloatWidth {
if wasm_layout.value_type() == ValueType::F32 {
if wasm_layout.arg_types(CallConv::Zig)[0] == ValueType::F32 {
FloatWidth::F32
} else {
FloatWidth::F64

View File

@ -4,7 +4,9 @@ use bumpalo::Bump;
use roc_collections::all::MutMap;
use roc_module::symbol::Symbol;
use crate::layout::{StackMemoryFormat, WasmLayout};
use crate::layout::{
CallConv, ReturnMethod, StackMemoryFormat, WasmLayout, ZigVersion, BUILTINS_ZIG_VERSION,
};
use crate::wasm_module::{Align, CodeBuilder, LocalId, ValueType, VmSymbolState};
use crate::{copy_memory, round_up_to_alignment, CopyMemoryConfig, PTR_SIZE, PTR_TYPE};
@ -55,11 +57,22 @@ pub enum StoredValue {
}
impl StoredValue {
pub fn value_type(&self) -> ValueType {
/// Value types to pass to Wasm functions
/// One Roc value can become 0, 1, or 2 Wasm arguments
pub fn arg_types(&self, conv: CallConv) -> &'static [ValueType] {
use ValueType::*;
match self {
Self::VirtualMachineStack { value_type, .. } => *value_type,
Self::Local { value_type, .. } => *value_type,
Self::StackMemory { .. } => ValueType::I32,
// Simple numbers: 1 Roc argument => 1 Wasm argument
Self::VirtualMachineStack { value_type, .. } | Self::Local { value_type, .. } => {
match value_type {
I32 => &[I32],
I64 => &[I64],
F32 => &[F32],
F64 => &[F64],
}
}
// Stack memory values: 1 Roc argument => 0-2 Wasm arguments
Self::StackMemory { size, format, .. } => conv.stack_memory_arg_types(*size, *format),
}
}
}
@ -152,12 +165,18 @@ impl<'a> Storage<'a> {
} => {
let location = match kind {
StoredValueKind::Parameter => {
self.arg_types.push(PTR_TYPE);
StackMemoryLocation::PointerArg(next_local_id)
if *size > 0 {
self.arg_types.push(PTR_TYPE);
StackMemoryLocation::PointerArg(next_local_id)
} else {
// An argument with zero size is purely conceptual, and will not exist in Wasm.
// However we need to track the symbol, so we treat it like a local variable.
StackMemoryLocation::FrameOffset(0)
}
}
StoredValueKind::Variable => {
if self.stack_frame_pointer.is_none() {
if self.stack_frame_pointer.is_none() && *size > 0 {
self.stack_frame_pointer = Some(next_local_id);
self.local_types.push(PTR_TYPE);
}
@ -243,13 +262,20 @@ impl<'a> Storage<'a> {
}
StoredValue::StackMemory {
location, format, ..
location,
format,
size,
..
} => {
if size == 0 {
return;
}
let (local_id, offset) = location.local_and_offset(self.stack_frame_pointer);
code_builder.get_local(local_id);
if format == StackMemoryFormat::Aggregate {
if format == StackMemoryFormat::DataStructure {
if offset != 0 {
code_builder.i32_const(offset as i32);
code_builder.i32_add();
@ -269,6 +295,44 @@ impl<'a> Storage<'a> {
}
}
/// Load one symbol onto the VM stack using Zig's Wasm calling convention.
/// Structs up to 16 bytes are loaded *by value* as one or two scalar values
/// (mirroring `CallConv::Zig` in `stack_memory_arg_types`); anything else
/// falls back to the C convention (`load_symbol_ccc`).
fn load_symbol_zig(&mut self, code_builder: &mut CodeBuilder, arg: Symbol) {
if let StoredValue::StackMemory {
location,
size,
alignment_bytes,
format: StackMemoryFormat::DataStructure,
} = self.get(&arg)
{
if *size == 0 {
// do nothing
// Zero-sized values produce no Wasm arguments at all.
} else if *size > 16 {
// Large struct: Zig passes it by reference, same as the C convention.
self.load_symbol_ccc(code_builder, arg);
} else {
// Small/medium struct: read its bytes directly out of stack memory
// and push them as one or two scalar values.
let (local_id, offset) = location.local_and_offset(self.stack_frame_pointer);
code_builder.get_local(local_id);
let align = Align::from(*alignment_bytes);
if *size == 1 {
code_builder.i32_load8_u(align, offset);
} else if *size == 2 {
code_builder.i32_load16_u(align, offset);
} else if *size <= 4 {
code_builder.i32_load(align, offset);
} else if *size <= 8 {
code_builder.i64_load(align, offset);
} else if *size <= 12 && BUILTINS_ZIG_VERSION == ZigVersion::Zig9 {
// 9-12 bytes on Zig 0.9: an i64 + i32 pair (matches stack_memory_arg_types)
code_builder.i64_load(align, offset);
code_builder.i32_load(align, offset + 8);
} else {
// 13-16 bytes (or 9-12 on Zig 0.8): two i64s
code_builder.i64_load(align, offset);
code_builder.i64_load(align, offset + 8);
}
}
} else {
// Primitives, heap values, and non-struct stack formats:
// identical to the C convention.
self.load_symbol_ccc(code_builder, arg);
}
}
/// stack memory values are returned by pointer. e.g. a roc function
///
/// add : I128, I128 -> I128
@ -284,7 +348,11 @@ impl<'a> Storage<'a> {
StoredValue::VirtualMachineStack { .. } | StoredValue::Local { .. } => {
unreachable!("these storage types are not returned by writing to a pointer")
}
StoredValue::StackMemory { location, .. } => {
StoredValue::StackMemory { location, size, .. } => {
if size == 0 {
return;
}
let (local_id, offset) = location.local_and_offset(self.stack_frame_pointer);
code_builder.get_local(local_id);
@ -311,58 +379,58 @@ impl<'a> Storage<'a> {
}
}
/// Load symbols in a way compatible with LLVM's "fast calling convention"
/// A bug in Zig means it always uses this for Wasm even when we specify C calling convention.
/// It squashes small structs into primitive values where possible, avoiding stack memory
/// in favour of CPU registers (or VM stack values, which eventually become CPU registers).
/// We need to convert some of our structs from our internal C-like representation to work with Zig.
/// We are sticking to C ABI for better compatibility on the platform side.
pub fn load_symbols_fastcc(
/// Load symbols for a function call
pub fn load_symbols_for_call(
&mut self,
arena: &'a Bump,
code_builder: &mut CodeBuilder,
symbols: &[Symbol],
arguments: &[Symbol],
return_symbol: Symbol,
return_layout: &WasmLayout,
) {
// Note: we are not doing verify_stack_match in this case so we may generate more code.
// We would need more bookkeeping in CodeBuilder to track which representation is on the stack!
call_conv: CallConv,
) -> (Vec<'a, ValueType>, Option<ValueType>) {
let mut wasm_arg_types = Vec::with_capacity_in(arguments.len() * 2 + 1, arena);
let mut wasm_args = Vec::with_capacity_in(arguments.len() * 2 + 1, arena);
if return_layout.is_stack_memory() {
// Load the address where the return value should be written
self.load_return_address_ccc(code_builder, return_symbol);
}
let return_method = return_layout.return_method();
let return_type = match return_method {
ReturnMethod::Primitive(ty) => Some(ty),
ReturnMethod::NoReturnValue => None,
ReturnMethod::WriteToPointerArg => {
wasm_arg_types.push(PTR_TYPE);
wasm_args.push(return_symbol);
None
}
};
for sym in symbols {
if let StoredValue::StackMemory {
location,
size,
alignment_bytes,
format: StackMemoryFormat::Aggregate,
} = self.get(sym)
{
if *size == 0 {
unimplemented!("Passing zero-sized values is not implemented yet");
} else if *size > 8 {
return self.load_symbol_ccc(code_builder, *sym);
}
let (local_id, offset) = location.local_and_offset(self.stack_frame_pointer);
code_builder.get_local(local_id);
let align = Align::from(*alignment_bytes);
if *size == 1 {
code_builder.i32_load8_u(align, offset);
} else if *size == 2 {
code_builder.i32_load16_u(align, offset);
} else if *size <= 4 {
code_builder.i32_load(align, offset);
} else {
code_builder.i64_load(align, offset);
}
} else {
self.load_symbol_ccc(code_builder, *sym);
for arg in arguments {
let stored = self.symbol_storage_map.get(arg).unwrap();
let arg_types = stored.arg_types(call_conv);
wasm_arg_types.extend_from_slice(arg_types);
match arg_types.len() {
0 => {}
1 => wasm_args.push(*arg),
2 => wasm_args.extend_from_slice(&[*arg, *arg]),
n => unreachable!("Cannot have {} Wasm arguments for 1 Roc argument", n),
}
}
// If the symbols were already at the top of the stack, do nothing!
// Should be common for simple cases, due to the structure of the Mono IR
if !code_builder.verify_stack_match(&wasm_args) {
if return_method == ReturnMethod::WriteToPointerArg {
self.load_return_address_ccc(code_builder, return_symbol);
};
for arg in arguments {
match call_conv {
CallConv::C => self.load_symbol_ccc(code_builder, *arg),
CallConv::Zig => self.load_symbol_zig(code_builder, *arg),
}
}
}
(wasm_arg_types, return_type)
}
/// Generate code to copy a StoredValue to an arbitrary memory location

View File

@ -50,6 +50,15 @@ impl BlockType {
}
}
impl From<Option<ValueType>> for BlockType {
    /// A present value type becomes a typed block;
    /// absence becomes a block with no result.
    fn from(opt: Option<ValueType>) -> Self {
        opt.map_or(BlockType::NoResult, BlockType::Value)
    }
}
/// A control block in our model of the VM
/// Child blocks cannot "see" values from their parent block
struct VmBlock<'a> {
@ -429,10 +438,12 @@ impl<'a> CodeBuilder<'a> {
) {
self.build_local_declarations(local_types);
if let Some(frame_ptr_id) = frame_pointer {
let aligned_size = round_up_to_alignment(frame_size, FRAME_ALIGNMENT_BYTES);
self.build_stack_frame_push(aligned_size, frame_ptr_id);
self.build_stack_frame_pop(aligned_size, frame_ptr_id);
if frame_size != 0 {
if let Some(frame_ptr_id) = frame_pointer {
let aligned_size = round_up_to_alignment(frame_size, FRAME_ALIGNMENT_BYTES);
self.build_stack_frame_push(aligned_size, frame_ptr_id);
self.build_stack_frame_pop(aligned_size, frame_ptr_id);
}
}
self.code.push(END as u8);

View File

@ -1171,7 +1171,7 @@ fn gen_order_of_arithmetic_ops_complex_float() {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-dev"))]
#[cfg(any(feature = "gen-llvm", feature = "gen-dev", feature = "gen-wasm"))]
fn if_guard_bind_variable_false() {
assert_evals_to!(
indoc!(
@ -1190,7 +1190,7 @@ fn if_guard_bind_variable_false() {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-dev"))]
#[cfg(any(feature = "gen-llvm", feature = "gen-dev", feature = "gen-wasm"))]
fn if_guard_bind_variable_true() {
assert_evals_to!(
indoc!(

View File

@ -996,7 +996,7 @@ fn annotation_without_body() {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-dev"))]
#[cfg(any(feature = "gen-llvm", feature = "gen-dev", feature = "gen-wasm"))]
fn simple_closure() {
assert_evals_to!(
indoc!(
@ -2602,7 +2602,7 @@ fn hit_unresolved_type_variable() {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-dev"))]
#[cfg(any(feature = "gen-llvm", feature = "gen-dev", feature = "gen-wasm"))]
fn pattern_match_empty_record() {
assert_evals_to!(
indoc!(

View File

@ -254,7 +254,7 @@ fn twice_record_access() {
);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-dev"))]
#[cfg(any(feature = "gen-llvm", feature = "gen-dev", feature = "gen-wasm"))]
fn empty_record() {
assert_evals_to!(
indoc!(

View File

@ -141,6 +141,16 @@ where
}
}
/// Test wrapper for Roc programs whose result is the unit value `()`
/// (e.g. an empty record): call main with no arguments and no return value.
impl Wasm32TestResult for () {
fn build_wrapper_body(code_builder: &mut CodeBuilder, main_function_index: u32) {
// Main's symbol index is the same as its function index, since the first symbols we created were for procs
let main_symbol_index = main_function_index;
// 0 Wasm args, no return value — the unit result exists only conceptually.
code_builder.call(main_function_index, main_symbol_index, 0, false);
// NOTE(review): presumably pushes global 0 (the stack pointer?) so the test
// harness has something to inspect — TODO confirm against the other impls.
code_builder.get_global(0);
// No parameters, zero frame size, no frame pointer local.
code_builder.build_fn_header(&[], 0, None);
}
}
impl<T, U> Wasm32TestResult for (T, U)
where
T: Wasm32TestResult + FromWasm32Memory,