Merge pull request #5336 from roc-lang/dev-backend-list-map

dev backend: many more builtins
This commit is contained in:
Folkert de Vries 2023-05-01 10:20:15 +02:00 committed by GitHub
commit a5a91d428f
13 changed files with 900 additions and 373 deletions

View File

@ -73,6 +73,9 @@ comptime {
exportNumFn(num.bytesToU64C, "bytes_to_u64");
exportNumFn(num.bytesToU128C, "bytes_to_u128");
exportNumFn(num.shiftRightZeroFillI128, "shift_right_zero_fill.i128");
exportNumFn(num.shiftRightZeroFillU128, "shift_right_zero_fill.u128");
inline for (INTEGERS) |T, i| {
num.exportPow(T, ROC_BUILTINS ++ "." ++ NUM ++ ".pow_int.");
num.exportDivCeil(T, ROC_BUILTINS ++ "." ++ NUM ++ ".div_ceil.");
@ -91,6 +94,9 @@ comptime {
num.exportMulWithOverflow(T, WIDEINTS[i], ROC_BUILTINS ++ "." ++ NUM ++ ".mul_with_overflow.");
num.exportMulOrPanic(T, WIDEINTS[i], ROC_BUILTINS ++ "." ++ NUM ++ ".mul_or_panic.");
num.exportMulSaturatedInt(T, WIDEINTS[i], ROC_BUILTINS ++ "." ++ NUM ++ ".mul_saturated.");
num.exportMulWrappedInt(T, ROC_BUILTINS ++ "." ++ NUM ++ ".mul_wrapped.");
num.exportIsMultipleOf(T, ROC_BUILTINS ++ "." ++ NUM ++ ".is_multiple_of.");
num.exportCountLeadingZeroBits(T, ROC_BUILTINS ++ "." ++ NUM ++ ".count_leading_zero_bits.");
num.exportCountTrailingZeroBits(T, ROC_BUILTINS ++ "." ++ NUM ++ ".count_trailing_zero_bits.");

View File

@ -254,6 +254,30 @@ fn bytesToU128(arg: RocList, position: usize) u128 {
return @bitCast(u128, [_]u8{ bytes[position], bytes[position + 1], bytes[position + 2], bytes[position + 3], bytes[position + 4], bytes[position + 5], bytes[position + 6], bytes[position + 7], bytes[position + 8], bytes[position + 9], bytes[position + 10], bytes[position + 11], bytes[position + 12], bytes[position + 13], bytes[position + 14], bytes[position + 15] });
}
fn isMultipleOf(comptime T: type, lhs: T, rhs: T) bool {
if (rhs == 0 or rhs == -1) {
// lhs is a multiple of rhs iff
//
// - rhs == -1, or
// - both rhs and lhs are 0
//
// the -1 case matters for overflow reasons: `isize::MIN % -1` crashes in rust
return (rhs == -1) or (lhs == 0);
} else {
const rem = @mod(lhs, rhs);
return rem == 0;
}
}
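For illustration, a value-level Rust sketch of the same guard (a hypothetical helper, not part of this commit): `i64::MIN % -1` overflows because the corresponding quotient is unrepresentable, so the -1 case must be short-circuited before taking the remainder.

fn is_multiple_of_i64(lhs: i64, rhs: i64) -> bool {
    match rhs {
        0 => lhs == 0,       // only 0 is a multiple of 0
        -1 => true,          // every integer is a multiple of -1
        _ => lhs % rhs == 0, // safe: rhs is neither 0 nor -1
    }
}

fn main() {
    assert!(is_multiple_of_i64(i64::MIN, -1)); // a bare `%` would overflow here
    assert!(!is_multiple_of_i64(7, 2));
}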
pub fn exportIsMultipleOf(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(self: T, other: T) callconv(.C) bool {
return @call(.{ .modifier = always_inline }, isMultipleOf, .{ T, self, other });
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
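The exported symbol is the intrinsic prefix plus the Zig type name. As a hedged sketch, the Rust side could bind one of these exports roughly like this (the exact symbol string is an assumption derived from the `name ++ @typeName(T)` scheme above):

extern "C" {
    // assumed symbol name; see the naming scheme above
    #[link_name = "roc_builtins.num.is_multiple_of.i64"]
    fn roc_is_multiple_of_i64(lhs: i64, rhs: i64) -> bool;
}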
fn addWithOverflow(comptime T: type, self: T, other: T) WithOverflow(T) {
switch (@typeInfo(T)) {
.Int => {
@ -464,6 +488,31 @@ pub fn exportMulSaturatedInt(comptime T: type, comptime W: type, comptime name:
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn exportMulWrappedInt(comptime T: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(self: T, other: T) callconv(.C) T {
return self *% other;
}
}.func;
@export(f, .{ .name = name ++ @typeName(T), .linkage = .Strong });
}
pub fn shiftRightZeroFillI128(self: i128, other: u8) callconv(.C) i128 {
if (other & 0b1000_0000 > 0) {
return 0;
} else {
return self >> @intCast(u7, other);
}
}
pub fn shiftRightZeroFillU128(self: u128, other: u8) callconv(.C) u128 {
if (other & 0b1000_0000 > 0) {
return 0;
} else {
return self >> @intCast(u7, other);
}
}
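A minimal Rust rendering of the same guard, assuming zero-fill semantics for shift amounts of 128 or more:

fn shift_right_zero_fill_u128(x: u128, n: u8) -> u128 {
    // for a u8, `n & 0b1000_0000 != 0` is equivalent to `n >= 128`
    if n >= 128 {
        0
    } else {
        x >> n
    }
}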
pub fn exportMulOrPanic(comptime T: type, comptime W: type, comptime name: []const u8) void {
comptime var f = struct {
fn func(self: T, other: T) callconv(.C) T {

View File

@ -107,15 +107,16 @@ pub fn memcpy(dst: [*]u8, src: [*]u8, size: usize) void {
// indirection because otherwise zig creates an alias to the panic function which our LLVM code
// does not know how to deal with
pub fn test_panic(c_ptr: *anyopaque, crash_tag: u32) callconv(.C) void {
_ = c_ptr;
_ = crash_tag;
// const cstr = @ptrCast([*:0]u8, c_ptr);
//
// const stderr = std.io.getStdErr().writer();
// stderr.print("Roc panicked: {s}!\n", .{cstr}) catch unreachable;
//
// std.c.exit(1);
}
pub const Inc = fn (?[*]u8) callconv(.C) void;

View File

@ -285,10 +285,16 @@ pub const NUM_SUB_CHECKED_FLOAT: IntrinsicName =
pub const NUM_MUL_OR_PANIC_INT: IntrinsicName = int_intrinsic!("roc_builtins.num.mul_or_panic");
pub const NUM_MUL_SATURATED_INT: IntrinsicName = int_intrinsic!("roc_builtins.num.mul_saturated");
pub const NUM_MUL_WRAP_INT: IntrinsicName = int_intrinsic!("roc_builtins.num.mul_wrapped");
pub const NUM_MUL_CHECKED_INT: IntrinsicName = int_intrinsic!("roc_builtins.num.mul_with_overflow");
pub const NUM_MUL_CHECKED_FLOAT: IntrinsicName =
float_intrinsic!("roc_builtins.num.mul_with_overflow");
pub const NUM_IS_MULTIPLE_OF: IntrinsicName = int_intrinsic!("roc_builtins.num.is_multiple_of");
pub const NUM_SHIFT_RIGHT_ZERO_FILL: IntrinsicName =
int_intrinsic!("roc_builtins.num.shift_right_zero_fill");
pub const NUM_COUNT_LEADING_ZERO_BITS: IntrinsicName =
int_intrinsic!("roc_builtins.num.count_leading_zero_bits");
pub const NUM_COUNT_TRAILING_ZERO_BITS: IntrinsicName =

View File

@ -581,6 +581,32 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
udiv_reg64_reg64_reg64(buf, dst, src1, src2);
}
fn irem_reg64_reg64_reg64<'a, ASM, CC>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<'a, '_, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
) where
ASM: Assembler<AArch64GeneralReg, AArch64FloatReg>,
CC: CallConv<AArch64GeneralReg, AArch64FloatReg, ASM>,
{
todo!()
}
fn urem_reg64_reg64_reg64<'a, ASM, CC>(
_buf: &mut Vec<'a, u8>,
_storage_manager: &mut StorageManager<'a, '_, AArch64GeneralReg, AArch64FloatReg, ASM, CC>,
_dst: AArch64GeneralReg,
_src1: AArch64GeneralReg,
_src2: AArch64GeneralReg,
) where
ASM: Assembler<AArch64GeneralReg, AArch64FloatReg>,
CC: CallConv<AArch64GeneralReg, AArch64FloatReg, ASM>,
{
todo!()
}
#[inline(always)]
fn mul_freg32_freg32_freg32(
buf: &mut Vec<'_, u8>,
@ -725,8 +751,28 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
fmov_freg_freg(buf, FloatWidth::F64, dst, src);
}
#[inline(always)]
fn mov_reg_reg(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
dst: AArch64GeneralReg,
src: AArch64GeneralReg,
) {
match register_width {
RegisterWidth::W8 => todo!(),
RegisterWidth::W16 => todo!(),
RegisterWidth::W32 => todo!(),
RegisterWidth::W64 => mov_reg64_reg64(buf, dst, src),
}
}
#[inline(always)]
fn movsx_reg_reg(
_buf: &mut Vec<'_, u8>,
_input_width: RegisterWidth,
_dst: AArch64GeneralReg,
_src: AArch64GeneralReg,
) {
todo!("move with sign extension");
}
#[inline(always)]
@ -944,14 +990,26 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
todo!("saving floating point reg to stack for AArch64");
}
#[inline(always)]
fn mov_stack32_reg(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
offset: i32,
src: AArch64GeneralReg,
) {
match register_width {
RegisterWidth::W8 => todo!(),
RegisterWidth::W16 => todo!(),
RegisterWidth::W32 => todo!(),
RegisterWidth::W64 => {
if offset < 0 {
todo!("negative stack offsets for AArch64");
} else if offset < (0xFFF << 8) {
debug_assert!(offset % 8 == 0);
str_reg64_reg64_imm12(buf, src, AArch64GeneralReg::ZRSP, (offset as u16) >> 3);
} else {
todo!("stack offsets over 32k for AArch64");
}
}
}
}
#[inline(always)]
@ -985,7 +1043,7 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
}
#[inline(always)]
fn eq_reg_reg_reg(
buf: &mut Vec<'_, u8>,
_register_width: RegisterWidth,
dst: AArch64GeneralReg,

View File

@ -37,6 +37,18 @@ pub enum RegisterWidth {
W64,
}
impl RegisterWidth {
fn try_from_layout(layout: InLayout) -> Option<Self> {
match layout {
Layout::BOOL | Layout::I8 | Layout::U8 => Some(RegisterWidth::W8),
Layout::I16 | Layout::U16 => Some(RegisterWidth::W16),
Layout::U32 | Layout::I32 => Some(RegisterWidth::W32),
Layout::I64 | Layout::U64 => Some(RegisterWidth::W64),
_ => None,
}
}
}
pub trait CallConv<GeneralReg: RegTrait, FloatReg: RegTrait, ASM: Assembler<GeneralReg, FloatReg>>:
Sized + Copy
{
@ -256,7 +268,33 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
);
fn mov_reg64_imm64(buf: &mut Vec<'_, u8>, dst: GeneralReg, imm: i64);
fn mov_freg64_freg64(buf: &mut Vec<'_, u8>, dst: FloatReg, src: FloatReg);
fn mov_reg_reg(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
dst: GeneralReg,
src: GeneralReg,
);
fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: GeneralReg) {
Self::mov_reg_reg(buf, RegisterWidth::W64, dst, src);
}
fn mov_reg32_reg32(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: GeneralReg) {
Self::mov_reg_reg(buf, RegisterWidth::W32, dst, src);
}
fn mov_reg16_reg16(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: GeneralReg) {
Self::mov_reg_reg(buf, RegisterWidth::W16, dst, src);
}
fn mov_reg8_reg8(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: GeneralReg) {
Self::mov_reg_reg(buf, RegisterWidth::W8, dst, src);
}
// move with sign extension
fn movsx_reg_reg(
buf: &mut Vec<'_, u8>,
input_width: RegisterWidth,
dst: GeneralReg,
src: GeneralReg,
);
// base32 ops are similar to the stack-based ones, but they reference the base/frame pointer.
fn mov_freg64_base32(buf: &mut Vec<'_, u8>, dst: FloatReg, offset: i32);
@ -332,7 +370,26 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
fn mov_freg64_stack32(buf: &mut Vec<'_, u8>, dst: FloatReg, offset: i32);
fn mov_reg64_stack32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32);
fn mov_stack32_freg64(buf: &mut Vec<'_, u8>, offset: i32, src: FloatReg);
fn mov_stack32_reg(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
offset: i32,
src: GeneralReg,
);
fn mov_stack32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: GeneralReg) {
Self::mov_stack32_reg(buf, RegisterWidth::W64, offset, src)
}
fn mov_stack32_reg32(buf: &mut Vec<'_, u8>, offset: i32, src: GeneralReg) {
Self::mov_stack32_reg(buf, RegisterWidth::W32, offset, src)
}
fn mov_stack32_reg16(buf: &mut Vec<'_, u8>, offset: i32, src: GeneralReg) {
Self::mov_stack32_reg(buf, RegisterWidth::W16, offset, src)
}
fn mov_stack32_reg8(buf: &mut Vec<'_, u8>, offset: i32, src: GeneralReg) {
Self::mov_stack32_reg(buf, RegisterWidth::W8, offset, src)
}
fn sqrt_freg64_freg64(buf: &mut Vec<'_, u8>, dst: FloatReg, src: FloatReg);
fn sqrt_freg32_freg32(buf: &mut Vec<'_, u8>, dst: FloatReg, src: FloatReg);
@ -387,6 +444,7 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
) where
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
fn udiv_reg64_reg64_reg64<'a, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, '_, GeneralReg, FloatReg, ASM, CC>,
@ -397,6 +455,26 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
fn irem_reg64_reg64_reg64<'a, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, '_, GeneralReg, FloatReg, ASM, CC>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
) where
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
fn urem_reg64_reg64_reg64<'a, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, '_, GeneralReg, FloatReg, ASM, CC>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
) where
ASM: Assembler<GeneralReg, FloatReg>,
CC: CallConv<GeneralReg, FloatReg, ASM>;
fn sub_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GeneralReg, src1: GeneralReg, imm32: i32);
fn sub_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
@ -405,7 +483,7 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
src2: GeneralReg,
);
fn eq_reg_reg_reg(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
dst: GeneralReg,
@ -413,6 +491,15 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait>: Sized + Copy {
src2: GeneralReg,
);
fn eq_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
dst: GeneralReg,
src1: GeneralReg,
src2: GeneralReg,
) {
Self::eq_reg_reg_reg(buf, RegisterWidth::W64, dst, src1, src2)
}
fn neq_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
@ -779,13 +866,22 @@ impl<
// move return value to dst.
match *ret_layout {
single_register_integers!() => {
let width = RegisterWidth::try_from_layout(*ret_layout).unwrap();
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
ASM::mov_reg_reg(&mut self.buf, width, dst_reg, CC::GENERAL_RETURN_REGS[0]);
}
single_register_floats!() => {
let dst_reg = self.storage_manager.claim_float_reg(&mut self.buf, dst);
ASM::mov_freg64_freg64(&mut self.buf, dst_reg, CC::FLOAT_RETURN_REGS[0]);
}
Layout::I128 | Layout::U128 => {
let offset = self.storage_manager.claim_stack_area(dst, 16);
ASM::mov_base32_reg64(&mut self.buf, offset, CC::GENERAL_RETURN_REGS[0]);
ASM::mov_base32_reg64(&mut self.buf, offset + 8, CC::GENERAL_RETURN_REGS[1]);
}
other => {
//
match self.layout_interner.get(other) {
@ -1093,6 +1189,21 @@ impl<
src2_reg,
);
}
Layout::Builtin(Builtin::Int(IntWidth::I128 | IntWidth::U128)) => {
let int_width = match *layout {
Layout::I128 => IntWidth::I128,
Layout::U128 => IntWidth::U128,
_ => unreachable!(),
};
self.build_fn_call(
dst,
bitcode::NUM_MUL_WRAP_INT[int_width].to_string(),
&[*src1, *src2],
&[*layout, *layout],
layout,
);
}
Layout::Builtin(Builtin::Float(FloatWidth::F64)) => {
let dst_reg = self.storage_manager.claim_float_reg(&mut self.buf, dst);
let src1_reg = self.storage_manager.load_to_float_reg(&mut self.buf, src1);
@ -1165,6 +1276,50 @@ impl<
}
}
fn build_num_rem(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>) {
match self.layout_interner.get(*layout) {
Layout::Builtin(Builtin::Int(
IntWidth::I64 | IntWidth::I32 | IntWidth::I16 | IntWidth::I8,
)) => {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
let src1_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src1);
let src2_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src2);
ASM::irem_reg64_reg64_reg64(
&mut self.buf,
&mut self.storage_manager,
dst_reg,
src1_reg,
src2_reg,
);
}
Layout::Builtin(Builtin::Int(
IntWidth::U64 | IntWidth::U32 | IntWidth::U16 | IntWidth::U8,
)) => {
let dst_reg = self.storage_manager.claim_general_reg(&mut self.buf, dst);
let src1_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src1);
let src2_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src2);
ASM::urem_reg64_reg64_reg64(
&mut self.buf,
&mut self.storage_manager,
dst_reg,
src1_reg,
src2_reg,
);
}
x => todo!("NumDiv: layout, {:?}", x),
}
}
fn build_num_neg(&mut self, dst: &Symbol, src: &Symbol, layout: &InLayout<'a>) {
match self.layout_interner.get(*layout) {
Layout::Builtin(Builtin::Int(IntWidth::I64 | IntWidth::U64)) => {
@ -1222,7 +1377,43 @@ impl<
let src2_reg = self
.storage_manager
.load_to_general_reg(&mut self.buf, src2);
ASM::eq_reg_reg_reg(&mut self.buf, width, dst_reg, src1_reg, src2_reg);
}
Layout::U128 | Layout::I128 => {
let buf = &mut self.buf;
let dst_reg = self.storage_manager.claim_general_reg(buf, dst);
// put the arguments on the stack
let (src1_offset, _) = self.storage_manager.stack_offset_and_size(src1);
let (src2_offset, _) = self.storage_manager.stack_offset_and_size(src2);
let tmp1 = self
.storage_manager
.claim_general_reg(buf, &Symbol::DEV_TMP);
let tmp2 = self
.storage_manager
.claim_general_reg(buf, &Symbol::DEV_TMP2);
// move the upper 8 bytes of both arguments into a register
ASM::mov_reg64_base32(buf, tmp1, src1_offset);
ASM::mov_reg64_base32(buf, tmp2, src2_offset);
// store the result in our destination
ASM::eq_reg64_reg64_reg64(buf, dst_reg, tmp1, tmp2);
// move the lower 8 bytes of both arguments into a register
ASM::mov_reg64_base32(buf, tmp1, src1_offset + 8);
ASM::mov_reg64_base32(buf, tmp2, src2_offset + 8);
// store the result in tmp1
ASM::eq_reg64_reg64_reg64(buf, tmp1, tmp1, tmp2);
// now and dst and tmp1, storing the result in dst
ASM::and_reg64_reg64_reg64(buf, dst_reg, dst_reg, tmp1);
self.storage_manager.free_symbol(&Symbol::DEV_TMP);
self.storage_manager.free_symbol(&Symbol::DEV_TMP2);
}
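Read at value level, the sequence above compares the two 64-bit halves independently and ANDs the results; a small illustrative sketch (not from the commit):

fn eq_u128(a: u128, b: u128) -> bool {
    let (a_lo, a_hi) = (a as u64, (a >> 64) as u64);
    let (b_lo, b_hi) = (b as u64, (b >> 64) as u64);
    // one 64-bit compare per half, then a bitwise AND,
    // mirroring the eq/eq/and instruction sequence emitted above
    (a_lo == b_lo) & (a_hi == b_hi)
}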
Layout::F32 => todo!("NumEq: layout, {:?}", self.layout_interner.dbg(Layout::F32)),
Layout::F64 => todo!("NumEq: layout, {:?}", self.layout_interner.dbg(Layout::F64)),
@ -1245,7 +1436,7 @@ impl<
let width = RegisterWidth::W8; // we're comparing booleans
let dst_reg = self.storage_manager.load_to_general_reg(&mut self.buf, dst);
ASM::eq_reg_reg_reg(&mut self.buf, width, dst_reg, dst_reg, tmp_reg);
}
other => {
let ident_ids = self
@ -1513,7 +1704,7 @@ impl<
self.caller_procs.push(caller_proc);
let inc_n_data = Symbol::DEV_TMP;
self.build_fn_pointer(&inc_n_data, inc_n_data_string);
self.build_fn_pointer(&caller, caller_string);
@ -1539,7 +1730,7 @@ impl<
}
self.load_literal(
&Symbol::DEV_TMP2,
&Layout::BOOL,
&Literal::Bool(higher_order.passed_function.owns_captured_environment),
);
@ -1558,7 +1749,7 @@ impl<
caller,
data,
inc_n_data,
Symbol::DEV_TMP2,
alignment,
old_element_width,
new_element_width,
@ -1584,26 +1775,26 @@ impl<
.claim_stack_area(dst, self.layout_interner.stack_size(ret_layout));
self.build_fn_call(
&Symbol::DEV_TMP3,
bitcode::LIST_MAP.to_string(),
&arguments,
&layouts,
&ret_layout,
);
self.free_symbol(&Symbol::DEV_TMP);
self.free_symbol(&Symbol::DEV_TMP2);
// Return list value from fn call
self.storage_manager.copy_symbol_to_stack_offset(
self.layout_interner,
&mut self.buf,
base_offset,
&Symbol::DEV_TMP3,
&ret_layout,
);
self.free_symbol(&Symbol::DEV_TMP3);
}
HigherOrder::ListMap2 { .. } => todo!(),
HigherOrder::ListMap3 { .. } => todo!(),
@ -2360,7 +2551,7 @@ impl<
ASM::mov_reg64_imm64(&mut self.buf, reg, i128::from_ne_bytes(val) as i64);
}
(
Literal::Int(bytes) | Literal::U128(bytes),
Layout::Builtin(Builtin::Int(IntWidth::I128 | IntWidth::U128)),
) => {
self.storage_manager.with_tmp_general_reg(
@ -2459,7 +2650,7 @@ impl<
self.create_array(sym, &Layout::U8, elements.into_bump_slice())
}
}
x => todo!("loading literal, {:?}", x),
_ => todo!("loading literal {:?} with layout {:?}", lit, layout),
}
}
@ -2685,7 +2876,21 @@ impl<
let buf = &mut self.buf;
match int_width {
IntWidth::U128 | IntWidth::I128 => {
let layout = match int_width {
IntWidth::I128 => Layout::I128,
IntWidth::U128 => Layout::U128,
_ => unreachable!(),
};
self.build_fn_call(
dst,
bitcode::NUM_SHIFT_RIGHT_ZERO_FILL[int_width].to_string(),
&[*src1, *src2],
&[layout, layout],
&layout,
);
}
_ => {
let dst_reg = self.storage_manager.claim_general_reg(buf, dst);
let src1_reg = self.storage_manager.load_to_general_reg(buf, src1);
@ -2721,18 +2926,95 @@ impl<
source: IntWidth,
target: IntWidth,
) {
use IntWidth::*;
let buf = &mut self.buf;
match (source, target) {
(U128, U64) => {
let dst_reg = self.storage_manager.claim_general_reg(buf, dst);
let (offset, _size) = self.storage_manager.stack_offset_and_size(src);
ASM::mov_reg64_base32(buf, dst_reg, offset + 8);
return;
}
(U64, U128) => {
let src_reg = self.storage_manager.load_to_general_reg(buf, src);
let base_offset = self.storage_manager.claim_stack_area(dst, 16);
let tmp = Symbol::DEV_TMP;
let tmp_reg = self.storage_manager.claim_general_reg(buf, &tmp);
// move a zero into the lower 8 bytes
ASM::mov_reg64_imm64(buf, tmp_reg, 0x0);
ASM::mov_base32_reg64(buf, base_offset, tmp_reg);
ASM::mov_base32_reg64(buf, base_offset + 8, src_reg);
return;
}
_ => {}
}
let dst_reg = self.storage_manager.claim_general_reg(buf, dst);
let src_reg = self.storage_manager.load_to_general_reg(buf, src);
if source.stack_size() == target.stack_size() {
match source.stack_size() {
8 => ASM::mov_reg64_reg64(buf, dst_reg, src_reg),
4 => ASM::mov_reg32_reg32(buf, dst_reg, src_reg),
2 => ASM::mov_reg16_reg16(buf, dst_reg, src_reg),
1 => ASM::mov_reg8_reg8(buf, dst_reg, src_reg),
_ => todo!("int cast from {source:?} to {target:?}"),
}
} else {
todo!("int cast from {source:?} to {target:?}");
match (source, target) {
// -- CASTING UP --
(I8 | U8, U16 | U32 | U64) => {
// zero out the register
ASM::xor_reg64_reg64_reg64(buf, dst_reg, dst_reg, dst_reg);
// move the 8-bit integer
ASM::mov_reg_reg(buf, RegisterWidth::W8, dst_reg, src_reg);
}
(U16, U32 | U64) => {
// zero out the register
ASM::xor_reg64_reg64_reg64(buf, dst_reg, dst_reg, dst_reg);
// move the 16-bit integer
ASM::mov_reg_reg(buf, RegisterWidth::W16, dst_reg, src_reg);
}
(U32, U64) => {
// zero out the register
ASM::xor_reg64_reg64_reg64(buf, dst_reg, dst_reg, dst_reg);
// move the 32-bit integer
ASM::mov_reg_reg(buf, RegisterWidth::W32, dst_reg, src_reg);
}
(I8, I16 | I32 | I64) => {
ASM::movsx_reg_reg(buf, RegisterWidth::W8, dst_reg, src_reg)
}
(I16, I32 | I64) => ASM::movsx_reg_reg(buf, RegisterWidth::W16, dst_reg, src_reg),
(I32, I64) => ASM::movsx_reg_reg(buf, RegisterWidth::W32, dst_reg, src_reg),
// -- CASTING DOWN --
(U64 | I64, I32 | U32) => {
// move as a 32-bit integer (leaving any other bits behind)
ASM::mov_reg_reg(buf, RegisterWidth::W32, dst_reg, src_reg);
}
(U64 | I64 | U32 | I32, I16 | U16) => {
// move as a 16-bit integer (leaving any other bits behind)
ASM::mov_reg_reg(buf, RegisterWidth::W16, dst_reg, src_reg);
}
(U64 | I64 | U32 | I32 | U16 | I16, I8 | U8) => {
// move as an 8-bit integer (leaving any other bits behind)
ASM::mov_reg_reg(buf, RegisterWidth::W8, dst_reg, src_reg);
}
_ => todo!("int cast from {source:?} to {target:?}"),
}
}
}
}
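At value level, the three strategies above match Rust's own integer casts; a small illustrative check (not from the commit):

fn main() {
    let x: i8 = -15;
    assert_eq!(x as i64, -15); // widening signed: movsx replicates the sign bit
    let y: u8 = 0xF0;
    assert_eq!(y as u64, 0xF0); // widening unsigned: xor + narrow mov zeroes the upper bits
    let z: i64 = 0x1_0000_0005;
    assert_eq!(z as u16, 5); // narrowing: a narrow mov drops the high bits
}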
@ -2853,21 +3135,16 @@ impl<
if size - copied >= 8 {
for _ in (0..(size - copied)).step_by(8) {
ASM::mov_reg64_mem64_offset32(buf, tmp_reg, ptr_reg, copied);
ASM::mov_base32_reg64(buf, base_offset + copied, tmp_reg);
copied += 8;
}
}
if size - copied >= 4 {
for _ in (0..(size - copied)).step_by(4) {
ASM::mov_reg32_mem32_offset32(buf, tmp_reg, ptr_reg, copied);
ASM::mov_base32_reg32(buf, base_offset + copied, tmp_reg);
copied += 4;
}
@ -2875,8 +3152,8 @@ impl<
if size - copied >= 2 {
for _ in (0..(size - copied)).step_by(2) {
ASM::mov_reg16_mem16_offset32(buf, tmp_reg, ptr_reg, copied);
ASM::mov_base32_reg16(buf, base_offset + copied, tmp_reg);
copied += 2;
}
@ -2884,13 +3161,12 @@ impl<
if size - copied >= 1 {
for _ in (0..(size - copied)).step_by(1) {
ASM::mov_reg8_mem8_offset32(buf, tmp_reg, ptr_reg, copied);
ASM::mov_base32_reg8(buf, base_offset + copied, tmp_reg);
copied += 1;
}
}
}
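The copy loop walks the value using the widest move that still fits, so a 15-byte value copies as 8 + 4 + 2 + 1 bytes. A tiny sketch of that schedule (illustrative only):

fn chunk_schedule(mut size: usize) -> Vec<usize> {
    let mut chunks = Vec::new();
    // mirror the 8/4/2/1 cascade above
    for &c in &[8usize, 4, 2, 1] {
        while size >= c {
            chunks.push(c);
            size -= c;
        }
    }
    chunks
}

fn main() {
    assert_eq!(chunk_schedule(15), vec![8, 4, 2, 1]);
}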
fn ptr_read(
@ -2959,6 +3235,15 @@ impl<
});
}
Layout::Union(UnionLayout::NonRecursive(_)) => {
// put it on the stack
let stack_size = layout_interner.stack_size(element_in_layout);
storage_manager.with_tmp_general_reg(buf, |storage_manager, buf, tmp_reg| {
Self::unbox_to_stack(buf, storage_manager, dst, stack_size, ptr_reg, tmp_reg);
});
}
_ => todo!("unboxing of {:?}", layout_interner.dbg(element_in_layout)),
}
}

View File

@ -852,15 +852,10 @@ impl<
)
}
_ if layout_interner.stack_size(*layout) == 0 => {}
Layout::Struct { .. } | Layout::Union(UnionLayout::NonRecursive(_)) => {
let (from_offset, size) = self.stack_offset_and_size(sym);
debug_assert_eq!(from_offset % 8, 0);
debug_assert_eq!(size % 8, 0);
debug_assert_eq!(size, layout_interner.stack_size(*layout));
self.copy_to_stack_offset(buf, size, from_offset, to_offset)
}
x => todo!("copying data to the stack with layout, {:?}", x),

View File

@ -4,10 +4,10 @@ use crate::{
single_register_layouts, Relocation,
};
use bumpalo::collections::Vec;
use roc_builtins::bitcode::{FloatWidth, IntWidth};
use roc_error_macros::internal_error;
use roc_module::symbol::Symbol;
use roc_mono::layout::{Builtin, InLayout, Layout, LayoutInterner, STLayoutInterner, UnionLayout};
use super::{CompareOperation, RegisterWidth};
@ -458,6 +458,30 @@ impl X64_64SystemVStoreArgs {
match in_layout {
single_register_integers!() => self.store_arg_general(buf, storage_manager, sym),
single_register_floats!() => self.store_arg_float(buf, storage_manager, sym),
Layout::I128 | Layout::U128 => {
let (offset, _) = storage_manager.stack_offset_and_size(&sym);
if self.general_i + 1 < Self::GENERAL_PARAM_REGS.len() {
let reg1 = Self::GENERAL_PARAM_REGS[self.general_i];
let reg2 = Self::GENERAL_PARAM_REGS[self.general_i + 1];
X86_64Assembler::mov_reg64_base32(buf, reg1, offset);
X86_64Assembler::mov_reg64_base32(buf, reg2, offset + 8);
self.general_i += 2;
} else {
// Copy to stack using return reg as buffer.
let reg = Self::GENERAL_RETURN_REGS[0];
X86_64Assembler::mov_reg64_base32(buf, reg, offset);
X86_64Assembler::mov_stack32_reg64(buf, self.tmp_stack_offset, reg);
X86_64Assembler::mov_reg64_base32(buf, reg, offset + 8);
X86_64Assembler::mov_stack32_reg64(buf, self.tmp_stack_offset + 8, reg);
self.tmp_stack_offset += 16;
}
}
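The two 8-byte loads per argument reflect how a 128-bit value is split into two 64-bit slots, either two parameter registers or two adjacent stack words. A hedged value-level sketch (names illustrative, little-endian half order assumed):

fn split_u128(x: u128) -> (u64, u64) {
    (x as u64, (x >> 64) as u64)
}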
x if layout_interner.stack_size(x) == 0 => {}
x if layout_interner.stack_size(x) > 16 => {
// TODO: Double check this.
@ -512,21 +536,50 @@ impl X64_64SystemVStoreArgs {
self.tmp_stack_offset += size as i32;
}
Layout::Union(UnionLayout::NonRecursive(_)) => {
// for now, just also store this on the stack
type ASM = X86_64Assembler;
let tmp_reg = Self::GENERAL_RETURN_REGS[0];
let stack_offset = self.tmp_stack_offset as i32;
let mut copied = 0;
let (base_offset, size) = storage_manager.stack_offset_and_size(&sym);
debug_assert_eq!(base_offset % 8, 0);
if size - copied >= 8 {
for _ in (0..(size - copied)).step_by(8) {
ASM::mov_reg64_base32(buf, tmp_reg, base_offset + copied as i32);
ASM::mov_stack32_reg64(buf, stack_offset + copied as i32, tmp_reg);
copied += 8;
}
}
if size - copied >= 4 {
for _ in (0..(size - copied)).step_by(4) {
ASM::mov_reg32_base32(buf, tmp_reg, base_offset + copied as i32);
ASM::mov_stack32_reg32(buf, stack_offset + copied as i32, tmp_reg);
copied += 4;
}
}
if size - copied >= 2 {
for _ in (0..(size - copied)).step_by(2) {
ASM::mov_reg16_base32(buf, tmp_reg, base_offset + copied as i32);
ASM::mov_stack32_reg16(buf, stack_offset + copied as i32, tmp_reg);
copied += 2;
}
}
if size - copied >= 1 {
for _ in (0..(size - copied)).step_by(1) {
ASM::mov_reg8_base32(buf, tmp_reg, base_offset + copied as i32);
ASM::mov_stack32_reg8(buf, stack_offset + copied as i32, tmp_reg);
copied += 1;
}
}
self.tmp_stack_offset += size as i32;
}
_ => {
@ -633,6 +686,10 @@ impl X64_64SystemVLoadArgs {
storage_manager.complex_stack_arg(&sym, self.argument_offset, stack_size);
self.argument_offset += stack_size as i32;
}
Layout::Builtin(Builtin::Int(IntWidth::U128 | IntWidth::I128)) => {
storage_manager.complex_stack_arg(&sym, self.argument_offset, stack_size);
self.argument_offset += stack_size as i32;
}
Layout::Union(UnionLayout::NonRecursive(_)) => {
// for now, just also store this on the stack
storage_manager.complex_stack_arg(&sym, self.argument_offset, stack_size);
@ -1322,6 +1379,46 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
mov_reg64_reg64(buf, dst, X86_64GeneralReg::RAX);
}
fn irem_reg64_reg64_reg64<'a, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, '_, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) where
ASM: Assembler<X86_64GeneralReg, X86_64FloatReg>,
CC: CallConv<X86_64GeneralReg, X86_64FloatReg, ASM>,
{
use crate::generic64::RegStorage;
storage_manager.ensure_reg_free(buf, RegStorage::General(X86_64GeneralReg::RAX));
storage_manager.ensure_reg_free(buf, RegStorage::General(X86_64GeneralReg::RDX));
mov_reg64_reg64(buf, X86_64GeneralReg::RAX, src1);
idiv_reg64_reg64(buf, src2);
mov_reg64_reg64(buf, dst, X86_64GeneralReg::RDX);
}
fn urem_reg64_reg64_reg64<'a, ASM, CC>(
buf: &mut Vec<'a, u8>,
storage_manager: &mut StorageManager<'a, '_, X86_64GeneralReg, X86_64FloatReg, ASM, CC>,
dst: X86_64GeneralReg,
src1: X86_64GeneralReg,
src2: X86_64GeneralReg,
) where
ASM: Assembler<X86_64GeneralReg, X86_64FloatReg>,
CC: CallConv<X86_64GeneralReg, X86_64FloatReg, ASM>,
{
use crate::generic64::RegStorage;
storage_manager.ensure_reg_free(buf, RegStorage::General(X86_64GeneralReg::RAX));
storage_manager.ensure_reg_free(buf, RegStorage::General(X86_64GeneralReg::RDX));
mov_reg64_reg64(buf, X86_64GeneralReg::RAX, src1);
udiv_reg64_reg64(buf, src2);
mov_reg64_reg64(buf, dst, X86_64GeneralReg::RDX);
}
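Both helpers lean on the x86-64 div/idiv contract: the dividend lives in RDX:RAX, the quotient comes back in RAX, and the remainder in RDX, which is why both registers are freed up and the result is copied out of RDX. A value-level sanity check:

fn main() {
    let (a, b) = (17i64, 5i64);
    assert_eq!(a / b, 3); // what idiv leaves in RAX
    assert_eq!(a % b, 2); // what idiv leaves in RDX
}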
#[inline(always)]
fn jmp_imm32(buf: &mut Vec<'_, u8>, offset: i32) -> usize {
jmp_imm32(buf, offset);
@ -1385,8 +1482,22 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
movsd_freg64_freg64(buf, dst, src);
}
#[inline(always)]
fn mov_reg_reg(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
dst: X86_64GeneralReg,
src: X86_64GeneralReg,
) {
mov_reg_reg(buf, register_width, dst, src);
}
#[inline(always)]
fn movsx_reg_reg(
buf: &mut Vec<'_, u8>,
input_width: RegisterWidth,
dst: X86_64GeneralReg,
src: X86_64GeneralReg,
) {
raw_movsx_reg_reg(buf, input_width, dst, src);
}
#[inline(always)]
@ -1558,8 +1669,13 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
movsd_base64_offset32_freg64(buf, X86_64GeneralReg::RSP, offset, src)
}
#[inline(always)]
fn mov_stack32_reg(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
offset: i32,
src: X86_64GeneralReg,
) {
mov_base_offset32_reg(buf, register_width, X86_64GeneralReg::RSP, offset, src)
}
#[inline(always)]
@ -1590,7 +1706,7 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
}
#[inline(always)]
fn eq_reg_reg_reg(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
dst: X86_64GeneralReg,
@ -1858,6 +1974,34 @@ fn add_reg_extension<T: RegTrait>(reg: T, byte: u8) -> u8 {
}
}
#[inline(always)]
fn binop_reg8_reg8(op_code: u8, buf: &mut Vec<u8>, dst: X86_64GeneralReg, src: X86_64GeneralReg) {
let dst_high = dst as u8 > 7;
let dst_mod = dst as u8 % 8;
let src_high = src as u8 > 7;
let src_mod = src as u8 % 8;
if dst_high || src_high {
// R8-R15 need their extension bits set in a REX prefix
let rex = add_rm_extension(dst, REX);
let rex = add_reg_extension(src, rex);
buf.extend([rex, op_code, 0xC0 | dst_mod | (src_mod << 3)])
} else {
// without any REX prefix, encodings 4-7 select the legacy high-byte
// registers AH/CH/DH/BH; emitting an empty REX prefix (0x40) makes
// them select SPL/BPL/SIL/DIL instead
let rex_prefix = [
X86_64GeneralReg::RBP,
X86_64GeneralReg::RSP,
X86_64GeneralReg::RSI,
X86_64GeneralReg::RDI,
];
if rex_prefix.contains(&src) || rex_prefix.contains(&dst) {
buf.push(0x40);
}
buf.extend([op_code, 0xC0 | dst_mod | (src_mod << 3)]);
}
}
#[inline(always)]
fn binop_reg16_reg16(
op_code: u8,
@ -2401,24 +2545,89 @@ fn lea_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg) {
])
}
/// `MOV r/m64,r64` -> Move r64 to r/m64.
#[inline(always)]
fn raw_mov_reg_reg(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
dst: X86_64GeneralReg,
src: X86_64GeneralReg,
) {
match register_width {
RegisterWidth::W8 => binop_reg8_reg8(0x88, buf, dst, src),
RegisterWidth::W16 => binop_reg16_reg16(0x89, buf, dst, src),
RegisterWidth::W32 => binop_reg32_reg32(0x89, buf, dst, src),
RegisterWidth::W64 => binop_reg64_reg64(0x89, buf, dst, src),
}
}
#[allow(unused)]
fn raw_movsx_reg_reg(
buf: &mut Vec<u8>,
input_width: RegisterWidth,
dst: X86_64GeneralReg,
src: X86_64GeneralReg,
) {
let dst_high = dst as u8 > 7;
let dst_mod = dst as u8 % 8;
let src_high = src as u8 > 7;
let src_mod = src as u8 % 8;
// NOTE: unlike the `MOV r/m, r` encodings above, MOVSX has the form
// `MOVSX r, r/m`, so dst lives in the reg field and src in the r/m field,
// and the REX extension bits are applied the other way around
let mod_rm = 0xC0 | (dst_mod << 3) | src_mod;
let rex = add_rm_extension(src, REX_W);
let rex = add_reg_extension(dst, rex);
match input_width {
RegisterWidth::W8 => {
// MOVSX r64, r/m8
buf.extend([rex, 0x0f, 0xbe, mod_rm]);
}
RegisterWidth::W16 => {
// MOVSX r64, r/m16
buf.extend([rex, 0x0f, 0xbf, mod_rm]);
}
RegisterWidth::W32 => {
// MOVSXD r64, r/m32
buf.extend([rex, 0x63, mod_rm]);
}
RegisterWidth::W64 => { /* a 64-bit source needs no extension */ }
}
}
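A quick encoding sanity check in the spirit of the disassembler tests below, assuming the file-internal names above are in scope: `movsx rax, cl` should encode as REX.W 0F BE /r with ModRM 0xC1.

fn main() {
    let arena = bumpalo::Bump::new();
    let mut buf = bumpalo::collections::Vec::new_in(&arena);
    raw_movsx_reg_reg(
        &mut buf,
        RegisterWidth::W8,
        X86_64GeneralReg::RAX, // dst: ends up in the reg field
        X86_64GeneralReg::RCX, // src: its low byte CL is sign-extended
    );
    assert_eq!(&buf[..], &[0x48, 0x0F, 0xBE, 0xC1][..]);
}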
/// `MOV r/m64,r64` -> Move r64 to r/m64.
/// This will not generate anything if dst and src are the same.
#[inline(always)]
fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64GeneralReg) {
mov_reg_reg(buf, RegisterWidth::W64, dst, src)
}
#[inline(always)]
fn mov_reg_reg(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
dst: X86_64GeneralReg,
src: X86_64GeneralReg,
) {
if dst != src {
raw_mov_reg_reg(buf, register_width, dst, src);
}
}
// The following base and stack based operations could be optimized based on how many bytes the offset actually is.
#[inline(always)]
fn mov_base_offset32_reg(
buf: &mut Vec<'_, u8>,
register_width: RegisterWidth,
base: X86_64GeneralReg,
offset: i32,
src: X86_64GeneralReg,
) {
match register_width {
RegisterWidth::W8 => mov_base8_offset32_reg8(buf, base, offset, src), // assumed 8-bit variant of the helpers below
RegisterWidth::W16 => mov_base16_offset32_reg16(buf, base, offset, src),
RegisterWidth::W32 => mov_base32_offset32_reg32(buf, base, offset, src),
RegisterWidth::W64 => mov_base64_offset32_reg64(buf, base, offset, src),
}
}
/// `MOV r/m64,r64` -> Move r64 to r/m64, where m64 references a base + offset.
#[inline(always)]
fn mov_base64_offset32_reg64(
@ -3051,8 +3260,8 @@ mod tests {
X86_64GeneralReg::RDX => "edx",
X86_64GeneralReg::RBP => "ebp",
X86_64GeneralReg::RSP => "esp",
X86_64GeneralReg::RDI => "edi",
X86_64GeneralReg::RSI => "esi",
X86_64GeneralReg::RDI => "edi",
X86_64GeneralReg::R8 => "r8d",
X86_64GeneralReg::R9 => "r9d",
X86_64GeneralReg::R10 => "r10d",
@ -3073,8 +3282,8 @@ mod tests {
X86_64GeneralReg::RDX => "dx",
X86_64GeneralReg::RBP => "bp",
X86_64GeneralReg::RSP => "sp",
X86_64GeneralReg::RDI => "di",
X86_64GeneralReg::RSI => "si",
X86_64GeneralReg::RDI => "di",
X86_64GeneralReg::R8 => "r8w",
X86_64GeneralReg::R9 => "r9w",
X86_64GeneralReg::R10 => "r10w",
@ -3095,8 +3304,9 @@ mod tests {
X86_64GeneralReg::RDX => "dl",
X86_64GeneralReg::RBP => "bpl",
X86_64GeneralReg::RSP => "spl",
X86_64GeneralReg::RDI => "dil",
X86_64GeneralReg::RSI => "sil",
X86_64GeneralReg::RDI => "dil",
X86_64GeneralReg::R8 => "r8b",
X86_64GeneralReg::R9 => "r9b",
X86_64GeneralReg::R10 => "r10b",
@ -3111,6 +3321,13 @@ mod tests {
const TEST_I32: i32 = 0x12345678;
const TEST_I64: i64 = 0x1234_5678_9ABC_DEF0;
const ALL_REGISTER_WIDTHS: &[RegisterWidth] = &[
RegisterWidth::W8,
RegisterWidth::W16,
RegisterWidth::W32,
RegisterWidth::W64,
];
const ALL_GENERAL_REGS: &[X86_64GeneralReg] = &[
X86_64GeneralReg::RAX,
X86_64GeneralReg::RBX,
@ -3118,8 +3335,8 @@ mod tests {
X86_64GeneralReg::RDX,
X86_64GeneralReg::RBP,
X86_64GeneralReg::RSP,
X86_64GeneralReg::RSI,
X86_64GeneralReg::RDI,
X86_64GeneralReg::R8,
X86_64GeneralReg::R9,
X86_64GeneralReg::R10,
@ -3434,8 +3651,58 @@ mod tests {
#[test]
fn test_mov_reg64_reg64() {
disassembler_test!(
raw_mov_reg_reg,
|w, reg1, reg2| {
match w {
RegisterWidth::W8 => format!(
"mov {}, {}",
X86_64GeneralReg::low_8bits_string(&reg1),
X86_64GeneralReg::low_8bits_string(&reg2)
),
RegisterWidth::W16 => format!(
"mov {}, {}",
X86_64GeneralReg::low_16bits_string(&reg1),
X86_64GeneralReg::low_16bits_string(&reg2)
),
RegisterWidth::W32 => format!(
"mov {}, {}",
X86_64GeneralReg::low_32bits_string(&reg1),
X86_64GeneralReg::low_32bits_string(&reg2)
),
RegisterWidth::W64 => format!("mov {}, {}", reg1, reg2),
}
},
ALL_REGISTER_WIDTHS,
ALL_GENERAL_REGS,
ALL_GENERAL_REGS
);
}
#[test]
fn test_movsx_reg64_reg64() {
disassembler_test!(
raw_movsx_reg_reg,
|w, reg1, reg2| {
match w {
RegisterWidth::W8 => format!(
"movsx {}, {}",
reg1,
X86_64GeneralReg::low_8bits_string(&reg2)
),
RegisterWidth::W16 => format!(
"movsx {}, {}",
reg1,
X86_64GeneralReg::low_16bits_string(&reg2)
),
RegisterWidth::W32 => format!(
"movsxd {}, {}",
reg1,
X86_64GeneralReg::low_32bits_string(&reg2)
),
RegisterWidth::W64 => String::new(),
}
},
ALL_REGISTER_WIDTHS,
ALL_GENERAL_REGS,
ALL_GENERAL_REGS
);

View File

@ -14,8 +14,8 @@ use roc_module::low_level::{LowLevel, LowLevelWrapperType};
use roc_module::symbol::{Interns, ModuleId, Symbol};
use roc_mono::code_gen_help::{CallerProc, CodeGenHelp};
use roc_mono::ir::{
BranchInfo, CallType, CrashTag, Expr, HigherOrderLowLevel, JoinPointId, ListLiteralElement,
Literal, Param, Proc, ProcLayout, SelfRecursive, Stmt,
};
use roc_mono::layout::{
Builtin, InLayout, Layout, LayoutIds, LayoutInterner, STLayoutInterner, TagIdIntType,
@ -279,9 +279,33 @@ trait Backend<'a> {
self.build_jump(id, args, arg_layouts.into_bump_slice(), ret_layout);
self.free_symbols(stmt);
}
Stmt::Crash(msg, crash_tag) => self.roc_panic(*msg, *crash_tag),
x => todo!("the statement, {:?}", x),
}
}
fn roc_panic(&mut self, msg: Symbol, crash_tag: CrashTag) {
self.load_literal(
&Symbol::DEV_TMP,
&Layout::U32,
&Literal::Int((crash_tag as u128).to_ne_bytes()),
);
// Now that the arguments are needed, load them if they are literals.
let arguments = &[msg, Symbol::DEV_TMP];
self.load_literal_symbols(arguments);
self.build_fn_call(
&Symbol::DEV_TMP2,
String::from("roc_panic"),
arguments,
&[Layout::STR, Layout::U32],
&Layout::UNIT,
);
self.free_symbol(&Symbol::DEV_TMP);
self.free_symbol(&Symbol::DEV_TMP2);
}
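This lowers a crash to a plain call of the host-provided `roc_panic` entry point. A hedged sketch of the host-side signature the call above assumes (a Roc string plus a u32 crash tag); illustrative, not the actual platform code:

use roc_std::RocStr;

#[no_mangle]
pub unsafe extern "C" fn roc_panic(msg: &RocStr, crash_tag: u32) -> ! {
    eprintln!("Roc crashed ({}): {}", crash_tag, msg.as_str());
    std::process::exit(1)
}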
// build_switch generates the instructions for a switch statement.
fn build_switch(
&mut self,
@ -546,22 +570,8 @@ trait Backend<'a> {
arg_layouts,
ret_layout,
),
LowLevel::NumMul => self.build_num_mul(sym, &args[0], &args[1], ret_layout),
LowLevel::NumMulWrap => self.build_num_mul(sym, &args[0], &args[1], ret_layout),
LowLevel::NumDivTruncUnchecked | LowLevel::NumDivFrac => {
debug_assert_eq!(
2,
@ -578,6 +588,8 @@ trait Backend<'a> {
);
self.build_num_div(sym, &args[0], &args[1], ret_layout)
}
LowLevel::NumRemUnchecked => self.build_num_rem(sym, &args[0], &args[1], ret_layout),
LowLevel::NumNeg => {
debug_assert_eq!(
1,
@ -1174,6 +1186,11 @@ trait Backend<'a> {
self.build_num_int_cast(sym, &args[0], source_width, target_width)
}
LowLevel::NumIsMultipleOf => {
let int_width = arg_layouts[0].try_int_width().unwrap();
let intrinsic = bitcode::NUM_IS_MULTIPLE_OF[int_width].to_string();
self.build_fn_call(sym, intrinsic, args, arg_layouts, ret_layout);
}
x => todo!("low level, {:?}", x),
}
}
@ -1328,6 +1345,9 @@ trait Backend<'a> {
/// build_num_div stores `src1 / src2` into dst.
fn build_num_div(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>);
/// build_num_rem stores `src1 % src2` into dst.
fn build_num_rem(&mut self, dst: &Symbol, src1: &Symbol, src2: &Symbol, layout: &InLayout<'a>);
/// build_num_neg stores the negated value of src into dst.
fn build_num_neg(&mut self, dst: &Symbol, src: &Symbol, layout: &InLayout<'a>);
@ -1788,7 +1808,9 @@ trait Backend<'a> {
Stmt::Expect { .. } => todo!("expect is not implemented in the dev backend"),
Stmt::ExpectFx { .. } => todo!("expect-fx is not implemented in the dev backend"),
Stmt::Crash(msg, _crash_tag) => {
self.set_last_seen(*msg, stmt);
}
}
}

View File

@ -1014,8 +1014,7 @@ impl<'a> LowLevelCall<'a> {
NumMulWrap => match self.ret_layout_raw {
Layout::Builtin(Builtin::Int(width)) => match width {
IntWidth::I128 | IntWidth::U128 => {
self.load_args_and_call_zig(backend, &bitcode::NUM_MUL_WRAP_INT[width])
}
IntWidth::I64 | IntWidth::U64 => {
self.load_args(backend);

View File

@ -54,11 +54,11 @@ pub fn eq_generic<'a>(
Stmt::Let(
Symbol::BOOL_TRUE,
Expr::Literal(Literal::Bool(true)),
LAYOUT_BOOL,
root.arena.alloc(Stmt::Let(
Symbol::BOOL_FALSE,
Expr::Literal(Literal::Bool(false)),
LAYOUT_BOOL,
root.arena.alloc(main_body),
)),

View File

@ -1176,29 +1176,21 @@ fn gen_div_checked_by_zero_i64() {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn gen_rem_i64() {
assert_evals_to!("Num.rem 8 3", 2, i64);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn gen_rem_checked_div_by_zero_i64() {
assert_evals_to!(
indoc!(
r#"
when Num.remChecked 8 0 is
Err DivByZero -> 4
Ok _ -> -23
"#
),
4,
i64
@ -1978,17 +1970,15 @@ fn float_negative_mul_overflow() {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn int_mul_wrap() {
assert_evals_to!(
indoc!(
r#"
Num.mulWrap Num.maxI64 2
"#
),
-2,
i64
);
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn int_mul_wrap_i64() {
assert_evals_to!("Num.mulWrap Num.maxI64 2", -2, i64);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn int_mul_wrap_i128() {
assert_evals_to!("Num.mulWrap Num.maxI128 2", -2, i128);
}
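What these tests assert, stated directly in Rust: wrapping multiplication discards overflow, so doubling the maximum value lands on -2.

fn main() {
    assert_eq!(i64::MAX.wrapping_mul(2), -2);
    assert_eq!(i128::MAX.wrapping_mul(2), -2);
}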
#[test]
@ -2107,7 +2097,7 @@ fn shift_right_zf_by() {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn shift_right_cast_i8() {
// FIXME (Brian) Something funny happening with 8-bit binary literals in tests
@ -2141,311 +2131,135 @@ fn shift_right_cast_i8() {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn min_i128() {
assert_evals_to!("Num.minI128", i128::MIN, i128);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn max_i128() {
assert_evals_to!("Num.maxI128", i128::MAX, i128);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn min_i64() {
assert_evals_to!("Num.minI64", i64::MIN, i64);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn max_i64() {
assert_evals_to!("Num.maxI64", i64::MAX, i64);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn min_u64() {
assert_evals_to!("Num.minU64", u64::MIN, u64);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn max_u64() {
assert_evals_to!("Num.maxU64", u64::MAX, u64);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn min_i32() {
assert_evals_to!("Num.minI32", i32::MIN, i32);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn max_i32() {
assert_evals_to!("Num.maxI32", i32::MAX, i32);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn min_u32() {
assert_evals_to!("Num.minU32", u32::MIN, u32);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn max_u32() {
assert_evals_to!("Num.maxU32", u32::MAX, u32);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn min_i16() {
assert_evals_to!("Num.minI16", i16::MIN, i16);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn max_i16() {
assert_evals_to!("Num.maxI16", i16::MAX, i16);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn min_u16() {
assert_evals_to!("Num.minU16", u16::MIN, u16);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn max_u16() {
assert_evals_to!("Num.maxU16", u16::MAX, u16);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn min_i8() {
assert_evals_to!("Num.minI8", i8::MIN, i8);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn max_i8() {
assert_evals_to!("Num.maxI8", i8::MAX, i8);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn min_u8() {
assert_evals_to!("Num.minU8", u8::MIN, u8);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn max_u8() {
assert_evals_to!("Num.maxU8", u8::MAX, u8);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn max_f64() {
assert_evals_to!("Num.maxF64", f64::MAX, f64);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn min_f64() {
assert_evals_to!("Num.minF64", f64::MIN, f64);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn max_f32() {
assert_evals_to!("Num.maxF32", f32::MAX, f32);
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm"))]
fn min_f32() {
assert_evals_to!("Num.minF32", f32::MIN, f32);
}
#[test]
@ -2458,7 +2272,7 @@ fn to_nat_truncate_wraps() {
macro_rules! num_conversion_tests {
($($fn:expr, $typ:ty, ($($test_name:ident, $input:expr, $output:expr $(, [$($support_gen:literal),*])? )*))*) => {$($(
#[test]
#[cfg(any(feature = "gen-llvm", $($(feature = $support_gen)*)?))]
#[cfg(any(feature = "gen-llvm", $($(feature = $support_gen,)*)?))]
fn $test_name() {
let input = format!("{} {}", $fn, $input);
assert_evals_to!(&input, $output, $typ)
@ -2468,25 +2282,31 @@ macro_rules! num_conversion_tests {
num_conversion_tests! {
"Num.toI8", i8, (
to_i8_same_width, "15u8", 15, ["gen-wasm", "gen-dev"]
to_i8_truncate, "115i32", 115, ["gen-wasm", "gen-dev"]
to_i8_truncate_wraps, "500i32", -12, ["gen-wasm", "gen-dev"]
)
"Num.toI16", i16, (
to_i16_same_width, "15u16", 15, ["gen-wasm", "gen-dev"]
to_i16_extend, "15i8", 15, ["gen-wasm", "gen-dev"]
to_i16_sign_extend_i8, "-15i8", -15, ["gen-wasm", "gen-dev"]
to_i16_truncate, "115i32", 115, ["gen-wasm", "gen-dev"]
to_i16_truncate_wraps, "60000i32", -5536, ["gen-wasm", "gen-dev"]
)
"Num.toI32", i32, (
to_i32_same_width, "15u32", 15, ["gen-wasm", "gen-dev"]
to_i32_extend, "15i8", 15, ["gen-wasm", "gen-dev"]
to_i32_sign_extend_i8, "-15i8", -15, ["gen-wasm", "gen-dev"]
to_i32_sign_extend_i16, "-15i16", -15, ["gen-wasm", "gen-dev"]
to_i32_truncate, "115i64", 115, ["gen-wasm", "gen-dev"]
to_i32_truncate_wraps, "5000000000i64", 705032704, ["gen-wasm", "gen-dev"]
)
"Num.toI64", i64, (
to_i64_same_width, "15u64", 15, ["gen-wasm", "gen-dev"]
to_i64_extend, "15i8", 15, ["gen-wasm", "gen-dev"]
to_i64_sign_extend_i8, "-15i8", -15, ["gen-wasm", "gen-dev"]
to_i64_sign_extend_i16, "-15i16", -15, ["gen-wasm", "gen-dev"]
to_i64_sign_extend_i32, "-15i32", -15, ["gen-wasm", "gen-dev"]
to_i64_truncate, "115i128", 115
to_i64_truncate_wraps, "10_000_000_000_000_000_000i128", -8446744073709551616
)
@ -2495,25 +2315,25 @@ num_conversion_tests! {
to_i128_extend, "15i8", 15
)
"Num.toU8", u8, (
to_u8_same_width, "15i8", 15, ["gen-wasm", "gen-dev"]
to_u8_truncate, "115i32", 115, ["gen-wasm", "gen-dev"]
to_u8_truncate_wraps, "500i32", 244, ["gen-wasm", "gen-dev"]
)
"Num.toU16", u16, (
to_u16_same_width, "15i16", 15, ["gen-wasm", "gen-dev"]
to_u16_extend, "15i8", 15, ["gen-wasm", "gen-dev"]
to_u16_truncate, "115i32", 115, ["gen-wasm", "gen-dev"]
to_u16_truncate_wraps, "600000000i32", 17920, ["gen-wasm", "gen-dev"]
)
"Num.toU32", u32, (
to_u32_same_width, "15i32", 15, ["gen-wasm", "gen-dev"]
to_u32_extend, "15i8", 15, ["gen-wasm", "gen-dev"]
to_u32_truncate, "115i64", 115, ["gen-wasm", "gen-dev"]
to_u32_truncate_wraps, "5000000000000000000i64", 1156841472, ["gen-wasm", "gen-dev"]
)
"Num.toU64", u64, (
to_u64_same_width, "15i64", 15, ["gen-wasm", "gen-dev"]
to_u64_extend, "15i8", 15, ["gen-wasm", "gen-dev"]
to_u64_truncate, "115i128", 115
to_u64_truncate_wraps, "10_000_000_000_000_000_000_000i128", 1864712049423024128
)
@ -2522,8 +2342,8 @@ num_conversion_tests! {
to_u128_extend, "15i8", 15
)
"Num.toNat", usize, (
to_nat_same_width, "15i64", 15, ["gen-wasm", "gen-dev"]
to_nat_extend, "15i8", 15, ["gen-wasm", "gen-dev"]
to_nat_truncate, "115i128", 115
)
"Num.toF32", f32, (
@ -2704,7 +2524,7 @@ to_int_checked_tests! {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn is_multiple_of_signed() {
// true
assert_evals_to!("Num.isMultipleOf 5 1", true, bool);
@ -3710,7 +3530,7 @@ fn to_float_f64() {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
// https://github.com/roc-lang/roc/issues/2696
fn upcast_of_int_is_zext() {
assert_evals_to!(
@ -3922,7 +3742,7 @@ fn when_on_decimals() {
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn when_on_i128() {
assert_evals_to!(
indoc!(

View File

@ -3288,6 +3288,25 @@ fn box_and_unbox_big_string() {
)
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn box_and_unbox_nonrecursive_tag() {
assert_evals_to!(
indoc!(
r#"
result : Result U64 U64
result = Ok 42
result
|> Box.box
|> Box.unbox
"#
),
roc_std::RocResult::ok(42),
roc_std::RocResult<u64, u64>
)
}
#[test]
#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
fn box_num() {