Pass layout to literal loading in dev backend
commit 54861ef5fa
parent 7174e25007
@@ -359,6 +359,15 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
         unimplemented!("jump not equal instructions not yet implemented for AArch64");
     }
 
+    #[inline(always)]
+    fn mov_freg32_imm32(
+        _buf: &mut Vec<'_, u8>,
+        _relocs: &mut Vec<'_, Relocation>,
+        _dst: AArch64FloatReg,
+        _imm: f32,
+    ) {
+        unimplemented!("loading f32 literal not yet implemented for AArch64");
+    }
     #[inline(always)]
     fn mov_freg64_imm64(
         _buf: &mut Vec<'_, u8>,
@@ -366,7 +375,7 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
         _dst: AArch64FloatReg,
         _imm: f64,
     ) {
-        unimplemented!("loading float literal not yet implemented for AArch64");
+        unimplemented!("loading f64 literal not yet implemented for AArch64");
     }
     #[inline(always)]
     fn mov_reg64_imm64(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, imm: i64) {
@@ -130,6 +130,12 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait> {
         offset: i32,
     ) -> usize;
 
+    fn mov_freg32_imm32(
+        buf: &mut Vec<'_, u8>,
+        relocs: &mut Vec<'_, Relocation>,
+        dst: FloatReg,
+        imm: f32,
+    );
     fn mov_freg64_imm64(
         buf: &mut Vec<'_, u8>,
         relocs: &mut Vec<'_, Relocation>,
@@ -244,7 +250,7 @@ pub struct Backend64Bit<
     free_map: MutMap<*const Stmt<'a>, Vec<'a, Symbol>>,
 
     symbol_storage_map: MutMap<Symbol, SymbolStorage<GeneralReg, FloatReg>>,
-    literal_map: MutMap<Symbol, Literal<'a>>,
+    literal_map: MutMap<Symbol, (Literal<'a>, Layout<'a>)>,
     join_map: MutMap<JoinPointId, u64>,
 
     // This should probably be smarter than a vec.
@@ -331,7 +337,7 @@ impl<
             .extend_from_slice(CC::FLOAT_DEFAULT_FREE_REGS);
     }
 
-    fn literal_map(&mut self) -> &mut MutMap<Symbol, Literal<'a>> {
+    fn literal_map(&mut self) -> &mut MutMap<Symbol, (Literal<'a>, Layout<'a>)> {
         &mut self.literal_map
     }
 
@@ -977,19 +983,36 @@ impl<
         }
     }
 
-    fn load_literal(&mut self, sym: &Symbol, lit: &Literal<'a>) {
-        match lit {
-            Literal::Int(x) => {
+    fn load_literal(&mut self, sym: &Symbol, layout: &Layout<'a>, lit: &Literal<'a>) {
+        match (lit, layout) {
+            (
+                Literal::Int(x),
+                Layout::Builtin(Builtin::Int(
+                    IntWidth::U8
+                    | IntWidth::U16
+                    | IntWidth::U32
+                    | IntWidth::U64
+                    | IntWidth::I8
+                    | IntWidth::I16
+                    | IntWidth::I32
+                    | IntWidth::I64,
+                )),
+            ) => {
                 let reg = self.claim_general_reg(sym);
                 let val = *x;
                 ASM::mov_reg64_imm64(&mut self.buf, reg, val as i64);
             }
-            Literal::Float(x) => {
+            (Literal::Float(x), Layout::Builtin(Builtin::Float(FloatWidth::F64))) => {
                 let reg = self.claim_float_reg(sym);
                 let val = *x;
                 ASM::mov_freg64_imm64(&mut self.buf, &mut self.relocs, reg, val);
             }
-            Literal::Str(x) if x.len() < 16 => {
+            (Literal::Float(x), Layout::Builtin(Builtin::Float(FloatWidth::F32))) => {
+                let reg = self.claim_float_reg(sym);
+                let val = *x as f32;
+                ASM::mov_freg32_imm32(&mut self.buf, &mut self.relocs, reg, val);
+            }
+            (Literal::Str(x), Layout::Builtin(Builtin::Str)) if x.len() < 16 => {
                 // Load small string.
                 let reg = self.get_tmp_general_reg();
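Why the layout matters in this hunk: a `Literal::Float` always carries an `f64` value, and only the paired `Layout` says whether it should be loaded into the float register as 64 bits or narrowed to 32 bits first (the new `FloatWidth::F32` arm). A minimal standalone sketch of that dispatch, with simplified stand-in types (`Lit` and `Lay` are illustrative only, not the real `roc_mono` `Literal`/`Layout`):

// Simplified stand-ins for Literal and Layout, just to illustrate the dispatch.
#[derive(Clone, Copy)]
enum Lit {
    Int(i64),
    Float(f64),
}

#[derive(Clone, Copy)]
enum Lay {
    Int64,
    Float32,
    Float64,
}

// The same literal value is encoded differently depending on the layout it is paired with.
fn literal_bytes(lit: Lit, layout: Lay) -> Vec<u8> {
    match (lit, layout) {
        (Lit::Int(x), Lay::Int64) => x.to_le_bytes().to_vec(),
        (Lit::Float(x), Lay::Float64) => x.to_le_bytes().to_vec(),
        // Narrow to f32 only when the layout asks for it.
        (Lit::Float(x), Lay::Float32) => (x as f32).to_le_bytes().to_vec(),
        _ => unimplemented!("literal/layout combination not handled in this sketch"),
    }
}

fn main() {
    assert_eq!(literal_bytes(Lit::Float(9.0), Lay::Float64).len(), 8);
    assert_eq!(literal_bytes(Lit::Float(9.0), Lay::Float32).len(), 4);
    assert_eq!(literal_bytes(Lit::Int(9), Lay::Int64).len(), 8);
}

Matching on the (literal, layout) pair also lets unsupported combinations fail loudly instead of silently picking a width.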
@@ -963,6 +963,19 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
         buf.len()
     }
 
+    #[inline(always)]
+    fn mov_freg32_imm32(
+        buf: &mut Vec<'_, u8>,
+        relocs: &mut Vec<'_, Relocation>,
+        dst: X86_64FloatReg,
+        imm: f32,
+    ) {
+        movss_freg32_rip_offset32(buf, dst, 0);
+        relocs.push(Relocation::LocalData {
+            offset: buf.len() as u64 - 4,
+            data: imm.to_le_bytes().to_vec(),
+        });
+    }
     #[inline(always)]
     fn mov_freg64_imm64(
         buf: &mut Vec<'_, u8>,
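The x86_64 implementation reuses the emit-then-relocate pattern already used for f64 literals: the MOVSS is emitted with a placeholder displacement of 0, then a `Relocation::LocalData` is pushed whose `offset` points at the 4 displacement bytes just written (`buf.len() - 4`) and whose `data` holds the little-endian bytes of the literal, so the displacement can be patched once the literal is placed in the binary. A rough sketch of that bookkeeping, using a simplified stand-in record rather than the real `Relocation` type from the dev backend:

// Stand-in for the relocation record; the real type is defined in the dev backend.
struct LocalData {
    offset: u64,   // position of the 4-byte displacement inside the code buffer
    data: Vec<u8>, // literal bytes to be emitted into the binary near the code
}

fn emit_load_f32_sketch(buf: &mut Vec<u8>, relocs: &mut Vec<LocalData>, imm: f32) {
    // Stand-in opcode bytes for a RIP-relative 32-bit load (the XMM0 form).
    buf.extend_from_slice(&[0xF3, 0x0F, 0x10, 0x05]);
    // Placeholder displacement; whoever resolves relocations later patches these 4 bytes.
    buf.extend_from_slice(&0u32.to_le_bytes());
    relocs.push(LocalData {
        offset: buf.len() as u64 - 4,     // start of the displacement field
        data: imm.to_le_bytes().to_vec(), // the 4 bytes of the f32 literal
    });
}

fn main() {
    let mut buf = Vec::new();
    let mut relocs = Vec::new();
    emit_load_f32_sketch(&mut buf, &mut relocs, 9.0);
    assert_eq!(relocs[0].offset, 4); // the displacement starts right after the opcode bytes
    assert_eq!(relocs[0].data, 9.0f32.to_le_bytes());
}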
@@ -1439,6 +1452,20 @@ fn movsd_freg64_freg64(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, src: X86_64Fl
     }
 }
 
+// `MOVSS xmm, m32` -> Load scalar single-precision floating-point value from m32 to xmm register.
+#[inline(always)]
+fn movss_freg32_rip_offset32(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, offset: u32) {
+    let dst_mod = dst as u8 % 8;
+    if dst as u8 > 7 {
+        buf.reserve(9);
+        buf.extend(&[0xF3, 0x44, 0x0F, 0x10, 0x05 + (dst_mod << 3)]);
+    } else {
+        buf.reserve(8);
+        buf.extend(&[0xF3, 0x0F, 0x10, 0x05 + (dst_mod << 3)]);
+    }
+    buf.extend(&offset.to_le_bytes());
+}
+
 // `MOVSD xmm, m64` -> Load scalar double-precision floating-point value from m64 to xmm register.
 #[inline(always)]
 fn movsd_freg64_rip_offset32(buf: &mut Vec<'_, u8>, dst: X86_64FloatReg, offset: u32) {
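The byte math in `movss_freg32_rip_offset32`: `0xF3 0x0F 0x10` is the MOVSS opcode, and `0x05 + (dst_mod << 3)` builds a ModRM byte with mod = 00 and rm = 101 (RIP-relative disp32), placing the low three bits of the destination register index in the reg field; XMM8..XMM15 additionally need the REX.R prefix `0x44`. A quick standalone check of that arithmetic against the bytes the new test below expects:

// ModRM for RIP-relative addressing: mod = 00, rm = 101 gives the 0x05 base byte,
// and the (low three bits of the) destination register index goes into bits 3..5.
fn modrm_rip_relative(reg_index: u8) -> u8 {
    0x05 + ((reg_index % 8) << 3)
}

fn main() {
    assert_eq!(modrm_rip_relative(0), 0x05); // XMM0  -> F3 0F 10 05
    assert_eq!(modrm_rip_relative(15), 0x3D); // XMM15 -> F3 44 0F 10 3D (0x44 = REX.R)
    println!("ModRM bytes match the expected encodings");
}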
@@ -2145,6 +2172,27 @@ mod tests {
         }
     }
 
+    #[test]
+    fn test_movss_freg32_rip_offset32() {
+        let arena = bumpalo::Bump::new();
+        let mut buf = bumpalo::vec![in &arena];
+        for ((dst, offset), expected) in &[
+            (
+                (X86_64FloatReg::XMM0, TEST_I32),
+                vec![0xF3, 0x0F, 0x10, 0x05],
+            ),
+            (
+                (X86_64FloatReg::XMM15, TEST_I32),
+                vec![0xF3, 0x44, 0x0F, 0x10, 0x3D],
+            ),
+        ] {
+            buf.clear();
+            movss_freg32_rip_offset32(&mut buf, *dst, *offset as u32);
+            assert_eq!(&expected[..], &buf[..(buf.len() - 4)]);
+            assert_eq!(TEST_I32.to_le_bytes(), &buf[(buf.len() - 4)..]);
+        }
+    }
+
     #[test]
     fn test_movsd_freg64_rip_offset32() {
         let arena = bumpalo::Bump::new();
@@ -196,9 +196,9 @@ where
         match expr {
             Expr::Literal(lit) => {
                 if self.env().lazy_literals {
-                    self.literal_map().insert(*sym, *lit);
+                    self.literal_map().insert(*sym, (*lit, *layout));
                 } else {
-                    self.load_literal(sym, lit);
+                    self.load_literal(sym, layout, lit);
                 }
             }
             Expr::Call(roc_mono::ir::Call {
@@ -494,7 +494,7 @@ where
             "NumIsZero: expected to have return layout of type Bool"
         );
 
-        self.load_literal(&Symbol::DEV_TMP, &Literal::Int(0));
+        self.load_literal(&Symbol::DEV_TMP, &arg_layouts[0], &Literal::Int(0));
         self.build_eq(sym, &args[0], &Symbol::DEV_TMP, &arg_layouts[0]);
         self.free_symbol(&Symbol::DEV_TMP)
     }
@@ -546,19 +546,22 @@ where
         ret_layout: &Layout<'a>,
     );
 
-    /// literal_map gets the map from symbol to literal, used for lazy loading and literal folding.
-    fn literal_map(&mut self) -> &mut MutMap<Symbol, Literal<'a>>;
+    /// literal_map gets the map from symbol to literal and layout, used for lazy loading and literal folding.
+    fn literal_map(&mut self) -> &mut MutMap<Symbol, (Literal<'a>, Layout<'a>)>;
 
     fn load_literal_symbols(&mut self, syms: &[Symbol]) {
         if self.env().lazy_literals {
             for sym in syms {
-                if let Some(lit) = self.literal_map().remove(sym) {
-                    self.load_literal(sym, &lit);
+                if let Some((lit, layout)) = self.literal_map().remove(sym) {
+                    self.load_literal(sym, &layout, &lit);
                 }
             }
         }
     }
 
+    /// load_literal sets a symbol to be equal to a literal.
+    fn load_literal(&mut self, sym: &Symbol, layout: &Layout<'a>, lit: &Literal<'a>);
+
     /// create_struct creates a struct with the elements specified loaded into it as data.
     fn create_struct(&mut self, sym: &Symbol, layout: &Layout<'a>, fields: &'a [Symbol]);
 
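For context on the trait change: with `lazy_literals` enabled, a literal is not lowered at its definition site; it is parked in `literal_map` together with its layout and only emitted the first time a later instruction uses the symbol, which is why the map now has to carry the layout as well. A toy sketch of that park-then-drain flow, with a plain `HashMap` and string keys standing in for `MutMap` and `Symbol`:

use std::collections::HashMap;

// Toy stand-ins: a "symbol" is a string, a "literal" is an f64, a "layout" is a width in bits.
fn main() {
    let mut literal_map: HashMap<&str, (f64, u32)> = HashMap::new();
    let mut emitted: Vec<String> = Vec::new();

    // Definition site: park the literal with its layout instead of emitting code.
    literal_map.insert("x", (9.0, 32));

    // First real use: drain the entry and lower it with the stored layout.
    for sym in ["x", "y"] {
        if let Some((lit, width)) = literal_map.remove(sym) {
            emitted.push(format!("load {sym}: {lit} as f{width}"));
        }
    }

    assert_eq!(emitted, vec!["load x: 9 as f32"]);
}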
@@ -571,9 +574,6 @@ where
         field_layouts: &'a [Layout<'a>],
     );
 
-    /// load_literal sets a symbol to be equal to a literal.
-    fn load_literal(&mut self, sym: &Symbol, lit: &Literal<'a>);
-
     /// return_symbol moves a symbol to the correct return location for the backend and adds a jump to the end of the function.
     fn return_symbol(&mut self, sym: &Symbol, layout: &Layout<'a>);
 
@@ -1309,7 +1309,7 @@ fn num_to_float() {
 }
 
 #[test]
-#[cfg(any(feature = "gen-dev"))]
+#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
 fn num_to_float_f64_to_f32() {
     assert_evals_to!(
         indoc!(
@@ -1327,25 +1327,25 @@ fn num_to_float_f64_to_f32() {
     );
 }
 
-// #[test]
-// #[cfg(any(feature = "gen-dev"))]
-// fn num_to_float_f32_to_f64() {
-//     assert_evals_to!(
-//         indoc!(
-//             r#"
+#[test]
+#[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]
+fn num_to_float_f32_to_f64() {
+    assert_evals_to!(
+        indoc!(
+            r#"
 
-//             f32 : F32
-//             f32 = 9.0
+            f32 : F32
+            f32 = 9.0
 
-//             f64 : F64
-//             f64 = Num.toFloat f32
-//             f64
-//             "#
-//         ),
-//         9.0,
-//         f64
-//     );
-// }
+            f64 : F64
+            f64 = Num.toFloat f32
+            f64
+            "#
+        ),
+        9.0,
+        f64
+    );
+}
 
 #[test]
 #[cfg(any(feature = "gen-llvm", feature = "gen-wasm", feature = "gen-dev"))]