Switch to base pointer offset addressing.

This change is needed to handle passing function arguments.
Without it, we would need multiple passes, because function
arguments sit on top of the stack and would conflict with variables.
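To illustrate the motivation (this sketch is not part of the commit, and every name and number in it is hypothetical): with stack-pointer-relative addressing, a local's offset depends on the final frame size, which is only known after the whole function has been scanned, whereas a base-pointer-relative offset is fixed the moment the slot is assigned, so it can be emitted in a single pass.

// Minimal sketch, not part of the commit: base-pointer offsets stay stable while
// stack-pointer offsets shift whenever the frame later grows (e.g. to make room
// for outgoing call arguments).
fn main() {
    // A local assigned to the first 8-byte slot below the base pointer:
    let local_base_offset: i32 = -8; // addressed as [base_pointer - 8], forever

    // Its stack-pointer-relative offset for two possible final frame sizes:
    let frame_without_call_args: i32 = 16;
    let frame_with_call_args: i32 = 48; // extra space discovered later in the pass
    let sp_offset_small = frame_without_call_args + local_base_offset; // 8
    let sp_offset_large = frame_with_call_args + local_base_offset; // 40

    // The stack-pointer offset shifts when the frame grows...
    assert_ne!(sp_offset_small, sp_offset_large);
    // ...while the base-pointer offset stays put, so it can be emitted immediately.
    assert_eq!(local_base_offset, -8);
}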
Brendan Hansknecht 2021-02-12 16:19:31 -08:00
parent 846c82961f
commit 5a3ec0c0ac
4 changed files with 194 additions and 114 deletions

View File

@ -136,28 +136,24 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg> for AArch64Call {
#[inline(always)]
fn setup_stack(
buf: &mut Vec<'_, u8>,
leaf_function: bool,
saved_regs: &[AArch64GeneralReg],
requested_stack_size: i32,
) -> Result<i32, String> {
// full size is upcast to i64 to make sure we don't overflow here.
let mut full_size = 8 * saved_regs.len() as i64 + requested_stack_size as i64;
if !leaf_function {
full_size += 8;
}
let alignment = if full_size <= 0 {
// Full size is upcast to i64 to make sure we don't overflow here.
let full_stack_size = requested_stack_size
.checked_add(8 * saved_regs.len() as i32 + 8) // The extra 8 is space to store the frame pointer.
.ok_or("Ran out of stack space")?;
let alignment = if full_stack_size <= 0 {
0
} else {
full_size % STACK_ALIGNMENT as i64
full_stack_size % STACK_ALIGNMENT as i32
};
let offset = if alignment == 0 {
0
} else {
STACK_ALIGNMENT - alignment as u8
};
if let Some(aligned_stack_size) =
requested_stack_size.checked_add(8 * saved_regs.len() as i32 + offset as i32)
{
if let Some(aligned_stack_size) = full_stack_size.checked_add(offset as i32) {
if aligned_stack_size > 0 {
AArch64Assembler::sub_reg64_reg64_imm32(
buf,
@ -168,12 +164,10 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg> for AArch64Call {
// All the following stores could be optimized by using `STP` to store pairs.
let mut offset = aligned_stack_size;
if !leaf_function {
offset -= 8;
AArch64Assembler::mov_stack32_reg64(buf, offset, AArch64GeneralReg::LR);
offset -= 8;
AArch64Assembler::mov_stack32_reg64(buf, offset, AArch64GeneralReg::FP);
}
offset -= 8;
AArch64Assembler::mov_stack32_reg64(buf, offset, AArch64GeneralReg::LR);
offset -= 8;
AArch64Assembler::mov_stack32_reg64(buf, offset, AArch64GeneralReg::FP);
for reg in saved_regs {
offset -= 8;
AArch64Assembler::mov_stack32_reg64(buf, offset, *reg);
@ -190,19 +184,16 @@ impl CallConv<AArch64GeneralReg, AArch64FloatReg> for AArch64Call {
#[inline(always)]
fn cleanup_stack(
buf: &mut Vec<'_, u8>,
leaf_function: bool,
saved_regs: &[AArch64GeneralReg],
aligned_stack_size: i32,
) -> Result<(), String> {
if aligned_stack_size > 0 {
// All the following loads could be optimized by using `LDP` to load pairs.
let mut offset = aligned_stack_size;
if !leaf_function {
offset -= 8;
AArch64Assembler::mov_reg64_stack32(buf, AArch64GeneralReg::LR, offset);
offset -= 8;
AArch64Assembler::mov_reg64_stack32(buf, AArch64GeneralReg::FP, offset);
}
offset -= 8;
AArch64Assembler::mov_reg64_stack32(buf, AArch64GeneralReg::LR, offset);
offset -= 8;
AArch64Assembler::mov_reg64_stack32(buf, AArch64GeneralReg::FP, offset);
for reg in saved_regs {
offset -= 8;
AArch64Assembler::mov_reg64_stack32(buf, *reg, offset);
@ -305,6 +296,43 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
mov_reg64_reg64(buf, dst, src);
}
#[inline(always)]
fn mov_freg64_base32(_buf: &mut Vec<'_, u8>, _dst: AArch64FloatReg, _offset: i32) {
unimplemented!(
"loading floating point reg from base offset not yet implemented for AArch64"
);
}
#[inline(always)]
fn mov_reg64_base32(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, offset: i32) {
if offset < 0 {
unimplemented!("negative base offsets are not yet implement for AArch64");
} else if offset < (0xFFF << 8) {
debug_assert!(offset % 8 == 0);
ldr_reg64_imm12(buf, dst, AArch64GeneralReg::FP, (offset as u16) >> 3);
} else {
unimplemented!("base offsets over 32k are not yet implement for AArch64");
}
}
#[inline(always)]
fn mov_base32_freg64(_buf: &mut Vec<'_, u8>, _offset: i32, _src: AArch64FloatReg) {
unimplemented!("saving floating point reg to base offset not yet implemented for AArch64");
}
#[inline(always)]
fn mov_base32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: AArch64GeneralReg) {
if offset < 0 {
unimplemented!("negative base offsets are not yet implement for AArch64");
} else if offset < (0xFFF << 8) {
debug_assert!(offset % 8 == 0);
str_reg64_imm12(buf, src, AArch64GeneralReg::FP, (offset as u16) >> 3);
} else {
unimplemented!("base offsets over 32k are not yet implement for AArch64");
}
}
#[inline(always)]
fn mov_freg64_stack32(_buf: &mut Vec<'_, u8>, _dst: AArch64FloatReg, _offset: i32) {
unimplemented!("loading floating point reg from stack not yet implemented for AArch64");
}
#[inline(always)]
fn mov_reg64_stack32(buf: &mut Vec<'_, u8>, dst: AArch64GeneralReg, offset: i32) {
if offset < 0 {
@ -316,16 +344,10 @@ impl Assembler<AArch64GeneralReg, AArch64FloatReg> for AArch64Assembler {
unimplemented!("stack offsets over 32k are not yet implement for AArch64");
}
}
fn mov_freg64_stack32(_buf: &mut Vec<'_, u8>, _dst: AArch64FloatReg, _offset: i32) {
unimplemented!("loading floating point reg from stack not yet implemented for AArch64");
}
#[inline(always)]
fn mov_stack32_freg64(_buf: &mut Vec<'_, u8>, _offset: i32, _src: AArch64FloatReg) {
unimplemented!("saving floating point reg to stack not yet implemented for AArch64");
}
#[inline(always)]
fn mov_stack32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: AArch64GeneralReg) {
if offset < 0 {

View File

@ -34,13 +34,11 @@ pub trait CallConv<GeneralReg: RegTrait, FloatReg: RegTrait> {
fn setup_stack<'a>(
buf: &mut Vec<'a, u8>,
leaf_function: bool,
general_saved_regs: &[GeneralReg],
requested_stack_size: i32,
) -> Result<i32, String>;
fn cleanup_stack<'a>(
buf: &mut Vec<'a, u8>,
leaf_function: bool,
general_saved_regs: &[GeneralReg],
aligned_stack_size: i32,
) -> Result<(), String>;
@ -77,10 +75,17 @@ pub trait Assembler<GeneralReg: RegTrait, FloatReg: RegTrait> {
fn mov_reg64_imm64(buf: &mut Vec<'_, u8>, dst: GeneralReg, imm: i64);
fn mov_freg64_freg64(buf: &mut Vec<'_, u8>, dst: FloatReg, src: FloatReg);
fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: GeneralReg, src: GeneralReg);
fn mov_freg64_base32(buf: &mut Vec<'_, u8>, dst: FloatReg, offset: i32);
fn mov_reg64_base32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32);
fn mov_base32_freg64(buf: &mut Vec<'_, u8>, offset: i32, src: FloatReg);
fn mov_base32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: GeneralReg);
fn mov_freg64_stack32(buf: &mut Vec<'_, u8>, dst: FloatReg, offset: i32);
fn mov_reg64_stack32(buf: &mut Vec<'_, u8>, dst: GeneralReg, offset: i32);
fn mov_stack32_freg64(buf: &mut Vec<'_, u8>, offset: i32, src: FloatReg);
fn mov_stack32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: GeneralReg);
fn sub_reg64_reg64_imm32(buf: &mut Vec<'_, u8>, dst: GeneralReg, src1: GeneralReg, imm32: i32);
fn sub_reg64_reg64_reg64(
buf: &mut Vec<'_, u8>,
@ -104,9 +109,9 @@ enum SymbolStorage<GeneralReg: RegTrait, FloatReg: RegTrait> {
// I think whenever a symbol would be used, we specify layout anyways.
GeneralReg(GeneralReg),
FloatReg(FloatReg),
Stack(i32),
StackAndGeneralReg(GeneralReg, i32),
StackAndFloatReg(FloatReg, i32),
Base(i32),
BaseAndGeneralReg(GeneralReg, i32),
BaseAndFloatReg(FloatReg, i32),
}
pub trait RegTrait: Copy + Eq + std::hash::Hash + std::fmt::Debug + 'static {}
@ -124,10 +129,6 @@ pub struct Backend64Bit<
buf: Vec<'a, u8>,
relocs: Vec<'a, Relocation>,
/// leaf_function is true if the only calls this function makes are tail calls.
/// If that is the case, we can skip emitting the frame pointer and updating the stack.
leaf_function: bool,
last_seen_map: MutMap<Symbol, *const Stmt<'a>>,
free_map: MutMap<*const Stmt<'a>, Vec<'a, Symbol>>,
symbols_map: MutMap<Symbol, SymbolStorage<GeneralReg, FloatReg>>,
@ -164,7 +165,6 @@ impl<
phantom_asm: PhantomData,
phantom_cc: PhantomData,
env,
leaf_function: true,
buf: bumpalo::vec!(in env.arena),
relocs: bumpalo::vec!(in env.arena),
last_seen_map: MutMap::default(),
@ -187,7 +187,6 @@ impl<
fn reset(&mut self) {
self.stack_size = 0;
self.leaf_function = true;
self.last_seen_map.clear();
self.free_map.clear();
self.symbols_map.clear();
@ -204,11 +203,6 @@ impl<
.extend_from_slice(CC::FLOAT_DEFAULT_FREE_REGS);
}
fn set_not_leaf_function(&mut self) {
self.leaf_function = false;
self.stack_size = CC::SHADOW_SPACE_SIZE as i32;
}
fn literal_map(&mut self) -> &mut MutMap<Symbol, Literal<'a>> {
&mut self.literal_map
}
@ -231,15 +225,14 @@ impl<
// Setup stack.
let mut used_regs = bumpalo::vec![in self.env.arena];
used_regs.extend(&self.general_used_callee_saved_regs);
let aligned_stack_size =
CC::setup_stack(&mut out, self.leaf_function, &used_regs, self.stack_size)?;
let aligned_stack_size = CC::setup_stack(&mut out, &used_regs, self.stack_size)?;
let setup_offset = out.len();
// Add function body.
out.extend(&self.buf);
// Cleanup stack.
CC::cleanup_stack(&mut out, self.leaf_function, &used_regs, aligned_stack_size)?;
CC::cleanup_stack(&mut out, &used_regs, aligned_stack_size)?;
ASM::ret(&mut out);
// Update relocs to include stack setup offset.
@ -489,19 +482,19 @@ impl<
Some(SymbolStorage::FloatReg(_reg)) => {
Err("Cannot load floating point symbol into GeneralReg".to_string())
}
Some(SymbolStorage::StackAndGeneralReg(reg, offset)) => {
Some(SymbolStorage::BaseAndGeneralReg(reg, offset)) => {
self.symbols_map
.insert(*sym, SymbolStorage::StackAndGeneralReg(reg, offset));
.insert(*sym, SymbolStorage::BaseAndGeneralReg(reg, offset));
Ok(reg)
}
Some(SymbolStorage::StackAndFloatReg(_reg, _offset)) => {
Some(SymbolStorage::BaseAndFloatReg(_reg, _offset)) => {
Err("Cannot load floating point symbol into GeneralReg".to_string())
}
Some(SymbolStorage::Stack(offset)) => {
Some(SymbolStorage::Base(offset)) => {
let reg = self.claim_general_reg(sym)?;
self.symbols_map
.insert(*sym, SymbolStorage::StackAndGeneralReg(reg, offset));
ASM::mov_reg64_stack32(&mut self.buf, reg, offset as i32);
.insert(*sym, SymbolStorage::BaseAndGeneralReg(reg, offset));
ASM::mov_reg64_base32(&mut self.buf, reg, offset as i32);
Ok(reg)
}
None => Err(format!("Unknown symbol: {}", sym)),
@ -518,19 +511,19 @@ impl<
self.symbols_map.insert(*sym, SymbolStorage::FloatReg(reg));
Ok(reg)
}
Some(SymbolStorage::StackAndGeneralReg(_reg, _offset)) => {
Some(SymbolStorage::BaseAndGeneralReg(_reg, _offset)) => {
Err("Cannot load integer point symbol into FloatReg".to_string())
}
Some(SymbolStorage::StackAndFloatReg(reg, offset)) => {
Some(SymbolStorage::BaseAndFloatReg(reg, offset)) => {
self.symbols_map
.insert(*sym, SymbolStorage::StackAndFloatReg(reg, offset));
.insert(*sym, SymbolStorage::BaseAndFloatReg(reg, offset));
Ok(reg)
}
Some(SymbolStorage::Stack(offset)) => {
Some(SymbolStorage::Base(offset)) => {
let reg = self.claim_float_reg(sym)?;
self.symbols_map
.insert(*sym, SymbolStorage::StackAndFloatReg(reg, offset));
ASM::mov_freg64_stack32(&mut self.buf, reg, offset as i32);
.insert(*sym, SymbolStorage::BaseAndFloatReg(reg, offset));
ASM::mov_freg64_base32(&mut self.buf, reg, offset as i32);
Ok(reg)
}
None => Err(format!("Unknown symbol: {}", sym)),
@ -542,26 +535,28 @@ impl<
match val {
Some(SymbolStorage::GeneralReg(reg)) => {
let offset = self.increase_stack_size(8)?;
ASM::mov_stack32_reg64(&mut self.buf, offset as i32, reg);
self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
// For base addressing, use the negative offset.
ASM::mov_base32_reg64(&mut self.buf, -offset, reg);
self.symbols_map.insert(*sym, SymbolStorage::Base(-offset));
Ok(())
}
Some(SymbolStorage::FloatReg(reg)) => {
let offset = self.increase_stack_size(8)?;
ASM::mov_stack32_freg64(&mut self.buf, offset as i32, reg);
self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
// For base addressing, use the negative offset.
ASM::mov_base32_freg64(&mut self.buf, -offset, reg);
self.symbols_map.insert(*sym, SymbolStorage::Base(-offset));
Ok(())
}
Some(SymbolStorage::StackAndGeneralReg(_, offset)) => {
self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
Some(SymbolStorage::BaseAndGeneralReg(_, offset)) => {
self.symbols_map.insert(*sym, SymbolStorage::Base(offset));
Ok(())
}
Some(SymbolStorage::StackAndFloatReg(_, offset)) => {
self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
Some(SymbolStorage::BaseAndFloatReg(_, offset)) => {
self.symbols_map.insert(*sym, SymbolStorage::Base(offset));
Ok(())
}
Some(SymbolStorage::Stack(offset)) => {
self.symbols_map.insert(*sym, SymbolStorage::Stack(offset));
Some(SymbolStorage::Base(offset)) => {
self.symbols_map.insert(*sym, SymbolStorage::Base(offset));
Ok(())
}
None => Err(format!("Unknown symbol: {}", sym)),
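As an aside (not from the commit, and deliberately simplified): the `Base(-offset)` bookkeeping above assumes locals live below the base pointer. A toy model of that sign convention, where `increase_stack_size` is assumed to return the cumulative number of bytes reserved so far:

// Toy model (hypothetical names and behavior): slots are handed out as positive
// cumulative sizes but addressed at negative offsets from the base pointer,
// i.e. the value sits at [base_pointer - size].
struct FrameModel {
    stack_size: i32,
}

impl FrameModel {
    // Reserve `bytes` more stack space; assumed here to return the new cumulative size.
    fn increase_stack_size(&mut self, bytes: i32) -> i32 {
        self.stack_size += bytes;
        self.stack_size
    }
}

fn main() {
    let mut frame = FrameModel { stack_size: 0 };
    let first = frame.increase_stack_size(8);
    // Recorded as SymbolStorage::Base(-8): the value lives 8 bytes below the base pointer.
    assert_eq!(-first, -8);
    let second = frame.increase_stack_size(8);
    // The next spilled value lands at [base_pointer - 16].
    assert_eq!(-second, -16);
}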

View File

@ -143,21 +143,19 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64SystemV {
#[inline(always)]
fn setup_stack<'a>(
buf: &mut Vec<'a, u8>,
leaf_function: bool,
general_saved_regs: &[X86_64GeneralReg],
requested_stack_size: i32,
) -> Result<i32, String> {
x86_64_generic_setup_stack(buf, leaf_function, general_saved_regs, requested_stack_size)
x86_64_generic_setup_stack(buf, general_saved_regs, requested_stack_size)
}
#[inline(always)]
fn cleanup_stack<'a>(
buf: &mut Vec<'a, u8>,
leaf_function: bool,
general_saved_regs: &[X86_64GeneralReg],
aligned_stack_size: i32,
) -> Result<(), String> {
x86_64_generic_cleanup_stack(buf, leaf_function, general_saved_regs, aligned_stack_size)
x86_64_generic_cleanup_stack(buf, general_saved_regs, aligned_stack_size)
}
}
@ -256,52 +254,45 @@ impl CallConv<X86_64GeneralReg, X86_64FloatReg> for X86_64WindowsFastcall {
#[inline(always)]
fn setup_stack<'a>(
buf: &mut Vec<'a, u8>,
leaf_function: bool,
saved_regs: &[X86_64GeneralReg],
requested_stack_size: i32,
) -> Result<i32, String> {
x86_64_generic_setup_stack(buf, leaf_function, saved_regs, requested_stack_size)
x86_64_generic_setup_stack(buf, saved_regs, requested_stack_size)
}
#[inline(always)]
fn cleanup_stack<'a>(
buf: &mut Vec<'a, u8>,
leaf_function: bool,
saved_regs: &[X86_64GeneralReg],
aligned_stack_size: i32,
) -> Result<(), String> {
x86_64_generic_cleanup_stack(buf, leaf_function, saved_regs, aligned_stack_size)
x86_64_generic_cleanup_stack(buf, saved_regs, aligned_stack_size)
}
}
#[inline(always)]
fn x86_64_generic_setup_stack<'a>(
buf: &mut Vec<'a, u8>,
leaf_function: bool,
saved_regs: &[X86_64GeneralReg],
requested_stack_size: i32,
) -> Result<i32, String> {
if !leaf_function {
X86_64Assembler::push_reg64(buf, X86_64GeneralReg::RBP);
X86_64Assembler::mov_reg64_reg64(buf, X86_64GeneralReg::RBP, X86_64GeneralReg::RSP);
}
for reg in saved_regs {
X86_64Assembler::push_reg64(buf, *reg);
}
X86_64Assembler::push_reg64(buf, X86_64GeneralReg::RBP);
X86_64Assembler::mov_reg64_reg64(buf, X86_64GeneralReg::RBP, X86_64GeneralReg::RSP);
// full size is upcast to i64 to make sure we don't overflow here.
let full_size = 8 * saved_regs.len() as i64 + requested_stack_size as i64;
let alignment = if full_size <= 0 {
let full_stack_size = requested_stack_size
.checked_add(8 * saved_regs.len() as i32)
.ok_or("Ran out of stack space")?;
let alignment = if full_stack_size <= 0 {
0
} else {
full_size % STACK_ALIGNMENT as i64
full_stack_size % STACK_ALIGNMENT as i32
};
let offset = if alignment == 0 {
0
} else {
STACK_ALIGNMENT - alignment as u8
};
if let Some(aligned_stack_size) = requested_stack_size.checked_add(offset as i32) {
if let Some(aligned_stack_size) = full_stack_size.checked_add(offset as i32) {
if aligned_stack_size > 0 {
X86_64Assembler::sub_reg64_reg64_imm32(
buf,
@ -309,6 +300,13 @@ fn x86_64_generic_setup_stack<'a>(
X86_64GeneralReg::RSP,
aligned_stack_size,
);
// Put values at the top of the stack to avoid conflicts with previously saved variables.
let mut offset = aligned_stack_size;
for reg in saved_regs {
offset -= 8;
X86_64Assembler::mov_base32_reg64(buf, -offset, *reg);
}
Ok(aligned_stack_size)
} else {
Ok(0)
@ -321,11 +319,15 @@ fn x86_64_generic_setup_stack<'a>(
#[inline(always)]
fn x86_64_generic_cleanup_stack<'a>(
buf: &mut Vec<'a, u8>,
leaf_function: bool,
saved_regs: &[X86_64GeneralReg],
aligned_stack_size: i32,
) -> Result<(), String> {
if aligned_stack_size > 0 {
let mut offset = aligned_stack_size;
for reg in saved_regs {
offset -= 8;
X86_64Assembler::mov_reg64_base32(buf, *reg, -offset);
}
X86_64Assembler::add_reg64_reg64_imm32(
buf,
X86_64GeneralReg::RSP,
@ -333,13 +335,8 @@ fn x86_64_generic_cleanup_stack<'a>(
aligned_stack_size,
);
}
for reg in saved_regs.iter().rev() {
X86_64Assembler::pop_reg64(buf, *reg);
}
if !leaf_function {
X86_64Assembler::mov_reg64_reg64(buf, X86_64GeneralReg::RSP, X86_64GeneralReg::RBP);
X86_64Assembler::pop_reg64(buf, X86_64GeneralReg::RBP);
}
X86_64Assembler::mov_reg64_reg64(buf, X86_64GeneralReg::RSP, X86_64GeneralReg::RBP);
X86_64Assembler::pop_reg64(buf, X86_64GeneralReg::RBP);
Ok(())
}
@ -431,6 +428,26 @@ impl Assembler<X86_64GeneralReg, X86_64FloatReg> for X86_64Assembler {
fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64GeneralReg) {
mov_reg64_reg64(buf, dst, src);
}
#[inline(always)]
fn mov_freg64_base32(_buf: &mut Vec<'_, u8>, _dst: X86_64FloatReg, _offset: i32) {
unimplemented!(
"loading floating point reg from base offset not yet implemented for X86_64"
);
}
#[inline(always)]
fn mov_reg64_base32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, offset: i32) {
mov_reg64_base32(buf, dst, offset);
}
#[inline(always)]
fn mov_base32_freg64(_buf: &mut Vec<'_, u8>, _offset: i32, _src: X86_64FloatReg) {
unimplemented!("saving floating point reg to base offset not yet implemented for X86_64");
}
#[inline(always)]
fn mov_base32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: X86_64GeneralReg) {
mov_base32_reg64(buf, offset, src);
}
#[inline(always)]
fn mov_freg64_stack32(_buf: &mut Vec<'_, u8>, _dst: X86_64FloatReg, _offset: i32) {
unimplemented!("loading floating point reg from stack not yet implemented for X86_64");
@ -643,7 +660,33 @@ fn mov_reg64_reg64(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, src: X86_64Gene
binop_reg64_reg64(0x89, buf, dst, src);
}
/// `MOV r64,r/m64` -> Move r/m64 to r64.
/// `MOV r64,r/m64` -> Move r/m64 to r64, where m64 references the base pointer.
#[inline(always)]
fn mov_reg64_base32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, offset: i32) {
// This can be optimized based on how many bytes the offset actually is.
// This function could probably be made to take any memory offset; I didn't feel like figuring that out right now.
// Also, this may technically be faster generation since stack operations should be so common.
let rex = add_reg_extension(dst, REX_W);
let dst_mod = (dst as u8 % 8) << 3;
buf.reserve(8);
buf.extend(&[rex, 0x8B, 0x85 + dst_mod]);
buf.extend(&offset.to_le_bytes());
}
/// `MOV r/m64,r64` -> Move r64 to r/m64, where m64 references the base pointer.
#[inline(always)]
fn mov_base32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: X86_64GeneralReg) {
// This can be optimized based on how many bytes the offset actually is.
// This function can probably be made to take any memory offset, I didn't feel like figuring it out rn.
// Also, this may technically be faster genration since stack operations should be so common.
let rex = add_reg_extension(src, REX_W);
let src_mod = (src as u8 % 8) << 3;
buf.reserve(8);
buf.extend(&[rex, 0x89, 0x85 + src_mod]);
buf.extend(&offset.to_le_bytes());
}
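For context (not part of the commit): in the two encoders above, the ModRM byte `0x85 + dst_mod` / `0x85 + src_mod` encodes mod=10 (32-bit displacement) with r/m=101, which in 64-bit mode means `[RBP + disp32]`, i.e. a base-pointer-relative access. A standalone sketch (the helper name is made up) that reproduces the bytes the new tests below expect:

// Standalone sketch of the `MOV r64, [RBP + disp32]` encoding used above:
//   REX.W (plus REX.R for R8..R15) | opcode 0x8B | ModRM | disp32, little endian.
// ModRM = 0b10_rrr_101: mod=10 selects a 32-bit displacement, r/m=101 selects RBP.
fn encode_mov_reg64_base32(dst: u8, offset: i32) -> Vec<u8> {
    let rex = 0x48 | (((dst >> 3) & 1) << 2); // 0x48 is REX.W; bit 2 is REX.R
    let modrm = 0x85 | ((dst % 8) << 3); // same value as `0x85 + dst_mod` above
    let mut out = vec![rex, 0x8B, modrm];
    out.extend_from_slice(&offset.to_le_bytes());
    out
}

fn main() {
    // Matches the expectations in test_mov_reg64_base32 below: RAX = 0, R15 = 15.
    let rax: [u8; 3] = [0x48, 0x8B, 0x85];
    let r15: [u8; 3] = [0x4C, 0x8B, 0xBD];
    assert_eq!(encode_mov_reg64_base32(0, 0x1234_5678)[..3], rax[..]);
    assert_eq!(encode_mov_reg64_base32(15, 0x1234_5678)[..3], r15[..]);
}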
/// `MOV r64,r/m64` -> Move r/m64 to r64, where m64 references the stack pointer.
#[inline(always)]
fn mov_reg64_stack32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, offset: i32) {
// This can be optimized based on how many bytes the offset actually is.
@ -656,7 +699,7 @@ fn mov_reg64_stack32(buf: &mut Vec<'_, u8>, dst: X86_64GeneralReg, offset: i32)
buf.extend(&offset.to_le_bytes());
}
/// `MOV r/m64,r64` -> Move r64 to r/m64.
/// `MOV r/m64,r64` -> Move r64 to r/m64, where m64 references the stack pointer.
#[inline(always)]
fn mov_stack32_reg64(buf: &mut Vec<'_, u8>, offset: i32, src: X86_64GeneralReg) {
// This can be optimized based on how many bytes the offset actually is.
@ -976,6 +1019,36 @@ mod tests {
}
}
#[test]
fn test_mov_reg64_base32() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for ((dst, offset), expected) in &[
((X86_64GeneralReg::RAX, TEST_I32), [0x48, 0x8B, 0x85]),
((X86_64GeneralReg::R15, TEST_I32), [0x4C, 0x8B, 0xBD]),
] {
buf.clear();
mov_reg64_base32(&mut buf, *dst, *offset);
assert_eq!(expected, &buf[..3]);
assert_eq!(TEST_I32.to_le_bytes(), &buf[3..]);
}
}
#[test]
fn test_mov_base32_reg64() {
let arena = bumpalo::Bump::new();
let mut buf = bumpalo::vec![in &arena];
for ((offset, src), expected) in &[
((TEST_I32, X86_64GeneralReg::RAX), [0x48, 0x89, 0x85]),
((TEST_I32, X86_64GeneralReg::R15), [0x4C, 0x89, 0xBD]),
] {
buf.clear();
mov_base32_reg64(&mut buf, *offset, *src);
assert_eq!(expected, &buf[..3]);
assert_eq!(TEST_I32.to_le_bytes(), &buf[3..]);
}
}
#[test]
fn test_mov_reg64_stack32() {
let arena = bumpalo::Bump::new();

View File

@ -338,11 +338,7 @@ where
/// set_free_map sets the free map to the given map.
fn set_free_map(&mut self, map: MutMap<*const Stmt<'a>, Vec<'a, Symbol>>);
/// set_not_leaf_function lets the backend know that it is not a leaf function.
fn set_not_leaf_function(&mut self);
/// scan_ast runs through the ast and fills the last seen map.
/// It also checks if the function is a leaf function or not.
/// This must iterate through the ast in the same way that build_stmt does, i.e. then before else.
fn scan_ast(&mut self, stmt: &Stmt<'a>) {
match stmt {
@ -469,18 +465,12 @@ where
}
match call_type {
CallType::ByName { name: sym, .. } => {
// For functions that we won't inline, we should not be a leaf function.
if !INLINED_SYMBOLS.contains(sym) {
self.set_not_leaf_function();
}
}
CallType::ByName { .. } => {}
CallType::ByPointer { name: sym, .. } => {
self.set_not_leaf_function();
self.set_last_seen(*sym, stmt);
}
CallType::LowLevel { .. } => {}
CallType::Foreign { .. } => self.set_not_leaf_function(),
CallType::Foreign { .. } => {}
}
}
}