Rui Ueyama 2021-12-03 15:47:42 +09:00
parent 16921c78ea
commit fa8d5d97d0
7 changed files with 256 additions and 18 deletions

View File

@@ -4,19 +4,184 @@
namespace mold::macho {
-template <>
-Relocation<ARM64>
// Returns [hi:lo] bits of val.
static u64 bits(u64 val, u64 hi, u64 lo) {
return (val >> lo) & (((u64)1 << (hi - lo + 1)) - 1);
}
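// The addend is stored in place in the section data being relocated;
// p2size gives its size as a power of two (2 = 4 bytes, 3 = 8 bytes).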
static i64 read_addend(u8 *buf, const MachRel &r) {
switch (r.p2size) {
case 2:
return *(i32 *)(buf + r.offset);
case 3:
return *(i64 *)(buf + r.offset);
default:
unreachable();
}
}
static Relocation<ARM64>
read_reloc(Context<ARM64> &ctx, ObjectFile<ARM64> &file,
-const MachSection &hdr, MachRel r) {
-return {};
const MachSection &hdr, MachRel *rels, i64 &idx) {
i64 addend = 0;
switch (rels[idx].type) {
case ARM64_RELOC_UNSIGNED:
case ARM64_RELOC_SUBTRACTOR:
addend = read_addend((u8 *)file.mf->data + hdr.offset, rels[idx]);
break;
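// ARM64_RELOC_ADDEND is a pseudo-relocation: it does not relocate anything
// itself but supplies the addend for the relocation entry that follows it.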
case ARM64_RELOC_ADDEND:
addend = rels[idx++].offset;
break;
}
MachRel &r = rels[idx];
Relocation<ARM64> rel{r.offset, (u8)r.type, (u8)r.p2size, (bool)r.is_pcrel};
if (r.is_extern) {
rel.sym = file.syms[r.idx];
rel.addend = addend;
return rel;
}
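// A non-extern relocation refers to its target by address rather than by
// symbol. Translate that address into a subsection plus an offset from the
// subsection's start.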
u32 addr;
if (r.is_pcrel)
addr = hdr.addr + r.offset + addend;
else
addr = addend;
Subsection<ARM64> *target = file.find_subsection(ctx, addr);
if (!target)
Fatal(ctx) << file << ": bad relocation: " << r.offset;
rel.subsec = target;
rel.addend = addr - target->input_addr;
return rel;
}
template <>
std::vector<Relocation<ARM64>>
read_relocations(Context<ARM64> &ctx, ObjectFile<ARM64> &file,
const MachSection &hdr) {
std::vector<Relocation<ARM64>> vec;
MachRel *rels = (MachRel *)(file.mf->data + hdr.reloff);
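// read_reloc may consume two consecutive MachRel entries (an
// ARM64_RELOC_ADDEND pseudo-relocation plus the real one); it advances i
// through the reference when it does.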
for (i64 i = 0; i < hdr.nreloc; i++)
vec.push_back(read_reloc(ctx, file, hdr, rels, i));
return vec;
}
template <>
void Subsection<ARM64>::scan_relocations(Context<ARM64> &ctx) {
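// Decide which synthetic entries each referenced symbol needs:
// GOT slots, thread-local pointer slots, and lazy-binding stubs.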
for (Relocation<ARM64> &r : get_rels()) {
Symbol<ARM64> *sym = r.sym;
if (!sym)
continue;
switch (r.type) {
case ARM64_RELOC_GOT_LOAD_PAGE21:
case ARM64_RELOC_GOT_LOAD_PAGEOFF12:
case ARM64_RELOC_POINTER_TO_GOT:
sym->flags |= NEEDS_GOT;
break;
case ARM64_RELOC_TLVP_LOAD_PAGE21:
case ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
sym->flags |= NEEDS_THREAD_PTR;
break;
}
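// A symbol imported from a dylib is reached through a stub and bound
// lazily at runtime.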
if (sym->file && sym->file->is_dylib) {
sym->flags |= NEEDS_STUB;
((DylibFile<ARM64> *)sym->file)->is_needed = true;
}
}
}
template <>
void Subsection<ARM64>::apply_reloc(Context<ARM64> &ctx, u8 *buf) {
std::span<Relocation<ARM64>> rels = get_rels();
for (i64 i = 0; i < rels.size(); i++) {
Relocation<ARM64> &r = rels[i];
if (r.sym && !r.sym->file) {
Error(ctx) << "undefined symbol: " << isec.file << ": " << *r.sym;
continue;
}
u64 val = 0;
switch (r.type) {
case ARM64_RELOC_UNSIGNED:
case ARM64_RELOC_BRANCH26:
case ARM64_RELOC_PAGE21:
case ARM64_RELOC_PAGEOFF12:
val = r.sym ? r.sym->get_addr(ctx) : r.subsec->get_addr(ctx);
break;
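// SUBTRACTOR is always paired with the UNSIGNED relocation that follows it;
// together they store the difference between the two target addresses.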
case ARM64_RELOC_SUBTRACTOR: {
Relocation<ARM64> s = rels[++i];
assert(s.type == ARM64_RELOC_UNSIGNED);
u64 val1 = r.sym ? r.sym->get_addr(ctx) : r.subsec->get_addr(ctx);
u64 val2 = s.sym ? s.sym->get_addr(ctx) : s.subsec->get_addr(ctx);
val = val2 - val1;
break;
}
case ARM64_RELOC_GOT_LOAD_PAGE21:
case ARM64_RELOC_GOT_LOAD_PAGEOFF12:
case ARM64_RELOC_POINTER_TO_GOT:
val = r.sym->get_got_addr(ctx);
break;
case ARM64_RELOC_TLVP_LOAD_PAGE21:
case ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
val = r.sym->get_tlv_addr(ctx);
break;
default:
Fatal(ctx) << isec << ": unknown reloc: " << (int)r.type;
}
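// Add the addend, then make PC-relative values relative to the address of
// the field being fixed up.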
val += r.addend;
if (r.is_pcrel)
val -= get_addr(ctx) + r.offset;
switch (r.type) {
case ARM64_RELOC_UNSIGNED:
case ARM64_RELOC_SUBTRACTOR:
case ARM64_RELOC_POINTER_TO_GOT:
switch (r.p2size) {
case 2:
*(i32 *)(buf + r.offset) = val;
break;
case 3:
*(i64 *)(buf + r.offset) = val;
break;
default:
unreachable();
}
break;
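// B/BL instructions encode a signed word offset in their low 26 bits, so
// store bits [27:2] of the byte displacement.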
case ARM64_RELOC_BRANCH26:
*(u32 *)(buf + r.offset) |= bits(val, 27, 2);
break;
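// ADRP materializes a 4 KiB page address; its 21-bit page immediate is
// split into immlo (bits 30:29) and immhi (bits 23:5) of the instruction.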
case ARM64_RELOC_PAGE21:
case ARM64_RELOC_GOT_LOAD_PAGE21:
case ARM64_RELOC_TLVP_LOAD_PAGE21:
*(u32 *)(buf + r.offset) |=
(bits(val, 13, 12) << 29) | (bits(val, 32, 14) << 5);
break;
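// PAGEOFF12 fills a 12-bit immediate with the low bits of the address.
// For loads and stores, the immediate is scaled by the access size, which
// is taken from the instruction's size field (bits 31:30).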
case ARM64_RELOC_PAGEOFF12:
case ARM64_RELOC_GOT_LOAD_PAGEOFF12:
case ARM64_RELOC_TLVP_LOAD_PAGEOFF12: {
u32 insn = *(u32 *)(buf + r.offset);
i64 scale = 0;
if ((insn & 0x3b000000) == 0x39000000)
scale = insn >> 30;
*(u32 *)(buf + r.offset) |= bits(val, 12, scale) << 10;
break;
}
default:
Fatal(ctx) << isec << ": unknown reloc: " << (int)r.type;
}
}
}
} // namespace mold::macho

View File

@@ -28,10 +28,9 @@ static i64 read_addend(u8 *buf, const MachRel &r) {
}
}
-template <>
-Relocation<X86_64>
static Relocation<X86_64>
read_reloc(Context<X86_64> &ctx, ObjectFile<X86_64> &file,
-const MachSection &hdr, MachRel r) {
const MachSection &hdr, MachRel &r) {
if (r.p2size != 2 && r.p2size != 3)
Fatal(ctx) << file << ": invalid r.p2size: " << (u32)r.p2size;
@@ -68,6 +67,19 @@ read_reloc(Context<X86_64> &ctx, ObjectFile<X86_64> &file,
return rel;
}
template <>
std::vector<Relocation<X86_64>>
read_relocations(Context<X86_64> &ctx, ObjectFile<X86_64> &file,
const MachSection &hdr) {
std::vector<Relocation<X86_64>> vec;
vec.reserve(hdr.nreloc);
MachRel *rels = (MachRel *)(file.mf->data + hdr.reloff);
for (i64 i = 0; i < hdr.nreloc; i++)
vec.push_back(read_reloc(ctx, file, hdr, rels[i]));
return vec;
}
template <>
void Subsection<X86_64>::scan_relocations(Context<X86_64> &ctx) {
for (Relocation<X86_64> &rel : get_rels()) {

View File

@@ -17,9 +17,7 @@ void InputSection<E>::parse_relocations(Context<E> &ctx) {
rels.reserve(hdr.nreloc);
// Parse mach-o relocations to fill `rels` vector
-MachRel *rel = (MachRel *)(file.mf->data + hdr.reloff);
-for (i64 i = 0; i < hdr.nreloc; i++)
-rels.push_back(read_reloc(ctx, file, hdr, rel[i]));
rels = read_relocations(ctx, file, hdr);
// Sort `rels` vector
sort(rels, [](const Relocation<E> &a, const Relocation<E> &b) {

View File

@@ -672,11 +672,21 @@ struct CodeSignatureDirectory {
struct ARM64 {
static constexpr u32 cputype = CPU_TYPE_ARM64;
static constexpr u32 cpusubtype = CPU_SUBTYPE_ARM64_ALL;
static constexpr u32 wordsize = 8;
static constexpr u32 stub_size = 6;
static constexpr u32 stub_helper_hdr_size = 16;
static constexpr u32 stub_helper_size = 10;
static constexpr u32 got_size = 10;
};
struct X86_64 {
static constexpr u32 cputype = CPU_TYPE_X86_64;
static constexpr u32 cpusubtype = CPU_SUBTYPE_X86_64_ALL;
static constexpr u32 wordsize = 8;
static constexpr u32 stub_size = 6;
static constexpr u32 stub_helper_hdr_size = 16;
static constexpr u32 stub_helper_size = 10;
static constexpr u32 got_size = 10;
};
} // namespace mold::macho

View File

@@ -192,8 +192,8 @@ public:
};
template <typename E>
-Relocation<E> read_reloc(Context<E> &ctx, ObjectFile<E> &file,
-const MachSection &hdr, MachRel r);
std::vector<Relocation<E>>
read_relocations(Context<E> &ctx, ObjectFile<E> &file, const MachSection &hdr);
//
// Symbol

View File

@@ -120,7 +120,7 @@ static std::vector<SplitInfo<E>> split(Context<E> &ctx, ObjectFile<E> &file) {
for (i64 i = 0; i < file.mach_syms.size(); i++) {
MachSym &msym = file.mach_syms[i];
-if (msym.type == N_SECT) {
if (msym.type == N_SECT && file.sections[msym.sect - 1]) {
SplitRegion r;
r.offset = msym.value - file.sections[msym.sect - 1]->hdr.addr;
r.symidx = i;

View File

@@ -1072,8 +1072,8 @@ void StubsSection<E>::add(Context<E> &ctx, Symbol<E> *sym) {
ctx.lazy_symbol_ptr.hdr.size = nsyms * LazySymbolPtrSection<E>::ENTRY_SIZE;
}
-template <typename E>
-void StubsSection<E>::copy_buf(Context<E> &ctx) {
template <>
void StubsSection<ARM64>::copy_buf(Context<ARM64> &ctx) {
u8 *buf = ctx.buf + this->hdr.offset;
for (i64 i = 0; i < syms.size(); i++) {
@@ -1084,13 +1084,66 @@ void StubsSection<E>::copy_buf(Context<E> &ctx) {
buf[i * 6] = 0xff;
buf[i * 6 + 1] = 0x25;
*(u32 *)(buf + i * 6 + 2) =
-(ctx.lazy_symbol_ptr.hdr.addr + i * LazySymbolPtrSection<E>::ENTRY_SIZE) -
ctx.lazy_symbol_ptr.hdr.addr +
i * LazySymbolPtrSection<ARM64>::ENTRY_SIZE -
(this->hdr.addr + i * 6 + 6);
}
}
-template <typename E>
-void StubHelperSection<E>::copy_buf(Context<E> &ctx) {
template <>
void StubsSection<X86_64>::copy_buf(Context<X86_64> &ctx) {
u8 *buf = ctx.buf + this->hdr.offset;
for (i64 i = 0; i < syms.size(); i++) {
// `ff 25 xx xx xx xx` is a RIP-relative indirect jump instruction,
// i.e., `jmp *IMM(%rip)`. It loads an address from la_symbol_ptr
// and jumps there.
assert(ENTRY_SIZE == 6);
buf[i * 6] = 0xff;
buf[i * 6 + 1] = 0x25;
*(u32 *)(buf + i * 6 + 2) =
ctx.lazy_symbol_ptr.hdr.addr +
i * LazySymbolPtrSection<X86_64>::ENTRY_SIZE -
(this->hdr.addr + i * 6 + 6);
}
}
template <>
void StubHelperSection<ARM64>::copy_buf(Context<ARM64> &ctx) {
u8 *start = ctx.buf + this->hdr.offset;
u8 *buf = start;
u8 insn0[16] = {
0x4c, 0x8d, 0x1d, 0, 0, 0, 0, // lea $__dyld_private(%rip), %r11
0x41, 0x53, // push %r11
0xff, 0x25, 0, 0, 0, 0, // jmp *$dyld_stub_binder@GOT(%rip)
0x90, // nop
};
memcpy(buf, insn0, sizeof(insn0));
*(u32 *)(buf + 3) =
intern(ctx, "__dyld_private")->get_addr(ctx) - this->hdr.addr - 7;
*(u32 *)(buf + 11) =
intern(ctx, "dyld_stub_binder")->get_got_addr(ctx) - this->hdr.addr - 15;
buf += 16;
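// Each stub-bound symbol gets a 10-byte entry that pushes the symbol's
// lazy-bind offset and jumps back to the common header above, which
// tail-calls dyld_stub_binder.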
for (i64 i = 0; i < ctx.stubs.syms.size(); i++) {
u8 insn[10] = {
0x68, 0, 0, 0, 0, // push $bind_offset
0xe9, 0, 0, 0, 0, // jmp $__stub_helper
};
memcpy(buf, insn, sizeof(insn));
*(u32 *)(buf + 1) = ctx.stubs.bind_offsets[i];
*(u32 *)(buf + 6) = start - buf - 10;
buf += 10;
}
}
template <>
void StubHelperSection<X86_64>::copy_buf(Context<X86_64> &ctx) {
u8 *start = ctx.buf + this->hdr.offset;
u8 *buf = start;