1
1
mirror of https://github.com/rui314/mold.git synced 2024-11-13 09:39:13 +03:00

[ELF] Add -z separate-code, -z noseparate-code and -z separate-loadable-segments

Fixes https://github.com/rui314/mold/issues/172
This commit is contained in:
Rui Ueyama 2021-12-22 14:55:09 +09:00
parent 088912e2fc
commit 5601cf4236
6 changed files with 132 additions and 53 deletions

View File

@ -633,6 +633,26 @@ after it finishes its job.
By default, \fBmold\fR generates a relro segment. \fB\-z
norelro\fR disables the feature.
.IP "\fB\-z separate\-loadable\-segments\fR"
.PD 0
.IP "\fB\-z separate\-code\fR"
.PD 0
.IP "\fB\-z noseparate\-code\fR"
.PD
If one memory page contains multiple segments, the page protection
bits are set in such a way that needed attributes (writable or
executable) are satisfied for all segments. This usually happens at a
boundary of two segments with two different attributes.
\fBseparate\-loadable\-segments\fR adds padding between segments with
different attributes so that they do not share the same page. This is
the default.
\fBseparate\-code\fR adds padding only between executable and
non-executable segments.
\fBnoseparate\-code\fR does not add any padding between segments.
.IP "\fB\-z defs\fR"
.PD 0
.IP "\fB\-z nodefs\fR"

View File

@ -146,6 +146,10 @@ Options:
-z nodump Mark DSO not available to dldump
-z now Disable lazy function resolution
-z origin Mark object requiring immediate $ORIGIN processing at runtime
-z separate-loadable-segments
Separate all loadable segments to different pages
-z separate-code Separate code and data into different pages
-z noseparate-code Allow overlap in pages
-z relro Make some sections read-only after relocation (default)
-z norelro
-z text Report error if DT_TEXTREL is set
@ -589,6 +593,12 @@ void parse_nonpositional_args(Context<E> &ctx,
ctx.arg.z_origin = true;
} else if (read_z_flag(args, "nodefaultlib")) {
ctx.arg.z_nodefaultlib = true;
} else if (read_z_flag(args, "separate-loadable-segments")) {
ctx.arg.z_separate_code = SEPARATE_LOADABLE_SEGMENTS;
} else if (read_z_flag(args, "separate-code")) {
ctx.arg.z_separate_code = SEPARATE_CODE;
} else if (read_z_flag(args, "noseparate-code")) {
ctx.arg.z_separate_code = NOSEPARATE_CODE;
} else if (read_flag(args, "no-undefined")) {
ctx.arg.z_defs = true;
} else if (read_flag(args, "fatal-warnings")) {

View File

@ -304,6 +304,9 @@ private:
template <typename E>
bool is_relro(Context<E> &ctx, Chunk<E> *chunk);
template <typename E>
bool separate_page(Context<E> &ctx, Chunk<E> *a, Chunk<E> *b);
// Chunk represents a contiguous region in an output file.
template <typename E>
class Chunk {
@ -322,7 +325,6 @@ public:
std::string_view name;
i64 shndx = 0;
Kind kind;
bool new_page = false;
ElfShdr<E> shdr = {};
protected:
@ -1155,6 +1157,12 @@ struct BuildId {
typedef enum { COMPRESS_NONE, COMPRESS_GABI, COMPRESS_GNU } CompressKind;
typedef enum { ERROR, WARN, IGNORE } UnresolvedKind;
// Page-separation policy between output segments, selected by the
// -z separate-loadable-segments / -z separate-code / -z noseparate-code
// command-line flags (stored in Context::arg.z_separate_code).
typedef enum {
SEPARATE_LOADABLE_SEGMENTS, // pad so no two segments share a page (default)
SEPARATE_CODE,              // pad only between executable and non-executable segments
NOSEPARATE_CODE,            // add no padding; segments may share pages
} SeparateCodeKind;
struct VersionPattern {
u16 ver_idx;
std::vector<std::string_view> patterns;
@ -1206,6 +1214,7 @@ struct Context {
struct {
BuildId build_id;
CompressKind compress_debug_sections = COMPRESS_NONE;
SeparateCodeKind z_separate_code = SEPARATE_LOADABLE_SEGMENTS;
UnresolvedKind unresolved_symbols = UnresolvedKind::ERROR;
bool Bsymbolic = false;
bool Bsymbolic_functions = false;

View File

@ -76,11 +76,12 @@ void OutputShdr<E>::copy_buf(Context<E> &ctx) {
}
template <typename E>
static i64 to_phdr_flags(Chunk<E> *chunk) {
static i64 to_phdr_flags(Context<E> &ctx, Chunk<E> *chunk) {
i64 ret = PF_R;
if (chunk->shdr.sh_flags & SHF_WRITE)
ret |= PF_W;
if (chunk->shdr.sh_flags & SHF_EXECINSTR)
if ((chunk->shdr.sh_flags & SHF_EXECINSTR) ||
(ctx.arg.z_separate_code == NOSEPARATE_CODE))
ret |= PF_X;
return ret;
}
@ -113,6 +114,22 @@ bool is_relro(Context<E> &ctx, Chunk<E> *chunk) {
return false;
}
template <typename E>
bool separate_page(Context<E> &ctx, Chunk<E> *x, Chunk<E> *y) {
if (ctx.arg.z_relro && is_relro(ctx, x) != is_relro(ctx, y))
return true;
switch (ctx.arg.z_separate_code) {
case SEPARATE_LOADABLE_SEGMENTS:
return to_phdr_flags(ctx, x) != to_phdr_flags(ctx, y);
case SEPARATE_CODE:
return (x->shdr.sh_flags & SHF_EXECINSTR) != (y->shdr.sh_flags & SHF_EXECINSTR);
case NOSEPARATE_CODE:
return false;
}
unreachable();
}
template <typename E>
std::vector<ElfPhdr<E>> create_phdr(Context<E> &ctx) {
std::vector<ElfPhdr<E>> vec;
@ -164,36 +181,32 @@ std::vector<ElfPhdr<E>> create_phdr(Context<E> &ctx) {
if (!is_note(first))
continue;
i64 flags = to_phdr_flags(first);
i64 flags = to_phdr_flags(ctx, first);
i64 alignment = first->shdr.sh_addralign;
define(PT_NOTE, flags, alignment, first);
while (i < end && is_note(ctx.chunks[i]) &&
to_phdr_flags(ctx.chunks[i]) == flags &&
to_phdr_flags(ctx, ctx.chunks[i]) == flags &&
ctx.chunks[i]->shdr.sh_addralign == alignment)
append(ctx.chunks[i++]);
}
// Create PT_LOAD segments.
for (Chunk<E> *chunk : ctx.chunks)
chunk->new_page = false;
for (i64 i = 0, end = ctx.chunks.size(); i < end;) {
Chunk<E> *first = ctx.chunks[i++];
if (!(first->shdr.sh_flags & SHF_ALLOC))
break;
i64 flags = to_phdr_flags(first);
i64 flags = to_phdr_flags(ctx, first);
define(PT_LOAD, flags, COMMON_PAGE_SIZE, first);
first->new_page = true;
if (!is_bss(first))
while (i < end && !is_bss(ctx.chunks[i]) &&
to_phdr_flags(ctx.chunks[i]) == flags)
to_phdr_flags(ctx, ctx.chunks[i]) == flags)
append(ctx.chunks[i++]);
while (i < end && is_bss(ctx.chunks[i]) &&
to_phdr_flags(ctx.chunks[i]) == flags)
to_phdr_flags(ctx, ctx.chunks[i]) == flags)
append(ctx.chunks[i++]);
}
@ -202,7 +215,7 @@ std::vector<ElfPhdr<E>> create_phdr(Context<E> &ctx) {
if (!(ctx.chunks[i]->shdr.sh_flags & SHF_TLS))
continue;
define(PT_TLS, to_phdr_flags(ctx.chunks[i]), 1, ctx.chunks[i]);
define(PT_TLS, to_phdr_flags(ctx, ctx.chunks[i]), 1, ctx.chunks[i]);
i++;
while (i < ctx.chunks.size() && (ctx.chunks[i]->shdr.sh_flags & SHF_TLS))
append(ctx.chunks[i++]);
@ -230,12 +243,9 @@ std::vector<ElfPhdr<E>> create_phdr(Context<E> &ctx) {
continue;
define(PT_GNU_RELRO, PF_R, 1, ctx.chunks[i]);
ctx.chunks[i]->new_page = true;
i++;
while (i < ctx.chunks.size() && is_relro(ctx, ctx.chunks[i]))
append(ctx.chunks[i++]);
if (i < ctx.chunks.size())
ctx.chunks[i]->new_page = true;
}
}
@ -1832,41 +1842,42 @@ void ReproSection<E>::copy_buf(Context<E> &ctx) {
contents->write_to(ctx.buf + this->shdr.sh_offset);
}
#define INSTANTIATE(E) \
template class Chunk<E>; \
template class OutputEhdr<E>; \
template class OutputShdr<E>; \
template class OutputPhdr<E>; \
template class InterpSection<E>; \
template class OutputSection<E>; \
template class GotSection<E>; \
template class GotPltSection<E>; \
template class PltSection<E>; \
template class PltGotSection<E>; \
template class RelPltSection<E>; \
template class RelDynSection<E>; \
template class StrtabSection<E>; \
template class ShstrtabSection<E>; \
template class DynstrSection<E>; \
template class DynamicSection<E>; \
template class SymtabSection<E>; \
template class DynsymSection<E>; \
template class HashSection<E>; \
template class GnuHashSection<E>; \
template class MergedSection<E>; \
template class EhFrameSection<E>; \
template class EhFrameHdrSection<E>; \
template class DynbssSection<E>; \
template class VersymSection<E>; \
template class VerneedSection<E>; \
template class VerdefSection<E>; \
template class BuildIdSection<E>; \
template class NotePropertySection<E>; \
template class GabiCompressedSection<E>; \
template class GnuCompressedSection<E>; \
template class ReproSection<E>; \
template i64 BuildId::size(Context<E> &) const; \
template bool is_relro(Context<E> &, Chunk<E> *); \
#define INSTANTIATE(E) \
template class Chunk<E>; \
template class OutputEhdr<E>; \
template class OutputShdr<E>; \
template class OutputPhdr<E>; \
template class InterpSection<E>; \
template class OutputSection<E>; \
template class GotSection<E>; \
template class GotPltSection<E>; \
template class PltSection<E>; \
template class PltGotSection<E>; \
template class RelPltSection<E>; \
template class RelDynSection<E>; \
template class StrtabSection<E>; \
template class ShstrtabSection<E>; \
template class DynstrSection<E>; \
template class DynamicSection<E>; \
template class SymtabSection<E>; \
template class DynsymSection<E>; \
template class HashSection<E>; \
template class GnuHashSection<E>; \
template class MergedSection<E>; \
template class EhFrameSection<E>; \
template class EhFrameHdrSection<E>; \
template class DynbssSection<E>; \
template class VersymSection<E>; \
template class VerneedSection<E>; \
template class VerdefSection<E>; \
template class BuildIdSection<E>; \
template class NotePropertySection<E>; \
template class GabiCompressedSection<E>; \
template class GnuCompressedSection<E>; \
template class ReproSection<E>; \
template i64 BuildId::size(Context<E> &) const; \
template bool is_relro(Context<E> &, Chunk<E> *); \
template bool separate_page(Context<E> &, Chunk<E> *, Chunk<E> *); \
template std::vector<ElfPhdr<E>> create_phdr(Context<E> &)
INSTANTIATE(X86_64);

View File

@ -819,7 +819,7 @@ i64 set_osec_offsets(Context<E> &ctx) {
Chunk<E> &chunk = *ctx.chunks[i];
u64 prev_vaddr = vaddr;
if (chunk.new_page)
if (i > 0 && separate_page(ctx, ctx.chunks[i - 1], &chunk))
vaddr = align_to(vaddr, COMMON_PAGE_SIZE);
vaddr = align_to(vaddr, chunk.shdr.sh_addralign);
fileoff += vaddr - prev_vaddr;
@ -834,7 +834,7 @@ i64 set_osec_offsets(Context<E> &ctx) {
for (; i < end && ctx.chunks[i]->shdr.sh_type == SHT_NOBITS; i++) {
Chunk<E> &chunk = *ctx.chunks[i];
if (chunk.new_page)
if (i > 0 && separate_page(ctx, ctx.chunks[i - 1], &chunk))
vaddr = align_to(vaddr, COMMON_PAGE_SIZE);
vaddr = align_to(vaddr, chunk.shdr.sh_addralign);
fileoff = align_with_skew(fileoff, COMMON_PAGE_SIZE, vaddr % COMMON_PAGE_SIZE);

29
test/elf/z-separate-code.sh Executable file
View File

@ -0,0 +1,29 @@
#!/bin/bash
# Regression test for the -z separate-loadable-segments, -z separate-code
# and -z noseparate-code flags (https://github.com/rui314/mold/issues/172).
export LANG=
set -e
# Quote all path expansions so the test still works when the checkout
# path contains spaces.
cd "$(dirname "$0")"
mold="$(pwd)/../../mold"
echo -n "Testing $(basename -s .sh "$0") ... "
t="$(pwd)/../../out/test/elf/$(basename -s .sh "$0")"
mkdir -p "$t"

# Build a trivial hello-world object file to link against.
cat <<EOF | cc -o "$t"/a.o -c -xc -
#include <stdio.h>
int main() {
  printf("Hello world\n");
}
EOF

# Each separation mode must still produce a runnable executable.
clang -fuse-ld="$mold" -o "$t"/exe1 "$t"/a.o -Wl,-z,separate-loadable-segments
"$t"/exe1 | grep -q 'Hello world'

clang -fuse-ld="$mold" -o "$t"/exe2 "$t"/a.o -Wl,-z,separate-code -Wl,-z,norelro
"$t"/exe2 | grep -q 'Hello world'

clang -fuse-ld="$mold" -o "$t"/exe3 "$t"/a.o -Wl,-z,noseparate-code -Wl,-z,norelro
"$t"/exe3 | grep -q 'Hello world'

# With noseparate-code, PF_X is set on every LOAD segment, so no LOAD
# segment should show up as plain RW in the program headers.
readelf --segments "$t"/exe3 > "$t"/log
! grep 'LOAD .* RW ' "$t"/log || false

echo OK