Kernel: Enforce W^X more strictly (like PaX MPROTECT)

This patch adds enforcement of two new rules:

- Memory that was previously writable cannot become executable
- Memory that was previously executable cannot become writable

Unfortunately we have to make an exception for text relocations in the
dynamic loader. Since those necessitate writing into a private copy
of library code, we allow programs to transition from RW to RX under
very specific conditions. See the implementation of sys$mprotect()'s
should_make_executable_exception_for_dynamic_loader() for details.
Author: Andreas Kling
Date:   2021-01-29 14:38:49 +01:00
Commit: af3d3c5c4a (parent: d988bd86e4)

3 changed files with 106 additions and 13 deletions
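
For context, here is a minimal userland sketch (not part of this commit) of what the stricter enforcement looks like from a program's point of view. The scenario is hypothetical; the mmap()/mprotect() wrappers are the usual LibC ones, and the EINVAL result follows from the sys$mprotect() paths in the diff below.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main()
    {
        // Anonymous RW mapping; from now on the region is remembered as "has been writable".
        void* p = mmap(nullptr, 4096, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        // ...a JIT or self-modifying program would write code into p here...

        // W -> X transition: now rejected by sys$mprotect(), regardless of the current protection.
        if (mprotect(p, 4096, PROT_READ | PROT_EXEC) < 0)
            printf("mprotect(RX) refused: %s\n", strerror(errno));
        return 0;
    }

The one sanctioned escape hatch is the dynamic loader path handled by should_make_executable_exception_for_dynamic_loader() below.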

Kernel/Syscalls/mmap.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -33,25 +33,91 @@
 #include <Kernel/VM/Region.h>
 #include <Kernel/VM/SharedInodeVMObject.h>
 #include <LibC/limits.h>
+#include <LibELF/Validation.h>
 
 namespace Kernel {
 
-static bool validate_mmap_prot(int prot, bool map_stack)
+static bool should_make_executable_exception_for_dynamic_loader(bool make_readable, bool make_writable, bool make_executable, const Region& region)
 {
-    bool readable = prot & PROT_READ;
-    bool writable = prot & PROT_WRITE;
-    bool executable = prot & PROT_EXEC;
+    // Normally we don't allow W -> X transitions, but we have to make an exception
+    // for the dynamic loader, which needs to do this after performing text relocations.
 
-    if (writable && executable)
+    // FIXME: Investigate whether we could get rid of all text relocations entirely.
+
+    // The exception is only made if all of the following criteria are fulfilled:
+
+    // This exception has not been made for the same region already
+    if (region.has_made_executable_exception_for_dynamic_loader())
         return false;
 
+    // The region must be RW
+    if (!(region.is_readable() && region.is_writable() && !region.is_executable()))
+        return false;
+
+    // The region wants to become RX
+    if (!(make_readable && !make_writable && make_executable))
+        return false;
+
+    // The region is backed by a file
+    if (!region.vmobject().is_inode())
+        return false;
+
+    // The file mapping is private, not shared (no relocations in a shared mapping!)
+    if (!region.vmobject().is_private_inode())
+        return false;
+
+    Elf32_Ehdr header;
+    if (!copy_from_user(&header, region.vaddr().as_ptr(), sizeof(header)))
+        return false;
+
+    auto& inode = static_cast<const InodeVMObject&>(region.vmobject());
+
+    // The file is a valid ELF binary
+    if (!ELF::validate_elf_header(header, inode.size()))
+        return false;
+
+    // The file is an ELF shared object
+    if (header.e_type != ET_DYN)
+        return false;
+
+    // FIXME: Are there any additional checks/validations we could do here?
+    return true;
+}
+
+static bool validate_mmap_prot(int prot, bool map_stack, const Region* region = nullptr, bool* is_making_executable_exception_for_dynamic_loader = nullptr)
+{
+    if (is_making_executable_exception_for_dynamic_loader)
+        *is_making_executable_exception_for_dynamic_loader = false;
+
+    bool make_readable = prot & PROT_READ;
+    bool make_writable = prot & PROT_WRITE;
+    bool make_executable = prot & PROT_EXEC;
+
+    if (make_writable && make_executable)
+        return false;
+
     if (map_stack) {
-        if (executable)
+        if (make_executable)
             return false;
-        if (!readable || !writable)
+        if (!make_readable || !make_writable)
             return false;
     }
 
+    if (region) {
+        if (make_writable && region->has_been_executable())
+            return false;
+
+        if (make_executable && region->has_been_writable()) {
+            if (should_make_executable_exception_for_dynamic_loader(make_readable, make_writable, make_executable, *region)) {
+                ASSERT(is_making_executable_exception_for_dynamic_loader);
+                *is_making_executable_exception_for_dynamic_loader = true;
+                return true;
+            }
+            return false;
+        }
+    }
+
     return true;
 }
@@ -216,7 +282,8 @@ int Process::sys$mprotect(void* addr, size_t size, int prot)
     if (auto* whole_region = find_region_from_range(range_to_mprotect)) {
         if (!whole_region->is_mmap())
             return -EPERM;
-        if (!validate_mmap_prot(prot, whole_region->is_stack()))
+        bool is_making_executable_exception_for_dynamic_loader = false;
+        if (!validate_mmap_prot(prot, whole_region->is_stack(), whole_region, &is_making_executable_exception_for_dynamic_loader))
             return -EINVAL;
         if (whole_region->access() == prot_to_region_access_flags(prot))
             return 0;
@@ -227,6 +294,10 @@ int Process::sys$mprotect(void* addr, size_t size, int prot)
         whole_region->set_readable(prot & PROT_READ);
         whole_region->set_writable(prot & PROT_WRITE);
         whole_region->set_executable(prot & PROT_EXEC);
+
+        if (is_making_executable_exception_for_dynamic_loader)
+            whole_region->set_has_made_executable_exception_for_dynamic_loader();
+
         whole_region->remap();
         return 0;
     }
@@ -235,7 +306,8 @@ int Process::sys$mprotect(void* addr, size_t size, int prot)
     if (auto* old_region = find_region_containing(range_to_mprotect)) {
         if (!old_region->is_mmap())
             return -EPERM;
-        if (!validate_mmap_prot(prot, old_region->is_stack()))
+        bool is_making_executable_exception_for_dynamic_loader = false;
+        if (!validate_mmap_prot(prot, old_region->is_stack(), old_region, &is_making_executable_exception_for_dynamic_loader))
            return -EINVAL;
        if (old_region->access() == prot_to_region_access_flags(prot))
            return 0;
@@ -254,6 +326,9 @@ int Process::sys$mprotect(void* addr, size_t size, int prot)
         new_region.set_writable(prot & PROT_WRITE);
         new_region.set_executable(prot & PROT_EXEC);
+
+        if (is_making_executable_exception_for_dynamic_loader)
+            new_region.set_has_made_executable_exception_for_dynamic_loader();
         // Unmap the old region here, specifying that we *don't* want the VM deallocated.
         old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
         deallocate_region(*old_region);
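
To illustrate the exception path above, here is a hedged sketch of the one transition should_make_executable_exception_for_dynamic_loader() will approve: a private, file-backed ELF mapping that starts out RW for text relocations and is then sealed to RX. This is not taken from the loader sources, and the helper name is hypothetical.

    #include <sys/mman.h>

    // Hypothetical helper illustrating the dynamic loader's use of the RW -> RX exception.
    bool patch_text_relocations_and_seal(int library_fd, size_t mapping_size)
    {
        // Private, file-backed mapping of the library, writable so relocations can be
        // patched into our own copy. The first page holds the ELF header (ET_DYN) that
        // the kernel validates before granting the exception.
        void* text = mmap(nullptr, mapping_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, library_fd, 0);
        if (text == MAP_FAILED)
            return false;

        // ...apply text relocations to the private copy here...

        // RW -> RX: permitted because the region is a private inode mapping of a valid
        // ELF shared object. The region then records that the exception was used, so it
        // cannot be repeated.
        return mprotect(text, mapping_size, PROT_READ | PROT_EXEC) == 0;
    }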

Kernel/VM/Region.cpp

@@ -44,7 +44,7 @@ Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offs
     , m_offset_in_vmobject(offset_in_vmobject)
     , m_vmobject(move(vmobject))
     , m_name(name)
-    , m_access(access)
+    , m_access(access | ((access & 0x7) << 4))
     , m_shared(shared)
     , m_cacheable(cacheable)
     , m_kernel(kernel)

Kernel/VM/Region.h

@@ -50,10 +50,16 @@ class Region final
     MAKE_SLAB_ALLOCATED(Region)
 
 public:
-    enum Access {
+    // 76543210
+    // eXWR xwr
+    enum Access : u8 {
         Read = 1,
         Write = 2,
         Execute = 4,
+        HasBeenReadable = 16,
+        HasBeenWritable = 32,
+        HasBeenExecutable = 64,
+        HasMadeExecutableExceptionForDynamicLoader = 128,
     };
 
     static NonnullOwnPtr<Region> create_user_accessible(Process*, const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable, bool shared);
@@ -67,6 +73,18 @@ public:
     bool is_readable() const { return m_access & Access::Read; }
     bool is_writable() const { return m_access & Access::Write; }
     bool is_executable() const { return m_access & Access::Execute; }
+
+    bool has_been_readable() const { return m_access & Access::HasBeenReadable; }
+    bool has_been_writable() const { return m_access & Access::HasBeenWritable; }
+    bool has_been_executable() const { return m_access & Access::HasBeenExecutable; }
+
+    bool has_made_executable_exception_for_dynamic_loader() const { return m_access & Access::HasMadeExecutableExceptionForDynamicLoader; }
+    void set_has_made_executable_exception_for_dynamic_loader()
+    {
+        ASSERT(!has_made_executable_exception_for_dynamic_loader());
+        m_access |= Access::HasMadeExecutableExceptionForDynamicLoader;
+    }
+
     bool is_cacheable() const { return m_cacheable; }
     const String& name() const { return m_name; }
     unsigned access() const { return m_access; }
@@ -245,7 +263,7 @@ private:
     void set_access_bit(Access access, bool b)
    {
        if (b)
-            m_access |= access;
+            m_access |= access | (access << 4);
        else
            m_access &= ~access;
    }
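
Finally, a small standalone illustration of the bookkeeping introduced in Region.h/Region.cpp above: the low three bits hold the current R/W/X state, while the same pattern shifted left by four records every permission the region has ever held, which is what has_been_writable() and friends test. This is a sketch using plain uint8_t rather than the kernel's types.

    #include <cassert>
    #include <cstdint>

    enum Access : uint8_t {
        Read = 1,
        Write = 2,
        Execute = 4,
        HasBeenReadable = 16,
        HasBeenWritable = 32,
        HasBeenExecutable = 64,
    };

    int main()
    {
        // Region constructed RW: current bits plus their "has been" mirrors, as in Region::Region().
        uint8_t access = Read | Write;
        uint8_t m_access = access | ((access & 0x7) << 4);

        // Later, set_access_bit(Execute, true) and set_access_bit(Write, false):
        m_access |= Execute | (Execute << 4); // setting a bit also sets its sticky mirror
        m_access &= ~Write;                   // clearing a bit leaves the mirror untouched

        // The region is no longer writable, but the kernel still remembers that it once was.
        assert(!(m_access & Write));
        assert(m_access & HasBeenWritable);
        return 0;
    }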