Kernel: Make kernel memory regions be non-executable by default

From now on, you'll have to request executable memory specifically
if you want some.
Andreas Kling 2019-12-25 22:41:34 +01:00
parent 0b7a2e0a5a
commit 9e55bcb7da
7 changed files with 27 additions and 31 deletions
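The net effect: the kernel's region allocators now take an explicit Region::Access bitmask, and nothing is mapped executable unless a call site asks for it. A minimal sketch of the two common call shapes after this commit (both appear verbatim in the diffs below; the region names here are illustrative, not from the patch):

// Ordinary kernel data: readable and writable, never executable.
auto data_region = MM.allocate_kernel_region(
    PAGE_SIZE, "Example data",
    Region::Access::Read | Region::Access::Write);

// Executable memory must now be requested explicitly.
auto trampoline_region = MM.allocate_user_accessible_kernel_region(
    PAGE_SIZE, "Example trampoline",
    Region::Access::Read | Region::Access::Write | Region::Access::Execute);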

KBuffer.h

@@ -17,16 +17,16 @@
 class KBufferImpl : public RefCounted<KBufferImpl> {
 public:
-    static NonnullRefPtr<KBufferImpl> create_with_size(size_t size)
+    static NonnullRefPtr<KBufferImpl> create_with_size(size_t size, u8 access)
     {
-        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), "KBuffer", false, false);
+        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), "KBuffer", access, false, false);
         ASSERT(region);
         return adopt(*new KBufferImpl(region.release_nonnull(), size));
     }

-    static NonnullRefPtr<KBufferImpl> copy(const void* data, size_t size)
+    static NonnullRefPtr<KBufferImpl> copy(const void* data, size_t size, u8 access)
     {
-        auto buffer = create_with_size(size);
+        auto buffer = create_with_size(size, access);
         memcpy(buffer->data(), data, size);
         return buffer;
     }
@@ -55,14 +55,14 @@ private:
 class KBuffer {
 public:
-    static KBuffer create_with_size(size_t size)
+    static KBuffer create_with_size(size_t size, u8 access = Region::Access::Read | Region::Access::Write)
     {
-        return KBuffer(KBufferImpl::create_with_size(size));
+        return KBuffer(KBufferImpl::create_with_size(size, access));
     }

-    static KBuffer copy(const void* data, size_t size)
+    static KBuffer copy(const void* data, size_t size, u8 access = Region::Access::Read | Region::Access::Write)
     {
-        return KBuffer(KBufferImpl::copy(data, size));
+        return KBuffer(KBufferImpl::copy(data, size, access));
     }

     u8* data() { return m_impl->data(); }
@@ -74,8 +74,8 @@ public:
     const KBufferImpl& impl() const { return m_impl; }

-    KBuffer(const ByteBuffer& buffer)
-        : m_impl(KBufferImpl::copy(buffer.data(), buffer.size()))
+    KBuffer(const ByteBuffer& buffer, u8 access = Region::Access::Read | Region::Access::Write)
+        : m_impl(KBufferImpl::copy(buffer.data(), buffer.size(), access))
     {
     }
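KBuffer keeps Region::Access::Read | Region::Access::Write as the default for every factory, so existing callers compile unchanged and stay non-executable. A hypothetical call site that wants a read-only buffer passes the mask explicitly (the variable names and blob are illustrative):

// Default access is Read | Write.
auto scratch = KBuffer::create_with_size(4 * PAGE_SIZE);

// Hypothetical: a read-only copy of an existing blob.
auto snapshot = KBuffer::copy(blob.data(), blob.size(), Region::Access::Read);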

KBufferBuilder.cpp

@@ -17,7 +17,7 @@ KBuffer KBufferBuilder::build()
 }

 KBufferBuilder::KBufferBuilder()
-    : m_buffer(KBuffer::create_with_size(1048576 * 4))
+    : m_buffer(KBuffer::create_with_size(4 * MB, Region::Access::Read | Region::Access::Write))
 {
 }

Process.cpp

@@ -1019,7 +1019,7 @@ void create_signal_trampolines()
     InterruptDisabler disabler;

     // NOTE: We leak this region.
-    auto* trampoline_region = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Signal trampolines").leak_ptr();
+    auto* trampoline_region = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Signal trampolines", Region::Access::Read | Region::Access::Write | Region::Access::Execute).leak_ptr();
     g_return_to_ring3_from_signal_trampoline = trampoline_region->vaddr();

     u8* trampoline = (u8*)asm_signal_trampoline;
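The signal trampoline page is one of the few kernel regions that legitimately needs Execute, since userspace returns from signal handlers through code staged in this page, so this call site requests Read | Write | Execute explicitly.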
@@ -1035,15 +1035,11 @@
 void create_kernel_info_page()
 {
-    auto* info_page_region_for_userspace = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Kernel info page").leak_ptr();
-    auto* info_page_region_for_kernel = MM.allocate_kernel_region_with_vmobject(info_page_region_for_userspace->vmobject(), PAGE_SIZE, "Kernel info page").leak_ptr();
+    auto* info_page_region_for_userspace = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Kernel info page", Region::Access::Read).leak_ptr();
+    auto* info_page_region_for_kernel = MM.allocate_kernel_region_with_vmobject(info_page_region_for_userspace->vmobject(), PAGE_SIZE, "Kernel info page", Region::Access::Read | Region::Access::Write).leak_ptr();
     s_info_page_address_for_userspace = info_page_region_for_userspace->vaddr();
     s_info_page_address_for_kernel = info_page_region_for_kernel->vaddr();
     memset(s_info_page_address_for_kernel.as_ptr(), 0, PAGE_SIZE);
-    info_page_region_for_userspace->set_writable(false);
-    info_page_region_for_userspace->remap();
 }

 int Process::sys$restore_signal_mask(u32 mask)
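Because the access mask now applies at creation time, the userspace view of the kernel info page is read-only from the start; the old two-step of set_writable(false) followed by remap() becomes unnecessary and is deleted.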

Thread.cpp

@@ -84,7 +84,7 @@ Thread::Thread(Process& process)
     m_tss.cr3 = m_process.page_directory().cr3();

     if (m_process.is_ring0()) {
-        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), false, true);
+        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), Region::Access::Read | Region::Access::Write, false, true);
         m_kernel_stack_region->set_stack(true);
         m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
         m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;
@@ -92,7 +92,7 @@ Thread::Thread(Process& process)
         kprintf("Allocated ring0 stack @ %p - %p\n", m_kernel_stack_base, m_kernel_stack_top);
     } else {
         // Ring3 processes need a separate stack for Ring0.
-        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), false, true);
+        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), Region::Access::Read | Region::Access::Write, false, true);
         m_kernel_stack_region->set_stack(true);
         m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
         m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;
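Kernel stacks illustrate the new default nicely: they only ever need Read | Write, and the trailing false, true arguments keep their old meaning (user_accessible and should_commit) in the updated signature.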

MemoryManager.cpp

@@ -334,7 +334,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     return region->handle_fault(fault);
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, bool user_accessible, bool should_commit)
+OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool should_commit)
 {
     InterruptDisabler disabler;
     ASSERT(!(size % PAGE_SIZE));
@@ -342,9 +342,9 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
     ASSERT(range.is_valid());
     OwnPtr<Region> region;
     if (user_accessible)
-        region = Region::create_user_accessible(range, name, PROT_READ | PROT_WRITE | PROT_EXEC);
+        region = Region::create_user_accessible(range, name, access);
     else
-        region = Region::create_kernel_only(range, name, PROT_READ | PROT_WRITE | PROT_EXEC);
+        region = Region::create_kernel_only(range, name, access);
     region->map(kernel_page_directory());
     // FIXME: It would be cool if these could zero-fill on demand instead.
     if (should_commit)
@@ -352,18 +352,18 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
     return region;
 }

-OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name)
+OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access)
 {
-    return allocate_kernel_region(size, name, true);
+    return allocate_kernel_region(size, name, access, true);
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name)
+OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name, u8 access)
 {
     InterruptDisabler disabler;
     ASSERT(!(size % PAGE_SIZE));
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     ASSERT(range.is_valid());
-    auto region = make<Region>(range, vmobject, 0, name, PROT_READ | PROT_WRITE | PROT_EXEC);
+    auto region = make<Region>(range, vmobject, 0, name, access);
     region->map(kernel_page_directory());
     return region;
 }
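Previously these allocators hard-coded PROT_READ | PROT_WRITE | PROT_EXEC for every kernel region; now the caller's mask flows straight into Region::create_user_accessible() and Region::create_kernel_only(). For reference, a sketch of the bitmask shape these call sites assume; the authoritative enum lives in Region.h:

// Sketch only, not part of the patch: the assumed Region::Access bit layout.
enum Access {
    Read = 1,
    Write = 2,
    Execute = 4,
};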

MemoryManager.h

@@ -60,9 +60,9 @@ public:
     void map_for_kernel(VirtualAddress, PhysicalAddress, bool cache_disabled = false);

-    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, bool user_accessible = false, bool should_commit = true);
-    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name);
-    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name);
+    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true);
+    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access);
+    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access);

     unsigned user_physical_pages() const { return m_user_physical_pages; }
     unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }

ELFLoader.cpp

@@ -107,7 +107,7 @@ String ELFLoader::symbolicate(u32 address, u32* out_offset) const
     SortedSymbol* sorted_symbols = nullptr;
 #ifdef KERNEL
     if (!m_sorted_symbols_region) {
-        m_sorted_symbols_region = MM.allocate_kernel_region(PAGE_ROUND_UP(m_image.symbol_count() * sizeof(SortedSymbol)), "Sorted symbols");
+        m_sorted_symbols_region = MM.allocate_kernel_region(PAGE_ROUND_UP(m_image.symbol_count() * sizeof(SortedSymbol)), "Sorted symbols", Region::Access::Read | Region::Access::Write);
         sorted_symbols = (SortedSymbol*)m_sorted_symbols_region->vaddr().as_ptr();
         size_t index = 0;
         m_image.for_each_symbol([&](auto& symbol) {