From 9e55bcb7da81c4c8da05a831d9c1cd4b5ededb9f Mon Sep 17 00:00:00 2001
From: Andreas Kling
Date: Wed, 25 Dec 2019 22:41:34 +0100
Subject: [PATCH] Kernel: Make kernel memory regions be non-executable by
 default

From now on, you'll have to request executable memory specifically if you
want some.
---
 Kernel/KBuffer.h               | 20 ++++++++++----------
 Kernel/KBufferBuilder.cpp      |  2 +-
 Kernel/Process.cpp             | 10 +++-------
 Kernel/Thread.cpp              |  4 ++--
 Kernel/VM/MemoryManager.cpp    | 14 +++++++-------
 Kernel/VM/MemoryManager.h      |  6 +++---
 Libraries/LibELF/ELFLoader.cpp |  2 +-
 7 files changed, 27 insertions(+), 31 deletions(-)

diff --git a/Kernel/KBuffer.h b/Kernel/KBuffer.h
index 7f259291402..9c0d6fd040f 100644
--- a/Kernel/KBuffer.h
+++ b/Kernel/KBuffer.h
@@ -17,16 +17,16 @@ class KBufferImpl : public RefCounted<KBufferImpl> {
 public:
-    static NonnullRefPtr<KBufferImpl> create_with_size(size_t size)
+    static NonnullRefPtr<KBufferImpl> create_with_size(size_t size, u8 access)
     {
-        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), "KBuffer", false, false);
+        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), "KBuffer", access, false, false);
         ASSERT(region);
         return adopt(*new KBufferImpl(region.release_nonnull(), size));
     }

-    static NonnullRefPtr<KBufferImpl> copy(const void* data, size_t size)
+    static NonnullRefPtr<KBufferImpl> copy(const void* data, size_t size, u8 access)
     {
-        auto buffer = create_with_size(size);
+        auto buffer = create_with_size(size, access);
         memcpy(buffer->data(), data, size);
         return buffer;
     }
@@ -55,14 +55,14 @@ private:

 class KBuffer {
 public:
-    static KBuffer create_with_size(size_t size)
+    static KBuffer create_with_size(size_t size, u8 access = Region::Access::Read | Region::Access::Write)
     {
-        return KBuffer(KBufferImpl::create_with_size(size));
+        return KBuffer(KBufferImpl::create_with_size(size, access));
     }

-    static KBuffer copy(const void* data, size_t size)
+    static KBuffer copy(const void* data, size_t size, u8 access = Region::Access::Read | Region::Access::Write)
     {
-        return KBuffer(KBufferImpl::copy(data, size));
+        return KBuffer(KBufferImpl::copy(data, size, access));
     }

     u8* data() { return m_impl->data(); }
@@ -74,8 +74,8 @@ public:

     const KBufferImpl& impl() const { return m_impl; }

-    KBuffer(const ByteBuffer& buffer)
-        : m_impl(KBufferImpl::copy(buffer.data(), buffer.size()))
+    KBuffer(const ByteBuffer& buffer, u8 access = Region::Access::Read | Region::Access::Write)
+        : m_impl(KBufferImpl::copy(buffer.data(), buffer.size(), access))
     {
     }

diff --git a/Kernel/KBufferBuilder.cpp b/Kernel/KBufferBuilder.cpp
index 0c4c369746b..07edbd2ffe7 100644
--- a/Kernel/KBufferBuilder.cpp
+++ b/Kernel/KBufferBuilder.cpp
@@ -17,7 +17,7 @@ KBuffer KBufferBuilder::build()
 }

 KBufferBuilder::KBufferBuilder()
-    : m_buffer(KBuffer::create_with_size(1048576 * 4))
+    : m_buffer(KBuffer::create_with_size(4 * MB, Region::Access::Read | Region::Access::Write))
 {
 }

diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index 3a106835120..12325545ee0 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -1019,7 +1019,7 @@ void create_signal_trampolines()
     InterruptDisabler disabler;

     // NOTE: We leak this region.
-    auto* trampoline_region = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Signal trampolines").leak_ptr();
+    auto* trampoline_region = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Signal trampolines", Region::Access::Read | Region::Access::Write | Region::Access::Execute).leak_ptr();
     g_return_to_ring3_from_signal_trampoline = trampoline_region->vaddr();

     u8* trampoline = (u8*)asm_signal_trampoline;
@@ -1035,15 +1035,11 @@ void create_signal_trampolines()

 void create_kernel_info_page()
 {
-    auto* info_page_region_for_userspace = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Kernel info page").leak_ptr();
-    auto* info_page_region_for_kernel = MM.allocate_kernel_region_with_vmobject(info_page_region_for_userspace->vmobject(), PAGE_SIZE, "Kernel info page").leak_ptr();
+    auto* info_page_region_for_userspace = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Kernel info page", Region::Access::Read).leak_ptr();
+    auto* info_page_region_for_kernel = MM.allocate_kernel_region_with_vmobject(info_page_region_for_userspace->vmobject(), PAGE_SIZE, "Kernel info page", Region::Access::Read | Region::Access::Write).leak_ptr();
     s_info_page_address_for_userspace = info_page_region_for_userspace->vaddr();
     s_info_page_address_for_kernel = info_page_region_for_kernel->vaddr();
-    memset(s_info_page_address_for_kernel.as_ptr(), 0, PAGE_SIZE);
-
-    info_page_region_for_userspace->set_writable(false);
-    info_page_region_for_userspace->remap();
 }

 int Process::sys$restore_signal_mask(u32 mask)
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 81a6612f560..92017524531 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -84,7 +84,7 @@ Thread::Thread(Process& process)
     m_tss.cr3 = m_process.page_directory().cr3();

     if (m_process.is_ring0()) {
-        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), false, true);
+        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), Region::Access::Read | Region::Access::Write, false, true);
         m_kernel_stack_region->set_stack(true);
         m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
         m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;
@@ -92,7 +92,7 @@ Thread::Thread(Process& process)
         kprintf("Allocated ring0 stack @ %p - %p\n", m_kernel_stack_base, m_kernel_stack_top);
     } else {
         // Ring3 processes need a separate stack for Ring0.
-        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), false, true);
+        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), Region::Access::Read | Region::Access::Write, false, true);
         m_kernel_stack_region->set_stack(true);
         m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
         m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;
diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp
index 2a34ca13706..92eaf6a2cc3 100644
--- a/Kernel/VM/MemoryManager.cpp
+++ b/Kernel/VM/MemoryManager.cpp
@@ -334,7 +334,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     return region->handle_fault(fault);
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, bool user_accessible, bool should_commit)
+OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool should_commit)
 {
     InterruptDisabler disabler;
     ASSERT(!(size % PAGE_SIZE));
@@ -342,9 +342,9 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
     ASSERT(range.is_valid());
     OwnPtr<Region> region;
     if (user_accessible)
-        region = Region::create_user_accessible(range, name, PROT_READ | PROT_WRITE | PROT_EXEC);
+        region = Region::create_user_accessible(range, name, access);
     else
-        region = Region::create_kernel_only(range, name, PROT_READ | PROT_WRITE | PROT_EXEC);
+        region = Region::create_kernel_only(range, name, access);
     region->map(kernel_page_directory());
     // FIXME: It would be cool if these could zero-fill on demand instead.
     if (should_commit)
@@ -352,18 +352,18 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
     return region;
 }

-OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name)
+OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access)
 {
-    return allocate_kernel_region(size, name, true);
+    return allocate_kernel_region(size, name, access, true);
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name)
+OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name, u8 access)
 {
     InterruptDisabler disabler;
     ASSERT(!(size % PAGE_SIZE));
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     ASSERT(range.is_valid());
-    auto region = make<Region>(range, vmobject, 0, name, PROT_READ | PROT_WRITE | PROT_EXEC);
+    auto region = make<Region>(range, vmobject, 0, name, access);
     region->map(kernel_page_directory());
     return region;
 }
diff --git a/Kernel/VM/MemoryManager.h b/Kernel/VM/MemoryManager.h
index ef00bee9da7..53596b35893 100644
--- a/Kernel/VM/MemoryManager.h
+++ b/Kernel/VM/MemoryManager.h
@@ -60,9 +60,9 @@ public:

     void map_for_kernel(VirtualAddress, PhysicalAddress, bool cache_disabled = false);

-    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, bool user_accessible = false, bool should_commit = true);
-    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name);
-    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name);
+    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true);
+    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access);
+    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access);

     unsigned user_physical_pages() const { return m_user_physical_pages; }
     unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }
diff --git a/Libraries/LibELF/ELFLoader.cpp b/Libraries/LibELF/ELFLoader.cpp
index 363590a3777..ab8d38fba3e 100644
--- a/Libraries/LibELF/ELFLoader.cpp
+++ b/Libraries/LibELF/ELFLoader.cpp
@@ -107,7 +107,7 @@ String ELFLoader::symbolicate(u32 address, u32* out_offset) const
     SortedSymbol* sorted_symbols = nullptr;
 #ifdef KERNEL
     if (!m_sorted_symbols_region) {
-        m_sorted_symbols_region = MM.allocate_kernel_region(PAGE_ROUND_UP(m_image.symbol_count() * sizeof(SortedSymbol)), "Sorted symbols");
+        m_sorted_symbols_region = MM.allocate_kernel_region(PAGE_ROUND_UP(m_image.symbol_count() * sizeof(SortedSymbol)), "Sorted symbols", Region::Access::Read | Region::Access::Write);
         sorted_symbols = (SortedSymbol*)m_sorted_symbols_region->vaddr().as_ptr();
         size_t index = 0;
         m_image.for_each_symbol([&](auto& symbol) {
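
Usage note: after this change, a caller that genuinely needs executable kernel memory has to pass Region::Access::Execute explicitly; regions allocated without it are non-executable. The following is a minimal sketch (illustrative only, not part of the patch; load_code_into_kernel_region, code and code_size are made-up names), assuming the allocate_kernel_region() signature and Region::Access flags shown above:

// Illustrative only -- not part of the commit. Shows the post-patch calling
// convention: executable memory must be requested by name.
static void load_code_into_kernel_region(const u8* code, size_t code_size)
{
    // Executable region: Execute is now opt-in rather than the default.
    auto code_region = MM.allocate_kernel_region(
        PAGE_ROUND_UP(code_size),
        "Example code region",
        Region::Access::Read | Region::Access::Write | Region::Access::Execute);
    ASSERT(code_region);
    memcpy(code_region->vaddr().as_ptr(), code, code_size);

    // Plain data buffer: omitting Execute leaves the region non-executable.
    auto scratch_region = MM.allocate_kernel_region(
        PAGE_SIZE,
        "Example scratch buffer",
        Region::Access::Read | Region::Access::Write);
    ASSERT(scratch_region);

    // Both regions are freed when the OwnPtrs go out of scope here; a real
    // caller would keep them alive for as long as the memory is needed.
}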