ladybird/Kernel/VM/PageDirectory.cpp
Andreas Kling 52deb09382 Kernel: Enable PAE (Physical Address Extension)
Introduce one more (CPU) indirection layer in the paging code: the page
directory pointer table (PDPT). Each PageDirectory now has 4 separate
PageDirectoryEntry arrays, governing 1 GB of VM each.

A really neat side-effect of this is that we can now share the physical
page containing the >=3GB kernel-only address space metadata between
all processes, instead of lazily cloning it on page faults.

This will give us access to the NX (No eXecute) bit, allowing us to
prevent execution of memory that's not supposed to be executed.
2019-12-25 13:35:57 +01:00

89 lines
3.4 KiB
C++

#include <Kernel/Process.h>
#include <Kernel/Thread.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
// Userspace virtual address allocations start at 16 MB; everything below is left unmapped/reserved.
static const u32 userspace_range_base = 0x01000000;
// Kernel-only address space begins at 3 GB; userspace ranges must end below this.
static const u32 kernelspace_range_base = 0xc0000000;
// Global CR3 -> PageDirectory lookup table, used by find_by_cr3().
// Deliberately heap-allocated and never freed: kernel-lifetime singleton.
// Must only be touched with interrupts disabled.
static HashMap<u32, PageDirectory*>& cr3_map()
{
    ASSERT_INTERRUPTS_DISABLED();
    static HashMap<u32, PageDirectory*>* the_map = nullptr;
    if (the_map == nullptr)
        the_map = new HashMap<u32, PageDirectory*>;
    return *the_map;
}
// Look up the PageDirectory whose physical table base matches the given CR3
// value. Returns a null RefPtr if no such directory is registered.
RefPtr<PageDirectory> PageDirectory::find_by_cr3(u32 cr3)
{
    InterruptDisabler disabler;
    auto directory = cr3_map().get(cr3);
    return directory.value_or({});
}
// Bootstrap constructor for the kernel page directory.
//
// `paddr` points at a fixed physical region laid out as:
//   paddr + 0*PAGE_SIZE : the PDPT (page directory pointer table)
//   paddr + 1..4*PAGE_SIZE : the four page directories, 1 GB of VM each
//
// The kernel range allocator hands out virtual addresses in
// [0xc0000000, 0xff000000) — the kernel-only top of the address space.
PageDirectory::PageDirectory(PhysicalAddress paddr)
    : m_range_allocator(VirtualAddress(0xc0000000), 0x3f000000)
{
    m_directory_table = PhysicalPage::create(paddr, true, false);
    // Wrap the four pre-existing physical page-directory pages, then point
    // each PDPT entry at its directory. Bit 0 is the Present flag; PAE PDPT
    // entries must not set Write, so only Present is or'ed in.
    for (int i = 0; i < 4; ++i) {
        m_directory_pages[i] = PhysicalPage::create(paddr.offset(PAGE_SIZE * (i + 1)), true, false);
        table().raw[i] = (u64)m_directory_pages[i]->paddr().as_ptr() | 1;
    }
    // Register ourselves so find_by_cr3() can locate this directory.
    InterruptDisabler disabler;
    cr3_map().set(cr3(), this);
}
// Constructor for a userspace process's page directory.
//
// If `parent_range_allocator` is non-null (fork), the new allocator clones
// the parent's range state; otherwise a fresh allocator covering
// [userspace_range_base, kernelspace_range_base) is created.
PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_range_allocator)
    : m_process(&process)
    , m_range_allocator(parent_range_allocator ? RangeAllocator(*parent_range_allocator) : RangeAllocator(VirtualAddress(userspace_range_base), kernelspace_range_base - userspace_range_base))
{
    // Set up a userspace page directory: fresh directories for the lower
    // 3 GB of the address space.
    m_directory_table = MM.allocate_supervisor_physical_page();
    for (int i = 0; i < 3; ++i)
        m_directory_pages[i] = MM.allocate_supervisor_physical_page();
    // Share the top 1 GB of kernel-only mappings (>=3GB or >=0xc0000000)
    // with every process by reusing the kernel's 4th directory page.
    m_directory_pages[3] = MM.kernel_page_directory().m_directory_pages[3];
    // Point each PDPT entry at its directory; bit 0 is the Present flag.
    for (int i = 0; i < 4; ++i)
        table().raw[i] = (u64)m_directory_pages[i]->paddr().as_ptr() | 1;
    // Clone bottom 8 MB of mappings from kernel_page_directory
    // (4 PDEs x 2 MB each in directory 0).
    for (int i = 0; i < 4; ++i)
        table().directory(0)[i].copy_from({}, MM.kernel_page_directory().table().directory(0)[i]);
    // Register ourselves so find_by_cr3() can locate this directory.
    InterruptDisabler disabler;
    cr3_map().set(cr3(), this);
}
// Unregister this directory from the global CR3 map. The physical pages
// themselves are released by the RefPtr members' destructors.
PageDirectory::~PageDirectory()
{
#ifdef MM_DEBUG
dbgprintf("MM: ~PageDirectory K%x\n", this);
#endif
// Interrupts must be off while mutating cr3_map() (see ASSERT there).
InterruptDisabler disabler;
cr3_map().remove(cr3());
}
// Invalidate the TLB entry for `vaddr`, but only when this directory's
// mappings can actually be cached by the CPU right now — i.e. it is the
// kernel directory or the currently scheduled process's directory.
void PageDirectory::flush(VirtualAddress vaddr)
{
#ifdef MM_DEBUG
    dbgprintf("MM: Flush page V%p\n", vaddr.get());
#endif
    // Before the first thread is scheduled there is nothing to flush.
    if (!current)
        return;
    bool is_kernel_directory = this == &MM.kernel_page_directory();
    bool is_active_directory = &current->process().page_directory() == this;
    if (is_kernel_directory || is_active_directory)
        MM.flush_tlb(vaddr);
}