Kernel: Unmap Prekernel pages after they are no longer needed

The Prekernel's memory is only accessed until MemoryManager has been
initialized. Keeping these pages mapped afterwards is both unnecessary
and harmful, as it prevents userland from using the 0x100000-0x155000
virtual address range.

Co-authored-by: Idan Horowitz <idan.horowitz@gmail.com>
Author:    Daniel Bertalan, 2021-12-19 01:15:12 +01:00
Committer: Brian Gianforcaro
Parent:    2f1b4b8a81
Commit:    4fc28bfe02
4 changed files with 31 additions and 8 deletions
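
For illustration, the new MemoryManager::unmap_prekernel() (first hunk below) walks the prekernel image one page at a time, flags the final page so the backing page table can be released as well, and then issues a single ranged TLB flush. The following standalone sketch mirrors that traversal pattern; PAGE_SIZE and the address constants are illustrative stand-ins for the kernel's start_of_prekernel_image/end_of_prekernel_image bounds, and the printf calls merely trace what the kernel would do.

    #include <cstdint>
    #include <cstdio>

    // Illustrative constants; the kernel derives the real bounds from
    // start_of_prekernel_image / end_of_prekernel_image.
    constexpr uintptr_t PAGE_SIZE = 4096;
    constexpr uintptr_t prekernel_start = 0x100000;
    constexpr uintptr_t prekernel_end = 0x155000;

    int main()
    {
        // Release one PTE per page; mark the final page so the callee
        // knows it may also free the now-empty page table.
        for (uintptr_t addr = prekernel_start; addr <= prekernel_end; addr += PAGE_SIZE) {
            bool is_last = (addr == prekernel_end);
            printf("release_pte(%#lx)%s\n", static_cast<unsigned long>(addr), is_last ? " [last]" : "");
        }
        // A single ranged TLB flush then covers the whole region.
        printf("flush_tlb: %lu pages\n", static_cast<unsigned long>((prekernel_end - prekernel_start) / PAGE_SIZE));
        return 0;
    }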

Kernel/Memory/MemoryManager.cpp

@@ -106,6 +106,19 @@ UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
     }
 }
 
+UNMAP_AFTER_INIT void MemoryManager::unmap_prekernel()
+{
+    SpinlockLocker page_lock(kernel_page_directory().get_lock());
+    SpinlockLocker mm_lock(s_mm_lock);
+
+    auto start = start_of_prekernel_image.page_base().get();
+    auto end = end_of_prekernel_image.page_base().get();
+
+    for (auto i = start; i <= end; i += PAGE_SIZE)
+        release_pte(kernel_page_directory(), VirtualAddress(i), i == end ? IsLastPTERelease::Yes : IsLastPTERelease::No, UnsafeIgnoreMissingPageTable::Yes);
+    flush_tlb(&kernel_page_directory(), VirtualAddress(start), (end - start) / PAGE_SIZE);
+}
+
 UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
 {
     SpinlockLocker page_lock(kernel_page_directory().get_lock());
@@ -200,7 +213,6 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
     // Register used memory regions that we know of.
     m_used_memory_ranges.ensure_capacity(4);
     m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
-    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Prekernel, start_of_prekernel_image, end_of_prekernel_image });
     m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image))) });
 
     if (multiboot_flags & 0x4) {
@@ -570,7 +582,7 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
     return &quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
 }
 
-void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, bool is_last_release)
+void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, IsLastPTERelease is_last_pte_release, UnsafeIgnoreMissingPageTable unsafe_ignore_missing_page_table)
 {
     VERIFY_INTERRUPTS_DISABLED();
     VERIFY(s_mm_lock.is_locked_by_current_processor());
@@ -586,7 +598,7 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
     auto& pte = page_table[page_table_index];
     pte.clear();
 
-    if (is_last_release || page_table_index == 0x1ff) {
+    if (is_last_pte_release == IsLastPTERelease::Yes || page_table_index == 0x1ff) {
         // If this is the last PTE in a region or the last PTE in a page table then
         // check if we can also release the page table
         bool all_clear = true;
@@ -600,7 +612,7 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
             pde.clear();
 
             auto result = page_directory.m_page_tables.remove(vaddr.get() & ~0x1fffff);
-            VERIFY(result);
+            VERIFY(unsafe_ignore_missing_page_table == UnsafeIgnoreMissingPageTable::Yes || result);
         }
     }
 }
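
A side note on the signature change above: replacing the bool parameter with the scoped enums IsLastPTERelease and UnsafeIgnoreMissingPageTable makes call sites self-documenting. A minimal sketch of the difference, using hypothetical stand-in functions rather than the kernel's actual ones:

    #include <cstdio>

    enum class IsLastPTERelease { Yes, No };

    // Hypothetical before/after signatures, for comparison only.
    static void release_old(int page, bool is_last_release)
    {
        printf("old: page=%d last=%d\n", page, is_last_release);
    }

    static void release_new(int page, IsLastPTERelease is_last)
    {
        printf("new: page=%d last=%d\n", page, is_last == IsLastPTERelease::Yes);
    }

    int main()
    {
        release_old(7, true);                  // what does 'true' mean here?
        release_new(7, IsLastPTERelease::Yes); // intent is explicit at the call site
        return 0;
    }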

Kernel/Memory/MemoryManager.h

@@ -49,19 +49,19 @@ inline FlatPtr virtual_to_low_physical(FlatPtr virtual_)
 
 enum class UsedMemoryRangeType {
     LowMemory = 0,
-    Prekernel,
     Kernel,
     BootModule,
     PhysicalPages,
+    __Count
 };
 
 static constexpr StringView UserMemoryRangeTypeNames[] {
     "Low memory",
-    "Prekernel",
     "Kernel",
     "Boot module",
     "Physical Pages"
 };
+static_assert(array_size(UserMemoryRangeTypeNames) == to_underlying(UsedMemoryRangeType::__Count));
 
 struct UsedMemoryRange {
     UsedMemoryRangeType type {};
@@ -159,6 +159,7 @@ public:
     void set_page_writable_direct(VirtualAddress, bool);
 
     void protect_readonly_after_init_memory();
+    void unmap_prekernel();
     void unmap_text_after_init();
     void protect_ksyms_after_init();
@@ -276,7 +277,15 @@ private:
     PageTableEntry* pte(PageDirectory&, VirtualAddress);
     PageTableEntry* ensure_pte(PageDirectory&, VirtualAddress);
-    void release_pte(PageDirectory&, VirtualAddress, bool);
+    enum class IsLastPTERelease {
+        Yes,
+        No
+    };
+    enum class UnsafeIgnoreMissingPageTable {
+        Yes,
+        No
+    };
+    void release_pte(PageDirectory&, VirtualAddress, IsLastPTERelease, UnsafeIgnoreMissingPageTable = UnsafeIgnoreMissingPageTable::No);
 
     RefPtr<PageDirectory> m_kernel_page_directory;
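
The header hunk above also introduces a __Count sentinel plus a static_assert that keeps UserMemoryRangeTypeNames in lockstep with the enum. Here is a standalone sketch of that idiom, assuming local stand-ins for AK's array_size and to_underlying:

    #include <cstddef>

    enum class UsedMemoryRangeType {
        LowMemory = 0,
        Kernel,
        BootModule,
        PhysicalPages,
        __Count
    };

    static constexpr char const* names[] = {
        "Low memory",
        "Kernel",
        "Boot module",
        "Physical Pages"
    };

    // Local stand-in for AK's array_size().
    template<typename T, size_t N>
    constexpr size_t array_size(T (&)[N]) { return N; }

    // Fails to compile if an enum value is added without a matching name
    // (static_cast plays the role of AK's to_underlying here).
    static_assert(array_size(names) == static_cast<size_t>(UsedMemoryRangeType::__Count));

    int main() { return 0; }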

Kernel/Memory/Region.cpp

@@ -243,7 +243,7 @@ void Region::unmap(ShouldDeallocateVirtualRange deallocate_range)
     size_t count = page_count();
     for (size_t i = 0; i < count; ++i) {
         auto vaddr = vaddr_from_page_index(i);
-        MM.release_pte(*m_page_directory, vaddr, i == count - 1);
+        MM.release_pte(*m_page_directory, vaddr, i == count - 1 ? MemoryManager::IsLastPTERelease::Yes : MemoryManager::IsLastPTERelease::No);
     }
     MemoryManager::flush_tlb(m_page_directory, vaddr(), page_count());
     if (deallocate_range == ShouldDeallocateVirtualRange::Yes) {

Kernel/init.cpp

@@ -193,6 +193,8 @@ extern "C" [[noreturn]] UNMAP_AFTER_INIT void init(BootInfo const& boot_info)
     CommandLine::initialize();
     Memory::MemoryManager::initialize(0);
 
+    MM.unmap_prekernel();
+
     // Ensure that the safemem sections are not empty. This could happen if the linker accidentally discards the sections.
     VERIFY(+start_of_safemem_text != +end_of_safemem_text);
     VERIFY(+start_of_safemem_atomic_text != +end_of_safemem_atomic_text);