Kernel: Stop taking MM lock while using PD/PT quickmaps

This is no longer required as these quickmaps are now per-CPU. :^)
Andreas Kling 2022-08-22 14:56:26 +02:00
parent a838fdfd88
commit c8375c51ff
3 changed files with 0 additions and 12 deletions
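For context: the kernel's quickmap helpers (quickmap_pd() and quickmap_pt(), hunks below) temporarily map a page directory or page table so it can be edited. These mappings were previously shared, which is why every user had to hold the global s_mm_lock; with one slot reserved per CPU, two CPUs can never collide on a slot, and the global lock becomes unnecessary. A minimal sketch of the per-CPU slot scheme — QUICKMAP_PER_CPU_BASE is a hypothetical stand-in for the kernel's KERNEL_QUICKMAP_PD_PER_CPU_BASE / KERNEL_QUICKMAP_PT_PER_CPU_BASE constants, and the helper itself is illustrative, not the kernel's actual code:

    // Hypothetical sketch of per-CPU quickmap slot selection. No lock is
    // needed: each CPU derives a distinct address from its own id, and
    // interrupts stay disabled while the slot is in use (see the
    // VERIFY_INTERRUPTS_DISABLED() checks below), so the thread cannot
    // migrate to another CPU while its slot is active.
    static constexpr FlatPtr QUICKMAP_PER_CPU_BASE = 0xffff'ffff'd000'0000; // hypothetical base

    VirtualAddress quickmap_slot_for_current_cpu()
    {
        return VirtualAddress(QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
    }

What still needs serialization is mutation of a given page directory's contents; that is why the per-directory get_lock() acquisitions and their VERIFY checks remain in the hunks below.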

Kernel/Heap/kmalloc.cpp

@@ -334,7 +334,6 @@ struct KmallocGlobalData {
auto cpu_supports_nx = Processor::current().has_nx();
SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
-SpinlockLocker mm_locker(Memory::s_mm_lock);
for (auto vaddr = new_subheap_base; !physical_pages.is_empty(); vaddr = vaddr.offset(PAGE_SIZE)) {
// FIXME: We currently leak physical memory when mapping it into the kmalloc heap.
@@ -367,7 +366,6 @@ struct KmallocGlobalData {
// Make sure the entire kmalloc VM range is backed by page tables.
// This avoids having to deal with lazy page table allocation during heap expansion.
SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
-SpinlockLocker mm_locker(Memory::s_mm_lock);
for (auto vaddr = reserved_region->range().base(); vaddr < reserved_region->range().end(); vaddr = vaddr.offset(PAGE_SIZE)) {
MM.ensure_pte(MM.kernel_page_directory(), vaddr);
}

Kernel/Memory/MemoryManager.cpp

@@ -140,7 +140,6 @@ UNMAP_AFTER_INIT void MemoryManager::unmap_prekernel()
UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
{
SpinlockLocker page_lock(kernel_page_directory().get_lock());
-SpinlockLocker mm_lock(s_mm_lock);
// Disable writing to the .ro_after_init section
for (auto i = (FlatPtr)&start_of_ro_after_init; i < (FlatPtr)&end_of_ro_after_init; i += PAGE_SIZE) {
auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
@@ -152,7 +151,6 @@ UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
void MemoryManager::unmap_text_after_init()
{
SpinlockLocker page_lock(kernel_page_directory().get_lock());
-SpinlockLocker mm_lock(s_mm_lock);
auto start = page_round_down((FlatPtr)&start_of_unmap_after_init);
auto end = page_round_up((FlatPtr)&end_of_unmap_after_init).release_value_but_fixme_should_propagate_errors();
@@ -169,7 +167,6 @@ void MemoryManager::unmap_text_after_init()
UNMAP_AFTER_INIT void MemoryManager::protect_ksyms_after_init()
{
-SpinlockLocker mm_lock(s_mm_lock);
SpinlockLocker page_lock(kernel_page_directory().get_lock());
auto start = page_round_down((FlatPtr)start_of_kernel_ksyms);
@@ -543,7 +540,6 @@ PhysicalAddress MemoryManager::get_physical_address(PhysicalPage const& physical
PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress vaddr)
{
VERIFY_INTERRUPTS_DISABLED();
-VERIFY(s_mm_lock.is_locked_by_current_processor());
VERIFY(page_directory.get_lock().is_locked_by_current_processor());
u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
@@ -560,7 +556,6 @@ PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress
PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
{
VERIFY_INTERRUPTS_DISABLED();
-VERIFY(s_mm_lock.is_locked_by_current_processor());
VERIFY(page_directory.get_lock().is_locked_by_current_processor());
u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
@@ -602,7 +597,6 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, IsLastPTERelease is_last_pte_release)
{
VERIFY_INTERRUPTS_DISABLED();
-VERIFY(s_mm_lock.is_locked_by_current_processor());
VERIFY(page_directory.get_lock().is_locked_by_current_processor());
u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
@@ -1038,7 +1032,6 @@ void MemoryManager::flush_tlb(PageDirectory const* page_directory, VirtualAddres
PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
{
VERIFY_INTERRUPTS_DISABLED();
-VERIFY(s_mm_lock.is_locked_by_current_processor());
VirtualAddress vaddr(KERNEL_QUICKMAP_PD_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
size_t pte_index = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
@@ -1058,7 +1051,6 @@ PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t
PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
{
VERIFY_INTERRUPTS_DISABLED();
-VERIFY(s_mm_lock.is_locked_by_current_processor());
VirtualAddress vaddr(KERNEL_QUICKMAP_PT_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
size_t pte_index = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
@@ -1162,7 +1154,6 @@ void MemoryManager::dump_kernel_regions()
void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable)
{
SpinlockLocker page_lock(kernel_page_directory().get_lock());
-SpinlockLocker lock(s_mm_lock);
auto* pte = ensure_pte(kernel_page_directory(), vaddr);
VERIFY(pte);
if (pte->is_writable() == writable)

Kernel/Memory/Region.cpp

@@ -213,7 +213,6 @@ bool Region::map_individual_page_impl(size_t page_index, LockRefPtr<PhysicalPage
PANIC("About to map mmap'ed page at a kernel address");
}
-SpinlockLocker lock(s_mm_lock);
auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
if (!pte)
return false;
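Taken together, the post-commit locking discipline for kernel PTE manipulation is: hold the page directory's own lock and rely on the per-CPU quickmap slots underneath, with no global s_mm_lock anywhere in the path. A rough sketch of what a caller looks like after this change, modeled on the set_page_writable_direct() hunk above (the helper name and the set_writable() call are assumptions made for illustration):

    // Illustrative caller shape after this commit: only the per-directory
    // lock is taken; s_mm_lock no longer appears in the path.
    void set_kernel_page_writable(VirtualAddress vaddr, bool writable)
    {
        // Assumption: holding a kernel spinlock keeps interrupts disabled,
        // which satisfies VERIFY_INTERRUPTS_DISABLED() inside ensure_pte().
        SpinlockLocker page_lock(MM.kernel_page_directory().get_lock());
        auto* pte = MM.ensure_pte(MM.kernel_page_directory(), vaddr);
        VERIFY(pte);
        if (pte->is_writable() == writable)
            return;
        pte->set_writable(writable); // assumed PageTableEntry setter
        MM.flush_tlb(&MM.kernel_page_directory(), vaddr);
    }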