Kernel: Do less unnecessary work when tearing down process address space

When deleting an entire AddressSpace, we don't need to do TLB flushes
at all (since the entire page directory is going away anyway).

We also don't need to deallocate VM ranges one by one, since the entire
VM range allocator will be deleted anyway.
This commit is contained in:
Andreas Kling 2022-01-12 13:48:43 +01:00
parent a702b6ec42
commit 2323cdd914
Notes: sideshowbarker 2024-07-17 21:05:46 +09:00
3 changed files with 7 additions and 3 deletions

View File

@ -320,7 +320,10 @@ void AddressSpace::dump_regions()
// Tears down every region in this address space during process finalization.
// Since the entire page directory is about to be destroyed, each region is
// unmapped without deallocating its virtual range and without TLB flushes --
// both would be wasted work (see commit message). The Badge<Process> parameter
// presumably restricts callers to Process code (Serenity access-control idiom).
void AddressSpace::remove_all_regions(Badge<Process>)
{
VERIFY(Thread::current() == g_finalizer); // Teardown must run on the finalizer thread.
SpinlockLocker lock(m_lock); // Hold the space lock for the whole sweep.
for (auto& region : m_regions)
(*region).unmap(Region::ShouldDeallocateVirtualRange::No, ShouldFlushTLB::No);
m_regions.clear(); // Drop ownership; Region destructors run here.
}

View File

@ -234,7 +234,7 @@ bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
return success;
}
void Region::unmap(ShouldDeallocateVirtualRange deallocate_range)
void Region::unmap(ShouldDeallocateVirtualRange deallocate_range, ShouldFlushTLB should_flush_tlb)
{
if (!m_page_directory)
return;
@ -245,7 +245,8 @@ void Region::unmap(ShouldDeallocateVirtualRange deallocate_range)
auto vaddr = vaddr_from_page_index(i);
MM.release_pte(*m_page_directory, vaddr, i == count - 1 ? MemoryManager::IsLastPTERelease::Yes : MemoryManager::IsLastPTERelease::No);
}
MemoryManager::flush_tlb(m_page_directory, vaddr(), page_count());
if (should_flush_tlb == ShouldFlushTLB::Yes)
MemoryManager::flush_tlb(m_page_directory, vaddr(), page_count());
if (deallocate_range == ShouldDeallocateVirtualRange::Yes) {
m_page_directory->range_allocator().deallocate(range());
}

View File

@ -178,7 +178,7 @@ public:
No,
Yes,
};
void unmap(ShouldDeallocateVirtualRange = ShouldDeallocateVirtualRange::Yes);
void unmap(ShouldDeallocateVirtualRange, ShouldFlushTLB = ShouldFlushTLB::Yes);
void remap();