From 2d06f6399f636efa615c3deff2e6267c33dc4117 Mon Sep 17 00:00:00 2001
From: Brian Gianforcaro
Date: Mon, 8 Aug 2022 21:50:14 -0700
Subject: [PATCH] Kernel: Fix SMP deadlock in MM::allocate_contiguous_physical_pages

This deadlock was introduced with the creation of this API. The lock
order is such that we always need to take the page directory lock
before we ever take the MM lock.

This function violated that, as both Region creation and region
destruction require the pd and mm locks, but with the mm lock already
acquired we deadlocked with SMP mode enabled while other threads were
allocating regions.

With this change SMP boots to the desktop successfully for me (and
then subsequently has other issues). :^)
---
 Kernel/Memory/MemoryManager.cpp | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index ef6573a9db4..30950d59bdb 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -949,7 +949,7 @@ ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(Shoul
 ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
 {
     VERIFY(!(size % PAGE_SIZE));
-    SpinlockLocker lock(s_mm_lock);
+    SpinlockLocker mm_lock(s_mm_lock);
     size_t page_count = ceil_div(size, static_cast<size_t>(PAGE_SIZE));
 
     // We need to make sure we don't touch pages that we have committed to
@@ -959,6 +959,12 @@ ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_ph
     for (auto& physical_region : m_physical_regions) {
         auto physical_pages = physical_region.take_contiguous_free_pages(page_count);
         if (!physical_pages.is_empty()) {
+            // Note: The locking semantics are a bit awkward here, the region destructor can
+            // deadlock due to invalid lock ordering if we still hold the s_mm_lock when the
+            // region is constructed or destructed. Since we are now done enumerating anyway,
+            // it's safe to just unlock here and have MM.allocate_kernel_region do the right thing.
+            mm_lock.unlock();
+
             {
                 auto cleanup_region = TRY(MM.allocate_kernel_region(physical_pages[0].paddr(), PAGE_SIZE * page_count, "MemoryManager Allocation Sanitization"sv, Region::Access::Read | Region::Access::Write));
                 memset(cleanup_region->vaddr().as_ptr(), 0, PAGE_SIZE * page_count);