Kernel/SMP: Always take PageDirectory lock before the MemoryManager lock
This prevents deadlocking due to inconsistent acquisition order.
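Note: the deadlock being avoided is the classic ABBA pattern: one CPU holds s_mm_lock and waits for a PageDirectory lock while another CPU holds that PageDirectory lock and waits for s_mm_lock. The sketch below is a minimal userspace analogue of that situation, using std::mutex in place of the kernel's ScopedSpinLock; the function names and the main() driver are illustrative only and are not part of this commit.

#include <mutex>
#include <thread>

std::mutex page_directory_lock; // stands in for a PageDirectory's lock
std::mutex mm_lock;             // stands in for the global s_mm_lock

// Consistent order (what this commit enforces): page directory lock first, then MM lock.
void map_pages()
{
    std::lock_guard<std::mutex> page_lock(page_directory_lock);
    std::lock_guard<std::mutex> lock(mm_lock);
    // ... update page tables and global memory-manager state ...
}

// Inconsistent order (the pre-commit pattern in some paths): MM lock first.
// If one thread runs map_pages() while another runs this function, each can
// end up holding the lock the other is waiting for, and neither makes progress.
void unmap_pages_inconsistent_order()
{
    std::lock_guard<std::mutex> lock(mm_lock);
    std::lock_guard<std::mutex> page_lock(page_directory_lock);
    // ...
}

int main()
{
    // With every path using the same order, concurrent callers cannot deadlock.
    std::thread a(map_pages);
    std::thread b(map_pages);
    a.join();
    b.join();
    return 0;
}

In userspace, std::scoped_lock could acquire both mutexes with built-in deadlock avoidance; kernel spinlocks instead rely on a fixed, documented acquisition order like the one this commit establishes.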
Parent: d21b8f9013
Commit: 00bbbdeda6
Notes:
sideshowbarker
2024-07-18 07:11:44 +09:00
Author: https://github.com/awesomekling Commit: https://github.com/SerenityOS/serenity/commit/00bbbdeda6d
@@ -105,8 +105,8 @@ UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
 
 UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
 {
-    ScopedSpinLock mm_lock(s_mm_lock);
     ScopedSpinLock page_lock(kernel_page_directory().get_lock());
+    ScopedSpinLock mm_lock(s_mm_lock);
     // Disable writing to the .ro_after_init section
     for (auto i = (FlatPtr)&start_of_ro_after_init; i < (FlatPtr)&end_of_ro_after_init; i += PAGE_SIZE) {
         auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
@@ -117,8 +117,8 @@ UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
 
 void MemoryManager::unmap_text_after_init()
 {
-    ScopedSpinLock mm_lock(s_mm_lock);
     ScopedSpinLock page_lock(kernel_page_directory().get_lock());
+    ScopedSpinLock mm_lock(s_mm_lock);
 
     auto start = page_round_down((FlatPtr)&start_of_unmap_after_init);
     auto end = page_round_up((FlatPtr)&end_of_unmap_after_init);
@@ -703,7 +703,7 @@ PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
 OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinLock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
@@ -721,7 +721,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, StringView nam
     auto vm_object = AnonymousVMObject::try_create_with_size(size, strategy);
     if (!vm_object)
         return {};
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinLock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
@@ -734,7 +734,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
     if (!vm_object)
         return {};
     VERIFY(!(size % PAGE_SIZE));
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinLock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
@@ -743,7 +743,6 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
-    ScopedSpinLock lock(s_mm_lock);
     auto region = Region::try_create_kernel_only(range, vmobject, 0, KString::try_create(name), access, cacheable);
     if (region)
         region->map(kernel_page_directory());
@@ -753,7 +752,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange
 OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinLock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
@@ -1109,8 +1108,8 @@ void MemoryManager::dump_kernel_regions()
 
 void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable)
 {
-    ScopedSpinLock lock(s_mm_lock);
     ScopedSpinLock page_lock(kernel_page_directory().get_lock());
+    ScopedSpinLock lock(s_mm_lock);
     auto* pte = ensure_pte(kernel_page_directory(), vaddr);
     VERIFY(pte);
     if (pte->is_writable() == writable)
@@ -236,10 +236,10 @@ bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
 
 void Region::unmap(ShouldDeallocateVirtualRange deallocate_range)
 {
-    ScopedSpinLock lock(s_mm_lock);
     if (!m_page_directory)
         return;
     ScopedSpinLock page_lock(m_page_directory->get_lock());
+    ScopedSpinLock lock(s_mm_lock);
     size_t count = page_count();
     for (size_t i = 0; i < count; ++i) {
         auto vaddr = vaddr_from_page_index(i);
@@ -261,8 +261,8 @@ void Region::set_page_directory(PageDirectory& page_directory)
 
 bool Region::map(PageDirectory& page_directory, ShouldFlushTLB should_flush_tlb)
 {
-    ScopedSpinLock lock(s_mm_lock);
     ScopedSpinLock page_lock(page_directory.get_lock());
+    ScopedSpinLock lock(s_mm_lock);
 
     // FIXME: Find a better place for this sanity check(?)
     if (is_user() && !is_shared()) {