Kernel: Remove global MM lock in favor of SpinlockProtected

Globally shared MemoryManager state is now kept in a GlobalData struct
and wrapped in SpinlockProtected.

A small set of members are left outside the GlobalData struct as they
are only set during boot initialization, and then remain constant.
This allows us to access those members without taking any locks.
Author: Andreas Kling, 2022-08-25 16:46:13 +02:00
Commit: a3b2b20782 (parent: 36225c0ae7)
6 changed files with 470 additions and 456 deletions
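
To make the pattern in the diff easier to follow, here is a minimal, self-contained sketch of the idea, compilable in userspace. It is not the kernel's actual SpinlockProtected implementation: std::mutex stands in for the kernel Spinlock and its LockRank, and GlobalData's fields (physical_regions, physical_pages_used), g_global_data, and count_physical_regions() are invented placeholders for illustration only.

    #include <cstddef>
    #include <mutex>
    #include <vector>

    // Simplified stand-in for the kernel's SpinlockProtected<T>: the wrapped
    // value is private and only reachable through with(), which holds the lock
    // for exactly the duration of the callback.
    template<typename T>
    class SpinlockProtected {
    public:
        template<typename Callback>
        decltype(auto) with(Callback callback)
        {
            std::scoped_lock locker(m_lock);
            return callback(m_value);
        }

    private:
        T m_value {};
        std::mutex m_lock;
    };

    // Mirrors the commit's GlobalData idea: the mutable, globally shared state
    // is gathered into one struct. These fields are placeholders, not the
    // kernel's real members.
    struct GlobalData {
        std::vector<int> physical_regions;
        std::size_t physical_pages_used { 0 };
    };

    SpinlockProtected<GlobalData> g_global_data;

    std::size_t count_physical_regions()
    {
        // The lock is taken implicitly around the callback, replacing the old
        // "SpinlockLocker lock(s_mm_lock);" idiom that every caller had to
        // remember to write.
        return g_global_data.with([](GlobalData& data) {
            return data.physical_regions.size();
        });
    }

The members left outside GlobalData in the diff below (m_kernel_page_directory, m_shared_zero_page, m_lazy_committed_page, m_physical_page_entries, and its count) are written once during early boot and only read afterwards, which is why they can be accessed without the lock.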

MemoryManager.cpp

@@ -61,10 +61,6 @@ ErrorOr<FlatPtr> page_round_up(FlatPtr x)
// the memory manager to be initialized twice!
static MemoryManager* s_the;
// The MM lock protects:
// - all data members of MemoryManager
RecursiveSpinlock s_mm_lock { LockRank::MemoryManager };
MemoryManager& MemoryManager::the()
{
return *s_the;
@@ -81,12 +77,16 @@ static UNMAP_AFTER_INIT VirtualRange kernel_virtual_range()
return VirtualRange { VirtualAddress(kernel_range_start), KERNEL_PD_END - kernel_range_start };
}
MemoryManager::GlobalData::GlobalData()
: region_tree(kernel_virtual_range())
{
}
UNMAP_AFTER_INIT MemoryManager::MemoryManager()
: m_region_tree(LockRank::None, kernel_virtual_range())
: m_global_data(LockRank::None)
{
s_the = this;
SpinlockLocker lock(s_mm_lock);
parse_memory_map();
activate_kernel_page_directory(kernel_page_directory());
protect_kernel_image();
@@ -127,7 +127,6 @@ UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
UNMAP_AFTER_INIT void MemoryManager::unmap_prekernel()
{
SpinlockLocker page_lock(kernel_page_directory().get_lock());
SpinlockLocker mm_lock(s_mm_lock);
auto start = start_of_prekernel_image.page_base().get();
auto end = end_of_prekernel_image.page_base().get();
@@ -183,24 +182,27 @@ UNMAP_AFTER_INIT void MemoryManager::protect_ksyms_after_init()
IterationDecision MemoryManager::for_each_physical_memory_range(Function<IterationDecision(PhysicalMemoryRange const&)> callback)
{
VERIFY(!m_physical_memory_ranges.is_empty());
for (auto& current_range : m_physical_memory_ranges) {
return m_global_data.with([&](auto& global_data) {
VERIFY(!global_data.physical_memory_ranges.is_empty());
for (auto& current_range : global_data.physical_memory_ranges) {
IterationDecision decision = callback(current_range);
if (decision != IterationDecision::Continue)
return decision;
}
return IterationDecision::Continue;
});
}
UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()
{
VERIFY(!m_physical_memory_ranges.is_empty());
m_global_data.with([&](auto& global_data) {
VERIFY(!global_data.physical_memory_ranges.is_empty());
ContiguousReservedMemoryRange range;
for (auto& current_range : m_physical_memory_ranges) {
for (auto& current_range : global_data.physical_memory_ranges) {
if (current_range.type != PhysicalMemoryRangeType::Reserved) {
if (range.start.is_null())
continue;
m_reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, current_range.start.get() - range.start.get() });
global_data.reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, current_range.start.get() - range.start.get() });
range.start.set((FlatPtr) nullptr);
continue;
}
@@ -209,11 +211,12 @@ UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()
}
range.start = current_range.start;
}
if (m_physical_memory_ranges.last().type != PhysicalMemoryRangeType::Reserved)
if (global_data.physical_memory_ranges.last().type != PhysicalMemoryRangeType::Reserved)
return;
if (range.start.is_null())
return;
m_reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, m_physical_memory_ranges.last().start.get() + m_physical_memory_ranges.last().length - range.start.get() });
global_data.reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, global_data.physical_memory_ranges.last().start.get() + global_data.physical_memory_ranges.last().length - range.start.get() });
});
}
bool MemoryManager::is_allowed_to_read_physical_memory_for_userspace(PhysicalAddress start_address, size_t read_length) const
@@ -223,7 +226,9 @@ bool MemoryManager::is_allowed_to_read_physical_memory_for_userspace(PhysicalAdd
if (start_address.offset_addition_would_overflow(read_length))
return false;
auto end_address = start_address.offset(read_length);
for (auto const& current_range : m_reserved_memory_ranges) {
return m_global_data.with([&](auto& global_data) {
for (auto const& current_range : global_data.reserved_memory_ranges) {
if (current_range.start > start_address)
continue;
if (current_range.start.offset(current_range.length) < end_address)
@@ -231,21 +236,23 @@ bool MemoryManager::is_allowed_to_read_physical_memory_for_userspace(PhysicalAdd
return true;
}
return false;
});
}
UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
{
// Register used memory regions that we know of.
m_used_memory_ranges.ensure_capacity(4);
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image)).release_value_but_fixme_should_propagate_errors()) });
m_global_data.with([&](auto& global_data) {
global_data.used_memory_ranges.ensure_capacity(4);
global_data.used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
global_data.used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image)).release_value_but_fixme_should_propagate_errors()) });
if (multiboot_flags & 0x4) {
auto* bootmods_start = multiboot_copy_boot_modules_array;
auto* bootmods_end = bootmods_start + multiboot_copy_boot_modules_count;
for (auto* bootmod = bootmods_start; bootmod < bootmods_end; bootmod++) {
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::BootModule, PhysicalAddress(bootmod->start), PhysicalAddress(bootmod->end) });
global_data.used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::BootModule, PhysicalAddress(bootmod->start), PhysicalAddress(bootmod->end) });
}
}
@@ -274,24 +281,24 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
auto start_address = PhysicalAddress(address);
switch (mmap->type) {
case (MULTIBOOT_MEMORY_AVAILABLE):
m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, start_address, length });
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, start_address, length });
break;
case (MULTIBOOT_MEMORY_RESERVED):
m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Reserved, start_address, length });
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Reserved, start_address, length });
break;
case (MULTIBOOT_MEMORY_ACPI_RECLAIMABLE):
m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_Reclaimable, start_address, length });
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_Reclaimable, start_address, length });
break;
case (MULTIBOOT_MEMORY_NVS):
m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_NVS, start_address, length });
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_NVS, start_address, length });
break;
case (MULTIBOOT_MEMORY_BADRAM):
dmesgln("MM: Warning, detected bad memory range!");
m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::BadMemory, start_address, length });
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::BadMemory, start_address, length });
break;
default:
dbgln("MM: Unknown range!");
m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Unknown, start_address, length });
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Unknown, start_address, length });
break;
}
@@ -320,7 +327,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
// Skip used memory ranges.
bool should_skip = false;
for (auto& used_range : m_used_memory_ranges) {
for (auto& used_range : global_data.used_memory_ranges) {
if (addr.get() >= used_range.start.get() && addr.get() <= used_range.end.get()) {
should_skip = true;
break;
@@ -341,43 +348,45 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
}
for (auto& range : contiguous_physical_ranges) {
m_physical_regions.append(PhysicalRegion::try_create(range.lower, range.upper).release_nonnull());
global_data.physical_regions.append(PhysicalRegion::try_create(range.lower, range.upper).release_nonnull());
}
for (auto& region : m_physical_regions)
m_system_memory_info.physical_pages += region.size();
for (auto& region : global_data.physical_regions)
global_data.system_memory_info.physical_pages += region.size();
register_reserved_ranges();
for (auto& range : m_reserved_memory_ranges) {
for (auto& range : global_data.reserved_memory_ranges) {
dmesgln("MM: Contiguous reserved range from {}, length is {}", range.start, range.length);
}
initialize_physical_pages();
VERIFY(m_system_memory_info.physical_pages > 0);
VERIFY(global_data.system_memory_info.physical_pages > 0);
// We start out with no committed pages
m_system_memory_info.physical_pages_uncommitted = m_system_memory_info.physical_pages;
global_data.system_memory_info.physical_pages_uncommitted = global_data.system_memory_info.physical_pages;
for (auto& used_range : m_used_memory_ranges) {
for (auto& used_range : global_data.used_memory_ranges) {
dmesgln("MM: {} range @ {} - {} (size {:#x})", UserMemoryRangeTypeNames[to_underlying(used_range.type)], used_range.start, used_range.end.offset(-1), used_range.end.as_ptr() - used_range.start.as_ptr());
}
for (auto& region : m_physical_regions) {
for (auto& region : global_data.physical_regions) {
dmesgln("MM: User physical region: {} - {} (size {:#x})", region.lower(), region.upper().offset(-1), PAGE_SIZE * region.size());
region.initialize_zones();
}
});
}
UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
{
m_global_data.with([&](auto& global_data) {
// We assume that the physical page range is contiguous and doesn't contain huge gaps!
PhysicalAddress highest_physical_address;
for (auto& range : m_used_memory_ranges) {
for (auto& range : global_data.used_memory_ranges) {
if (range.end.get() > highest_physical_address.get())
highest_physical_address = range.end;
}
for (auto& region : m_physical_memory_ranges) {
for (auto& region : global_data.physical_memory_ranges) {
auto range_end = PhysicalAddress(region.start).offset(region.length);
if (range_end.get() > highest_physical_address.get())
highest_physical_address = range_end;
@@ -401,8 +410,8 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
// Now that we know how much memory we need for a contiguous array of PhysicalPage instances, find a memory region that can fit it
PhysicalRegion* found_region { nullptr };
Optional<size_t> found_region_index;
for (size_t i = 0; i < m_physical_regions.size(); ++i) {
auto& region = m_physical_regions[i];
for (size_t i = 0; i < global_data.physical_regions.size(); ++i) {
auto& region = global_data.physical_regions[i];
if (region.size() >= physical_page_array_pages_and_page_tables_count) {
found_region = &region;
found_region_index = i;
@@ -415,16 +424,16 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
VERIFY_NOT_REACHED();
}
VERIFY(m_system_memory_info.physical_pages >= physical_page_array_pages_and_page_tables_count);
m_system_memory_info.physical_pages -= physical_page_array_pages_and_page_tables_count;
VERIFY(global_data.system_memory_info.physical_pages >= physical_page_array_pages_and_page_tables_count);
global_data.system_memory_info.physical_pages -= physical_page_array_pages_and_page_tables_count;
if (found_region->size() == physical_page_array_pages_and_page_tables_count) {
// We're stealing the entire region
m_physical_pages_region = m_physical_regions.take(*found_region_index);
global_data.physical_pages_region = global_data.physical_regions.take(*found_region_index);
} else {
m_physical_pages_region = found_region->try_take_pages_from_beginning(physical_page_array_pages_and_page_tables_count);
global_data.physical_pages_region = found_region->try_take_pages_from_beginning(physical_page_array_pages_and_page_tables_count);
}
m_used_memory_ranges.append({ UsedMemoryRangeType::PhysicalPages, m_physical_pages_region->lower(), m_physical_pages_region->upper() });
global_data.used_memory_ranges.append({ UsedMemoryRangeType::PhysicalPages, global_data.physical_pages_region->lower(), global_data.physical_pages_region->upper() });
// Create the bare page directory. This is not a fully constructed page directory and merely contains the allocators!
m_kernel_page_directory = PageDirectory::must_create_kernel_page_directory();
@@ -433,23 +442,22 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
// Carve out the whole page directory covering the kernel image to make MemoryManager::initialize_physical_pages() happy
FlatPtr start_of_range = ((FlatPtr)start_of_kernel_image & ~(FlatPtr)0x1fffff);
FlatPtr end_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
MUST(m_region_tree.with([&](auto& region_tree) { return region_tree.place_specifically(*MUST(Region::create_unbacked()).leak_ptr(), VirtualRange { VirtualAddress(start_of_range), end_of_range - start_of_range }); }));
MUST(global_data.region_tree.place_specifically(*MUST(Region::create_unbacked()).leak_ptr(), VirtualRange { VirtualAddress(start_of_range), end_of_range - start_of_range }));
}
// Allocate a virtual address range for our array
// This looks awkward, but it basically creates a dummy region to occupy the address range permanently.
auto& region = *MUST(Region::create_unbacked()).leak_ptr();
MUST(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(region, RandomizeVirtualAddress::No, physical_page_array_pages * PAGE_SIZE); }));
MUST(global_data.region_tree.place_anywhere(region, RandomizeVirtualAddress::No, physical_page_array_pages * PAGE_SIZE));
auto range = region.range();
// Now that we have our special m_physical_pages_region region with enough pages to hold the entire array
// try to map the entire region into kernel space so we always have it
// We can't use ensure_pte here because it would try to allocate a PhysicalPage and we don't have the array
// mapped yet so we can't create them
SpinlockLocker lock(s_mm_lock);
// Create page tables at the beginning of m_physical_pages_region, followed by the PhysicalPageEntry array
auto page_tables_base = m_physical_pages_region->lower();
auto page_tables_base = global_data.physical_pages_region->lower();
auto physical_page_array_base = page_tables_base.offset(needed_page_table_count * PAGE_SIZE);
auto physical_page_array_current_page = physical_page_array_base.get();
auto virtual_page_array_base = range.base().get();
@@ -518,11 +526,11 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
}
dmesgln("MM: Physical page entries: {}", range);
});
}
PhysicalPageEntry& MemoryManager::get_physical_page_entry(PhysicalAddress physical_address)
{
VERIFY(m_physical_page_entries);
auto physical_page_entry_index = PhysicalAddress::physical_page_index(physical_address.get());
VERIFY(physical_page_entry_index < m_physical_page_entries_count);
return m_physical_page_entries[physical_page_entry_index];
@@ -531,7 +539,6 @@ PhysicalPageEntry& MemoryManager::get_physical_page_entry(PhysicalAddress physic
PhysicalAddress MemoryManager::get_physical_address(PhysicalPage const& physical_page)
{
PhysicalPageEntry const& physical_page_entry = *reinterpret_cast<PhysicalPageEntry const*>((u8 const*)&physical_page - __builtin_offsetof(PhysicalPageEntry, allocated.physical_page));
VERIFY(m_physical_page_entries);
size_t physical_page_entry_index = &physical_page_entry - m_physical_page_entries;
VERIFY(physical_page_entry_index < m_physical_page_entries_count);
return PhysicalAddress((PhysicalPtr)physical_page_entry_index * PAGE_SIZE);
@@ -642,7 +649,9 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress address)
if (is_user_address(address))
return nullptr;
return MM.m_region_tree.with([&](auto& region_tree) { return region_tree.find_region_containing(address); });
return MM.m_global_data.with([&](auto& global_data) {
return global_data.region_tree.find_region_containing(address);
});
}
Region* MemoryManager::find_user_region_from_vaddr(AddressSpace& space, VirtualAddress vaddr)
@@ -742,7 +751,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(
name_kstring = TRY(KString::try_create(name));
auto vmobject = TRY(AnonymousVMObject::try_create_physically_contiguous_with_size(size));
auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
TRY(region->map(kernel_page_directory()));
return region;
}
@@ -785,7 +794,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size
name_kstring = TRY(KString::try_create(name));
auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
TRY(region->map(kernel_page_directory()));
return region;
}
@@ -798,7 +807,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(PhysicalAdd
if (!name.is_null())
name_kstring = TRY(KString::try_create(name));
auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, PAGE_SIZE); }));
TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, PAGE_SIZE); }));
TRY(region->map(kernel_page_directory()));
return region;
}
@@ -812,7 +821,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobje
name_kstring = TRY(KString::try_create(name));
auto region = TRY(Region::create_unplaced(vmobject, 0, move(name_kstring), access, cacheable));
TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
TRY(region->map(kernel_page_directory()));
return region;
}
@@ -820,9 +829,9 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobje
ErrorOr<CommittedPhysicalPageSet> MemoryManager::commit_physical_pages(size_t page_count)
{
VERIFY(page_count > 0);
SpinlockLocker lock(s_mm_lock);
if (m_system_memory_info.physical_pages_uncommitted < page_count) {
dbgln("MM: Unable to commit {} pages, have only {}", page_count, m_system_memory_info.physical_pages_uncommitted);
return m_global_data.with([&](auto& global_data) -> ErrorOr<CommittedPhysicalPageSet> {
if (global_data.system_memory_info.physical_pages_uncommitted < page_count) {
dbgln("MM: Unable to commit {} pages, have only {}", page_count, global_data.system_memory_info.physical_pages_uncommitted);
Process::for_each([&](Process const& process) {
size_t amount_resident = 0;
@@ -845,64 +854,67 @@ ErrorOr<CommittedPhysicalPageSet> MemoryManager::commit_physical_pages(size_t pa
return ENOMEM;
}
m_system_memory_info.physical_pages_uncommitted -= page_count;
m_system_memory_info.physical_pages_committed += page_count;
global_data.system_memory_info.physical_pages_uncommitted -= page_count;
global_data.system_memory_info.physical_pages_committed += page_count;
return CommittedPhysicalPageSet { {}, page_count };
});
}
void MemoryManager::uncommit_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count)
{
VERIFY(page_count > 0);
SpinlockLocker lock(s_mm_lock);
VERIFY(m_system_memory_info.physical_pages_committed >= page_count);
m_global_data.with([&](auto& global_data) {
VERIFY(global_data.system_memory_info.physical_pages_committed >= page_count);
m_system_memory_info.physical_pages_uncommitted += page_count;
m_system_memory_info.physical_pages_committed -= page_count;
global_data.system_memory_info.physical_pages_uncommitted += page_count;
global_data.system_memory_info.physical_pages_committed -= page_count;
});
}
void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
{
SpinlockLocker lock(s_mm_lock);
return m_global_data.with([&](auto& global_data) {
// Are we returning a user page?
for (auto& region : m_physical_regions) {
for (auto& region : global_data.physical_regions) {
if (!region.contains(paddr))
continue;
region.return_page(paddr);
--m_system_memory_info.physical_pages_used;
--global_data.system_memory_info.physical_pages_used;
// Always return pages to the uncommitted pool. Pages that were
// committed and allocated are only freed upon request. Once
// returned there is no guarantee being able to get them back.
++m_system_memory_info.physical_pages_uncommitted;
++global_data.system_memory_info.physical_pages_uncommitted;
return;
}
PANIC("MM: deallocate_physical_page couldn't figure out region for page @ {}", paddr);
});
}
RefPtr<PhysicalPage> MemoryManager::find_free_physical_page(bool committed)
{
SpinlockLocker mm_locker(s_mm_lock);
RefPtr<PhysicalPage> page;
m_global_data.with([&](auto& global_data) {
if (committed) {
// Draw from the committed pages pool. We should always have these pages available
VERIFY(m_system_memory_info.physical_pages_committed > 0);
m_system_memory_info.physical_pages_committed--;
VERIFY(global_data.system_memory_info.physical_pages_committed > 0);
global_data.system_memory_info.physical_pages_committed--;
} else {
// We need to make sure we don't touch pages that we have committed to
if (m_system_memory_info.physical_pages_uncommitted == 0)
return {};
m_system_memory_info.physical_pages_uncommitted--;
if (global_data.system_memory_info.physical_pages_uncommitted == 0)
return;
global_data.system_memory_info.physical_pages_uncommitted--;
}
for (auto& region : m_physical_regions) {
for (auto& region : global_data.physical_regions) {
page = region.take_free_page();
if (!page.is_null()) {
++m_system_memory_info.physical_pages_used;
++global_data.system_memory_info.physical_pages_used;
break;
}
}
});
VERIFY(!page.is_null());
return page;
}
@@ -921,7 +933,7 @@ NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_physical_page(Badg
ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
{
SpinlockLocker lock(s_mm_lock);
return m_global_data.with([&](auto&) -> ErrorOr<NonnullRefPtr<PhysicalPage>> {
auto page = find_free_physical_page(false);
bool purged_pages = false;
@@ -973,39 +985,36 @@ ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_physical_page(Shoul
if (did_purge)
*did_purge = purged_pages;
return page.release_nonnull();
});
}
ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
{
VERIFY(!(size % PAGE_SIZE));
SpinlockLocker mm_lock(s_mm_lock);
size_t page_count = ceil_div(size, static_cast<size_t>(PAGE_SIZE));
auto physical_pages = TRY(m_global_data.with([&](auto& global_data) -> ErrorOr<NonnullRefPtrVector<PhysicalPage>> {
// We need to make sure we don't touch pages that we have committed to
if (m_system_memory_info.physical_pages_uncommitted < page_count)
if (global_data.system_memory_info.physical_pages_uncommitted < page_count)
return ENOMEM;
for (auto& physical_region : m_physical_regions) {
for (auto& physical_region : global_data.physical_regions) {
auto physical_pages = physical_region.take_contiguous_free_pages(page_count);
if (!physical_pages.is_empty()) {
// Note: The locking semantics are a bit awkward here, the region destructor can
// deadlock due to invalid lock ordering if we still hold the s_mm_lock when the
// region is constructed or destructed. Since we are now done enumerating anyway,
// it's safe to just unlock here and have MM.allocate_kernel_region do the right thing.
mm_lock.unlock();
{
auto cleanup_region = TRY(MM.allocate_kernel_region(physical_pages[0].paddr(), PAGE_SIZE * page_count, "MemoryManager Allocation Sanitization"sv, Region::Access::Read | Region::Access::Write));
memset(cleanup_region->vaddr().as_ptr(), 0, PAGE_SIZE * page_count);
}
m_system_memory_info.physical_pages_uncommitted -= page_count;
m_system_memory_info.physical_pages_used += page_count;
global_data.system_memory_info.physical_pages_uncommitted -= page_count;
global_data.system_memory_info.physical_pages_used += page_count;
return physical_pages;
}
}
dmesgln("MM: no contiguous physical pages available");
return ENOMEM;
}));
{
auto cleanup_region = TRY(MM.allocate_kernel_region(physical_pages[0].paddr(), PAGE_SIZE * page_count, {}, Region::Access::Read | Region::Access::Write));
memset(cleanup_region->vaddr().as_ptr(), 0, PAGE_SIZE * page_count);
}
return physical_pages;
}
void MemoryManager::enter_process_address_space(Process& process)
@@ -1114,7 +1123,7 @@ bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vadd
void MemoryManager::unregister_kernel_region(Region& region)
{
VERIFY(region.is_kernel());
m_region_tree.with([&](auto& region_tree) { region_tree.remove(region); });
m_global_data.with([&](auto& global_data) { global_data.region_tree.remove(region); });
}
void MemoryManager::dump_kernel_regions()
@@ -1127,8 +1136,8 @@ void MemoryManager::dump_kernel_regions()
#endif
dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
addr_padding, addr_padding, addr_padding);
m_region_tree.with([&](auto& region_tree) {
for (auto& region : region_tree.regions()) {
m_global_data.with([&](auto& global_data) {
for (auto& region : global_data.region_tree.regions()) {
dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
region.vaddr().get(),
region.vaddr().offset(region.size() - 1).get(),
@@ -1195,8 +1204,16 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::create_identity_mapped_reg
ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_unbacked_region_anywhere(size_t size, size_t alignment)
{
auto region = TRY(Region::create_unbacked());
TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, alignment); }));
TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, alignment); }));
return region;
}
MemoryManager::SystemMemoryInfo MemoryManager::get_system_memory_info()
{
return m_global_data.with([&](auto& global_data) {
auto physical_pages_unused = global_data.system_memory_info.physical_pages_committed + global_data.system_memory_info.physical_pages_uncommitted;
VERIFY(global_data.system_memory_info.physical_pages == (global_data.system_memory_info.physical_pages_used + physical_pages_unused));
return global_data.system_memory_info;
});
}
}

MemoryManager.h

@@ -93,9 +93,6 @@ struct MemoryManagerData {
u32 m_quickmap_prev_flags;
};
// NOLINTNEXTLINE(readability-redundant-declaration) FIXME: Why do we declare this here *and* in Thread.h?
extern RecursiveSpinlock s_mm_lock;
// This class represents a set of committed physical pages.
// When you ask MemoryManager to commit pages for you, you get one of these in return.
// You can allocate pages from it via `take_one()`
@@ -192,12 +189,7 @@ public:
PhysicalSize physical_pages_uncommitted { 0 };
};
SystemMemoryInfo get_system_memory_info()
{
SpinlockLocker lock(s_mm_lock);
verify_system_memory_info_consistency();
return m_system_memory_info;
}
SystemMemoryInfo get_system_memory_info();
template<IteratorFunction<VMObject&> Callback>
static void for_each_vmobject(Callback callback)
@@ -230,7 +222,14 @@ public:
PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
Vector<UsedMemoryRange> const& used_memory_ranges() { return m_used_memory_ranges; }
template<typename Callback>
void for_each_used_memory_range(Callback callback)
{
m_global_data.template with([&](auto& global_data) {
for (auto& range : global_data.used_memory_ranges)
callback(range);
});
}
bool is_allowed_to_read_physical_memory_for_userspace(PhysicalAddress, size_t read_length) const;
PhysicalPageEntry& get_physical_page_entry(PhysicalAddress);
@@ -278,29 +277,34 @@ private:
};
void release_pte(PageDirectory&, VirtualAddress, IsLastPTERelease);
ALWAYS_INLINE void verify_system_memory_info_consistency() const
{
auto physical_pages_unused = m_system_memory_info.physical_pages_committed + m_system_memory_info.physical_pages_uncommitted;
VERIFY(m_system_memory_info.physical_pages == (m_system_memory_info.physical_pages_used + physical_pages_unused));
}
// NOTE: These are outside of GlobalData as they are only assigned on startup,
// and then never change. Atomic ref-counting covers that case without
// the need for additional synchronization.
LockRefPtr<PageDirectory> m_kernel_page_directory;
RefPtr<PhysicalPage> m_shared_zero_page;
RefPtr<PhysicalPage> m_lazy_committed_page;
SystemMemoryInfo m_system_memory_info;
NonnullOwnPtrVector<PhysicalRegion> m_physical_regions;
OwnPtr<PhysicalRegion> m_physical_pages_region;
// NOTE: These are outside of GlobalData as they are initialized on startup,
// and then never change.
PhysicalPageEntry* m_physical_page_entries { nullptr };
size_t m_physical_page_entries_count { 0 };
SpinlockProtected<RegionTree> m_region_tree;
struct GlobalData {
GlobalData();
Vector<UsedMemoryRange> m_used_memory_ranges;
Vector<PhysicalMemoryRange> m_physical_memory_ranges;
Vector<ContiguousReservedMemoryRange> m_reserved_memory_ranges;
SystemMemoryInfo system_memory_info;
NonnullOwnPtrVector<PhysicalRegion> physical_regions;
OwnPtr<PhysicalRegion> physical_pages_region;
RegionTree region_tree;
Vector<UsedMemoryRange> used_memory_ranges;
Vector<PhysicalMemoryRange> physical_memory_ranges;
Vector<ContiguousReservedMemoryRange> reserved_memory_ranges;
};
SpinlockProtected<GlobalData> m_global_data;
};
inline bool is_user_address(VirtualAddress vaddr)

Region.cpp

@@ -465,7 +465,6 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
{
VERIFY(vmobject().is_inode());
VERIFY(!s_mm_lock.is_locked_by_current_processor());
VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());

Scheduler.cpp

@@ -261,10 +261,6 @@ void Scheduler::yield()
void Scheduler::context_switch(Thread* thread)
{
if (Memory::s_mm_lock.is_locked_by_current_processor()) {
PANIC("In context switch while holding Memory::s_mm_lock");
}
thread->did_schedule();
auto* from_thread = Thread::current();

RamdiskController.cpp

@@ -41,7 +41,7 @@ RamdiskController::RamdiskController()
{
// Populate ramdisk controllers from Multiboot boot modules, if any.
size_t count = 0;
for (auto& used_memory_range : MM.used_memory_ranges()) {
MM.for_each_used_memory_range([&](auto& used_memory_range) {
if (used_memory_range.type == Memory::UsedMemoryRangeType::BootModule) {
size_t length = Memory::page_round_up(used_memory_range.end.get()).release_value_but_fixme_should_propagate_errors() - used_memory_range.start.get();
auto region_or_error = MM.allocate_kernel_region(used_memory_range.start, length, "Ramdisk"sv, Memory::Region::Access::ReadWrite);
@@ -52,7 +52,7 @@ }
}
count++;
}
}
});
}
RamdiskController::~RamdiskController() = default;

Thread.cpp

@@ -144,7 +144,6 @@ Thread::BlockResult Thread::block_impl(BlockTimeout const& timeout, Blocker& blo
VERIFY(!Processor::current_in_irq());
VERIFY(this == Thread::current());
ScopedCritical critical;
VERIFY(!Memory::s_mm_lock.is_locked_by_current_processor());
SpinlockLocker scheduler_lock(g_scheduler_lock);
@@ -259,7 +258,6 @@ void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock>& lock_lock, u32
VERIFY(!Processor::current_in_irq());
VERIFY(this == Thread::current());
ScopedCritical critical;
VERIFY(!Memory::s_mm_lock.is_locked_by_current_processor());
SpinlockLocker scheduler_lock(g_scheduler_lock);
SpinlockLocker block_lock(m_block_lock);