Kernel: Make MM.commit_user_physical_pages() return KResultOr

..and use TRY() at call sites. :^)
This commit is contained in:
Andreas Kling 2021-09-05 21:54:12 +02:00
parent 98dc08fe56
commit 2f790cf78f
Notes: sideshowbarker 2024-07-18 04:39:09 +09:00
3 changed files with 11 additions and 20 deletions

View File

@@ -34,9 +34,7 @@ KResultOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
dbgln_if(COMMIT_DEBUG, "Cloning {:p}, need {} committed cow pages", this, new_cow_pages_needed);
-    auto committed_pages = MM.commit_user_physical_pages(new_cow_pages_needed);
-    if (!committed_pages.has_value())
-        return ENOMEM;
+    auto committed_pages = TRY(MM.commit_user_physical_pages(new_cow_pages_needed));
// Create or replace the committed cow pages. When cloning a previously
// cloned vmobject, we want to essentially "fork", leaving us and the
@@ -44,8 +42,7 @@ KResultOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
// one would keep the one it still has. This ensures that the original
// one and this one, as well as the clone have sufficient resources
// to cow all pages as needed
-    auto new_shared_committed_cow_pages = try_make_ref_counted<SharedCommittedCowPages>(committed_pages.release_value());
+    auto new_shared_committed_cow_pages = try_make_ref_counted<SharedCommittedCowPages>(move(committed_pages));
if (!new_shared_committed_cow_pages)
return ENOMEM;
@@ -79,9 +76,7 @@ KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_s
{
Optional<CommittedPhysicalPageSet> committed_pages;
if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
-        committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
-        if (!committed_pages.has_value())
-            return ENOMEM;
+        committed_pages = TRY(MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))));
}
return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
@@ -100,9 +95,7 @@ KResultOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_purgea
{
Optional<CommittedPhysicalPageSet> committed_pages;
if (strategy == AllocationStrategy::Reserve || strategy == AllocationStrategy::AllocateNow) {
-        committed_pages = MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE)));
-        if (!committed_pages.has_value())
-            return ENOMEM;
+        committed_pages = TRY(MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))));
}
auto vmobject = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, strategy, move(committed_pages)));
@@ -242,9 +235,7 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
return KSuccess;
}
-        m_unused_committed_pages = MM.commit_user_physical_pages(committed_pages_needed);
-        if (!m_unused_committed_pages.has_value())
-            return ENOMEM;
+        m_unused_committed_pages = TRY(MM.commit_user_physical_pages(committed_pages_needed));
for (auto& page : m_physical_pages) {
if (page->is_shared_zero_page())

View File

@@ -69,9 +69,9 @@ UNMAP_AFTER_INIT MemoryManager::MemoryManager()
protect_kernel_image();
// We're temporarily "committing" to two pages that we need to allocate below
-    auto committed_pages = commit_user_physical_pages(2);
+    auto committed_pages = commit_user_physical_pages(2).release_value();
-    m_shared_zero_page = committed_pages->take_one();
+    m_shared_zero_page = committed_pages.take_one();
// We're wasting a page here, we just need a special tag (physical
// address) so that we know when we need to lazily allocate a page
@@ -79,7 +79,7 @@ UNMAP_AFTER_INIT MemoryManager::MemoryManager()
// than potentially failing if no pages are available anymore.
// By using a tag we don't have to query the VMObject for every page
// whether it was committed or not
-    m_lazy_committed_page = committed_pages->take_one();
+    m_lazy_committed_page = committed_pages.take_one();
}
UNMAP_AFTER_INIT MemoryManager::~MemoryManager()
@@ -763,12 +763,12 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, cacheable);
}
-Optional<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)
+KResultOr<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)
{
VERIFY(page_count > 0);
SpinlockLocker lock(s_mm_lock);
if (m_system_memory_info.user_physical_pages_uncommitted < page_count)
-        return {};
+        return ENOMEM;
m_system_memory_info.user_physical_pages_uncommitted -= page_count;
m_system_memory_info.user_physical_pages_committed += page_count;

View File

@@ -171,7 +171,7 @@ public:
Yes
};
-    Optional<CommittedPhysicalPageSet> commit_user_physical_pages(size_t page_count);
+    KResultOr<CommittedPhysicalPageSet> commit_user_physical_pages(size_t page_count);
void uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);