Kernel: Make AnonymousVMObject COW-Bitmap allocation OOM-fallible

Idan Horowitz 2022-02-10 19:17:27 +02:00
parent 871a53db76
commit 8030e2a88f
Notes: sideshowbarker 2024-07-17 19:01:47 +09:00
5 changed files with 35 additions and 21 deletions
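With this commit, the lazily allocated COW bitmap in AnonymousVMObject no longer swallows allocation failures with release_value_but_fixme_should_propagate_errors(): ensure_cow_map(), ensure_or_reset_cow_map(), and set_should_cow() now return ErrorOr<void>, so an out-of-memory condition is propagated to callers via TRY(), while the COW page-fault path wraps the call in MUST() because the map is guaranteed to exist by the time a COW fault arrives. Below is a minimal, self-contained sketch of that pattern; it uses simplified stand-in types (CowBitmapHolder, try_create_bitmap(), std::optional-based errors), not the real AK/Kernel ErrorOr, Bitmap, TRY(), and MUST() APIs.

// Minimal sketch of the OOM-fallible pattern adopted in this commit.
// Hypothetical stand-in types only; not the real AK/Kernel APIs.
#include <cerrno>
#include <cstddef>
#include <cstdio>
#include <optional>
#include <utility>
#include <vector>

using Error = int; // stand-in for AK::Error, carrying an errno code such as ENOMEM

// Stand-in for Bitmap::try_create(): a fallible allocation that reports OOM
// instead of asserting. (Here it always succeeds; imagine it may return nullopt.)
static std::optional<std::vector<bool>> try_create_bitmap(size_t count, bool default_value)
{
    return std::vector<bool>(count, default_value);
}

struct CowBitmapHolder {
    std::optional<std::vector<bool>> cow_map; // stand-in for the lazily created m_cow_map
    size_t page_count { 4 };

    // Mirrors the new ErrorOr<void> ensure_cow_map(): allocate lazily, propagate OOM.
    std::optional<Error> ensure_cow_map()
    {
        if (!cow_map.has_value()) {
            auto maybe_map = try_create_bitmap(page_count, true);
            if (!maybe_map.has_value())
                return ENOMEM; // kernel code writes: m_cow_map = TRY(Bitmap::try_create(...))
            cow_map = std::move(*maybe_map);
        }
        return std::nullopt;
    }

    // Mirrors the new ErrorOr<void> set_should_cow(): ensure the map exists first.
    std::optional<Error> set_should_cow(size_t page_index, bool cow)
    {
        if (auto error = ensure_cow_map(); error.has_value())
            return error; // kernel code writes: TRY(ensure_cow_map())
        (*cow_map)[page_index] = cow;
        return std::nullopt;
    }
};

int main()
{
    CowBitmapHolder holder;

    // First use may allocate, so the failure must be handled or propagated.
    if (auto error = holder.set_should_cow(1, true); error.has_value())
        std::fprintf(stderr, "set_should_cow failed: errno %d\n", *error);

    // On the COW-fault path the map already exists, so clearing a bit cannot
    // allocate; the kernel expresses that with MUST(set_should_cow(..., false)).
    (void)holder.set_should_cow(1, false);
    return 0;
}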

View File

@@ -153,7 +153,7 @@ ErrorOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_re
size_t page_offset_in_source_region = (offset_in_vmobject - source_region.offset_in_vmobject()) / PAGE_SIZE;
for (size_t i = 0; i < region->page_count(); ++i) {
if (source_region.should_cow(page_offset_in_source_region + i))
-            region->set_should_cow(i, true);
+            TRY(region->set_should_cow(i, true));
}
return region;
}

View File

@@ -45,13 +45,13 @@ ErrorOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
// to cow all pages as needed
auto new_shared_committed_cow_pages = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) SharedCommittedCowPages(move(committed_pages))));
auto new_physical_pages = TRY(this->try_clone_physical_pages());
-    auto clone = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(*this, *new_shared_committed_cow_pages, move(new_physical_pages))));
-    m_shared_committed_cow_pages = move(new_shared_committed_cow_pages);
+    auto clone = TRY(try_create_with_shared_cow(*this, *new_shared_committed_cow_pages, move(new_physical_pages)));
// Both original and clone become COW. So create a COW map for ourselves
// or reset all pages to be copied again if we were previously cloned
-    ensure_or_reset_cow_map();
+    TRY(ensure_or_reset_cow_map());
+    m_shared_committed_cow_pages = move(new_shared_committed_cow_pages);
if (m_unused_committed_pages.has_value() && !m_unused_committed_pages->is_empty()) {
// The parent vmobject didn't use up all committed pages. When
@@ -122,6 +122,15 @@ ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_for_phys
return adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(paddr, move(new_physical_pages)));
}
+ErrorOr<NonnullRefPtr<AnonymousVMObject>> AnonymousVMObject::try_create_with_shared_cow(AnonymousVMObject const& other, NonnullRefPtr<SharedCommittedCowPages> shared_committed_cow_pages, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
+{
+    auto vmobject = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) AnonymousVMObject(other, move(shared_committed_cow_pages), move(new_physical_pages))));
+    TRY(vmobject->ensure_cow_map());
+    return vmobject;
+}
AnonymousVMObject::AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, AllocationStrategy strategy, Optional<CommittedPhysicalPageSet> committed_pages)
: VMObject(move(new_physical_pages))
, m_unused_committed_pages(move(committed_pages))
@@ -155,7 +164,6 @@ AnonymousVMObject::AnonymousVMObject(AnonymousVMObject const& other, NonnullRefP
, m_shared_committed_cow_pages(move(shared_committed_cow_pages))
, m_purgeable(other.m_purgeable)
{
-    ensure_cow_map();
}
AnonymousVMObject::~AnonymousVMObject()
@@ -250,19 +258,20 @@ NonnullRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Reg
return m_unused_committed_pages->take_one();
}
-Bitmap& AnonymousVMObject::ensure_cow_map()
+ErrorOr<void> AnonymousVMObject::ensure_cow_map()
{
if (m_cow_map.is_null())
-        m_cow_map = Bitmap::try_create(page_count(), true).release_value_but_fixme_should_propagate_errors();
-    return m_cow_map;
+        m_cow_map = TRY(Bitmap::try_create(page_count(), true));
+    return {};
}
-void AnonymousVMObject::ensure_or_reset_cow_map()
+ErrorOr<void> AnonymousVMObject::ensure_or_reset_cow_map()
{
if (m_cow_map.is_null())
-        ensure_cow_map();
+        TRY(ensure_cow_map());
else
m_cow_map.fill(true);
+    return {};
}
bool AnonymousVMObject::should_cow(size_t page_index, bool is_shared) const
@@ -275,9 +284,11 @@ bool AnonymousVMObject::should_cow(size_t page_index, bool is_shared) const
return !m_cow_map.is_null() && m_cow_map.get(page_index);
}
-void AnonymousVMObject::set_should_cow(size_t page_index, bool cow)
+ErrorOr<void> AnonymousVMObject::set_should_cow(size_t page_index, bool cow)
{
-    ensure_cow_map().set(page_index, cow);
+    TRY(ensure_cow_map());
+    m_cow_map.set(page_index, cow);
+    return {};
}
size_t AnonymousVMObject::cow_pages() const
@@ -307,7 +318,7 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
if (page_slot->ref_count() == 1) {
dbgln_if(PAGE_FAULT_DEBUG, " >> It's a COW page but nobody is sharing it anymore. Remap r/w");
-        set_should_cow(page_index, false);
+        MUST(set_should_cow(page_index, false)); // If we received a COW fault, we already have a cow map allocated, so this is infallible
if (m_shared_committed_cow_pages) {
m_shared_committed_cow_pages->uncommit_one();
@@ -350,7 +361,7 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
MM.unquickmap_page();
}
page_slot = move(page);
-    set_should_cow(page_index, false);
+    MUST(set_should_cow(page_index, false)); // If we received a COW fault, we already have a cow map allocated, so this is infallible
return PageFaultResponse::Continue;
}

View File

@@ -29,7 +29,7 @@ public:
PageFaultResponse handle_cow_fault(size_t, VirtualAddress);
size_t cow_pages() const;
bool should_cow(size_t page_index, bool) const;
-    void set_should_cow(size_t page_index, bool);
+    ErrorOr<void> set_should_cow(size_t page_index, bool);
bool is_purgeable() const { return m_purgeable; }
bool is_volatile() const { return m_volatile; }
@@ -41,6 +41,8 @@ public:
private:
class SharedCommittedCowPages;
+    static ErrorOr<NonnullRefPtr<AnonymousVMObject>> try_create_with_shared_cow(AnonymousVMObject const&, NonnullRefPtr<SharedCommittedCowPages>, FixedArray<RefPtr<PhysicalPage>>&&);
explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&&, AllocationStrategy, Optional<CommittedPhysicalPageSet>);
explicit AnonymousVMObject(PhysicalAddress, FixedArray<RefPtr<PhysicalPage>>&&);
explicit AnonymousVMObject(FixedArray<RefPtr<PhysicalPage>>&&);
@@ -54,8 +56,8 @@ private:
virtual bool is_anonymous() const override { return true; }
-    Bitmap& ensure_cow_map();
-    void ensure_or_reset_cow_map();
+    ErrorOr<void> ensure_cow_map();
+    ErrorOr<void> ensure_or_reset_cow_map();
Optional<CommittedPhysicalPageSet> m_unused_committed_pages;
Bitmap m_cow_map;

View File

@@ -178,11 +178,12 @@ bool Region::should_cow(size_t page_index) const
return static_cast<AnonymousVMObject const&>(vmobject()).should_cow(first_page_index() + page_index, m_shared);
}
-void Region::set_should_cow(size_t page_index, bool cow)
+ErrorOr<void> Region::set_should_cow(size_t page_index, bool cow)
{
VERIFY(!m_shared);
if (vmobject().is_anonymous())
-        static_cast<AnonymousVMObject&>(vmobject()).set_should_cow(first_page_index() + page_index, cow);
+        TRY(static_cast<AnonymousVMObject&>(vmobject()).set_should_cow(first_page_index() + page_index, cow));
+    return {};
}
bool Region::map_individual_page_impl(size_t page_index)

View File

@@ -167,7 +167,7 @@ public:
[[nodiscard]] size_t amount_dirty() const;
[[nodiscard]] bool should_cow(size_t page_index) const;
-    void set_should_cow(size_t page_index, bool);
+    ErrorOr<void> set_should_cow(size_t page_index, bool);
[[nodiscard]] size_t cow_pages() const;