Kernel: Use a shared physical page for zero-filled pages until written
This patch adds a globally shared zero-filled PhysicalPage that is mapped into every slot of every zero-filled AnonymousVMObject until that slot is written to, achieving CoW-like zero-filled pages.

Initial testing shows that this doesn't actually achieve any sharing yet, but it seems like a good design regardless, since it may reduce the number of page faults taken by programs.

If you look at the refcount of MM.shared_zero_page(), it will be quite high, but that's just because everything maps it everywhere. If you want to see the "real" refcount, you can build with the MAP_SHARED_ZERO_PAGE_LAZILY flag, and we'll defer mapping of the shared zero page until the first not-present (NP) read fault. I've left this behavior behind a flag for future testing of this code.
Parent: a4d857e3c5
Commit: c624d3875e
Notes: sideshowbarker, 2024-07-19 09:19:00 +09:00
  Author: https://github.com/awesomekling
  Commit: https://github.com/SerenityOS/serenity/commit/c624d3875ec
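To make the mechanism concrete, here is a rough userspace model of the idea (standard C++, with std::shared_ptr standing in for PhysicalPage refcounting; an illustrative sketch, not the kernel code): every slot of a fresh anonymous object points at one immutable zero page, reads go through that shared page, and the first write to a slot swaps in a private zero-filled page. It also shows the inflated use count the commit message mentions, since every untouched slot holds a reference to the same page.

#include <array>
#include <cstdio>
#include <memory>
#include <vector>

static constexpr size_t page_size = 4096;
using Page = std::array<unsigned char, page_size>;

// One global, immutable zero-filled page shared by every untouched slot.
static const std::shared_ptr<const Page> shared_zero_page = std::make_shared<const Page>();

struct ToyAnonymousObject {
    std::vector<std::shared_ptr<const Page>> slots;

    explicit ToyAnonymousObject(size_t page_count)
        : slots(page_count, shared_zero_page) // every slot starts out pointing at the zero page
    {
    }

    // Reads are always safe through the shared zero page.
    unsigned char read(size_t page_index, size_t offset) const
    {
        return (*slots[page_index])[offset];
    }

    // The first write to a slot "breaks CoW": the slot gets its own zero-filled page.
    void write(size_t page_index, size_t offset, unsigned char value)
    {
        if (slots[page_index] == shared_zero_page)
            slots[page_index] = std::make_shared<Page>(); // fresh private page, value-initialized to zero
        // Only private pages are ever written, so casting away const here is safe.
        (*std::const_pointer_cast<Page>(slots[page_index]))[offset] = value;
    }
};

int main()
{
    ToyAnonymousObject object(1024);
    std::printf("zero page use_count before any write: %ld\n", shared_zero_page.use_count());
    object.write(3, 0, 42); // only slot 3 gets a private page
    std::printf("zero page use_count after one write:  %ld\n", shared_zero_page.use_count());
    std::printf("slot 3 byte 0 = %d, slot 4 byte 0 = %d\n", object.read(3, 0), object.read(4, 0));
    return 0;
}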
@@ -25,6 +25,7 @@
 */

 #include <Kernel/VM/AnonymousVMObject.h>
 #include <Kernel/VM/MemoryManager.h>
 #include <Kernel/VM/PhysicalPage.h>

 NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_with_size(size_t size)
@@ -51,6 +52,10 @@ NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_with_physical_page(Ph
 AnonymousVMObject::AnonymousVMObject(size_t size)
     : VMObject(size)
 {
+#ifndef MAP_SHARED_ZERO_PAGE_LAZILY
+    for (size_t i = 0; i < page_count(); ++i)
+        physical_pages()[i] = MM.shared_zero_page();
+#endif
 }

 AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)
@@ -53,6 +53,8 @@ MemoryManager::MemoryManager()
     write_cr3(kernel_page_directory().cr3());
     setup_low_identity_mapping();
     protect_kernel_image();
+
+    m_shared_zero_page = allocate_user_physical_page();
 }

 MemoryManager::~MemoryManager()
@@ -297,7 +299,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
         region = Region::create_user_accessible(range, name, access, cacheable);
     else
         region = Region::create_kernel_only(range, name, access, cacheable);
     region->set_page_directory(kernel_page_directory());
     region->map(kernel_page_directory());
     if (should_commit)
         region->commit();
     return region;
@@ -132,6 +132,8 @@ public:

     void dump_kernel_regions();

+    PhysicalPage& shared_zero_page() { return *m_shared_zero_page; }
+
 private:
     MemoryManager();
     ~MemoryManager();
@@ -172,6 +174,8 @@ private:
     RefPtr<PageDirectory> m_kernel_page_directory;
     RefPtr<PhysicalPage> m_low_page_table;

+    RefPtr<PhysicalPage> m_shared_zero_page;
+
     unsigned m_user_physical_pages { 0 };
     unsigned m_user_physical_pages_used { 0 };
     unsigned m_super_physical_pages { 0 };
@@ -223,3 +227,8 @@ inline bool is_user_range(VirtualAddress vaddr, size_t size)
         return false;
     return is_user_address(vaddr) && is_user_address(vaddr.offset(size));
 }
+
+inline bool PhysicalPage::is_shared_zero_page() const
+{
+    return this == &MM.shared_zero_page();
+}
@@ -60,6 +60,8 @@ public:

     u16 ref_count() const { return m_retain_count; }

+    bool is_shared_zero_page() const;
+
 private:
     PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_return_to_freelist = true);
     ~PhysicalPage() {}
@@ -144,7 +144,7 @@ bool Region::commit(size_t page_index)
     dbgprintf("MM: commit single page (%zu) in Region %p (VMO=%p) at V%p\n", page_index, vmobject().page_count(), this, &vmobject(), vaddr().get());
 #endif
     auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index];
-    if (!vmobject_physical_page_entry.is_null())
+    if (!vmobject_physical_page_entry.is_null() && !vmobject_physical_page_entry->is_shared_zero_page())
         return true;
     auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
     if (!physical_page) {
@@ -177,7 +177,8 @@ size_t Region::amount_resident() const
 {
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
-        if (m_vmobject->physical_pages()[first_page_index() + i])
+        auto& physical_page = m_vmobject->physical_pages()[first_page_index() + i];
+        if (physical_page && !physical_page->is_shared_zero_page())
             bytes += PAGE_SIZE;
     }
     return bytes;
@@ -188,7 +189,7 @@ size_t Region::amount_shared() const
     size_t bytes = 0;
     for (size_t i = 0; i < page_count(); ++i) {
         auto& physical_page = m_vmobject->physical_pages()[first_page_index() + i];
-        if (physical_page && physical_page->ref_count() > 1)
+        if (physical_page && physical_page->ref_count() > 1 && !physical_page->is_shared_zero_page())
             bytes += PAGE_SIZE;
     }
     return bytes;
@@ -231,6 +232,8 @@ NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, const Strin

 bool Region::should_cow(size_t page_index) const
 {
+    if (vmobject().physical_pages()[page_index]->is_shared_zero_page())
+        return true;
     if (m_shared)
         return false;
     return m_cow_map && m_cow_map->get(page_index);
@@ -339,16 +342,28 @@ PageFaultResponse Region::handle_fault(const PageFault& fault)
 #endif
             return handle_inode_fault(page_index_in_region);
         }
-#ifdef PAGE_FAULT_DEBUG
-        dbgprintf("NP(zero) fault in Region{%p}[%u]\n", this, page_index_in_region);
-#endif
+#ifdef MAP_SHARED_ZERO_PAGE_LAZILY
+        if (fault.is_read()) {
+            vmobject().physical_pages()[first_page_index() + page_index_in_region] = MM.shared_zero_page();
+            remap_page(page_index_in_region);
+            return PageFaultResponse::Continue;
+        }
         return handle_zero_fault(page_index_in_region);
+#else
+        ASSERT_NOT_REACHED();
+#endif
     }
     ASSERT(fault.type() == PageFault::Type::ProtectionViolation);
     if (fault.access() == PageFault::Access::Write && is_writable() && should_cow(page_index_in_region)) {
 #ifdef PAGE_FAULT_DEBUG
         dbgprintf("PV(cow) fault in Region{%p}[%u]\n", this, page_index_in_region);
 #endif
+        if (vmobject().physical_pages()[first_page_index() + page_index_in_region]->is_shared_zero_page()) {
+#ifdef PAGE_FAULT_DEBUG
+            dbgprintf("NP(zero) fault in Region{%p}[%u]\n", this, page_index_in_region);
+#endif
+            return handle_zero_fault(page_index_in_region);
+        }
         return handle_cow_fault(page_index_in_region);
     }
     kprintf("PV(error) fault in Region{%p}[%u] at V%p\n", this, page_index_in_region, fault.vaddr().get());
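Taken together with should_cow() above, this hunk gives a zero-filled page two ways to come to life. In the default (eager) build, every slot already maps the shared zero page, so the first faulting access is a write protection violation; should_cow() reports true for the zero page and the handler routes it to handle_zero_fault(), so the process gets a fresh zeroed page instead of pointlessly copying a page of zeroes. With MAP_SHARED_ZERO_PAGE_LAZILY, a not-present read fault maps the shared zero page and continues, while a not-present write fault zero-fills immediately. A condensed model of that dispatch (standard C++, illustrative only; the enums and flags are stand-ins for the kernel's PageFault and Region state, not its real API):

#include <cstdio>

enum class FaultType { NotPresent, ProtectionViolation };
enum class Access { Read, Write };
enum class Outcome { MapSharedZeroPage, AllocateFreshZeroedPage, CopyPage, Error };

// slot_is_zero_page: the faulting slot currently points at the shared zero page.
// slot_is_cow:       the slot is marked copy-on-write for some other reason (e.g. after fork).
static Outcome decide(FaultType type, Access access, bool slot_is_zero_page, bool slot_is_cow)
{
    if (type == FaultType::NotPresent) {
        // Lazy mode: reads are satisfied by the shared zero page,
        // writes get a private zero-filled page right away.
        if (access == Access::Read)
            return Outcome::MapSharedZeroPage;
        return Outcome::AllocateFreshZeroedPage;
    }
    // Write protection violation: break CoW.
    if (access == Access::Write) {
        if (slot_is_zero_page)
            return Outcome::AllocateFreshZeroedPage; // no point copying a page of zeroes
        if (slot_is_cow)
            return Outcome::CopyPage;
    }
    return Outcome::Error;
}

int main()
{
    std::printf("lazy first read -> %d\n", static_cast<int>(decide(FaultType::NotPresent, Access::Read, false, false)));
    std::printf("first write     -> %d\n", static_cast<int>(decide(FaultType::ProtectionViolation, Access::Write, true, false)));
    return 0;
}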
@@ -366,7 +381,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)

     auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index_in_region];

-    if (!vmobject_physical_page_entry.is_null()) {
+    if (!vmobject_physical_page_entry.is_null() && !vmobject_physical_page_entry->is_shared_zero_page()) {
 #ifdef PAGE_FAULT_DEBUG
         dbgprintf("MM: zero_page() but page already present. Fine with me!\n");
 #endif
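For a sense of why this design is attractive, the same "zero pages are free until written" behavior can be observed with an ordinary POSIX anonymous mapping (a host-side demo for comparison, not SerenityOS code): reads of untouched pages simply return zeroes without giving the process a private page for each one, and only written pages need real memory.

#include <cstdio>
#include <cstring>
#include <sys/mman.h>

int main()
{
    const size_t length = 64 * 1024 * 1024; // 64 MiB of anonymous, zero-filled memory
    void* memory = mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (memory == MAP_FAILED) {
        std::perror("mmap");
        return 1;
    }
    auto* bytes = static_cast<unsigned char*>(memory);

    // Touch every page with a read: each access is satisfied by zero-filled
    // backing (on Linux, a kernel-wide shared zero page), so no private pages
    // have to be allocated for the mapping yet.
    unsigned long sum = 0;
    for (size_t i = 0; i < length; i += 4096)
        sum += bytes[i];
    std::printf("sum over untouched pages: %lu (all zeroes)\n", sum);

    // Writing is what actually forces private pages to be allocated.
    std::memset(bytes, 1, length / 2);

    munmap(memory, length);
    return 0;
}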