Kernel: Add MAP_NORESERVE support to mmap

Rather than lazily committing regions by default, we now commit
the entire region unless MAP_NORESERVE is specified.

This solves random crashes in low-memory situations where e.g. the
malloc heap has allocated memory, but touching pages that have never
been used before triggers a crash once no more physical memory is
available.

Use this flag to create large regions without actually committing
the backing memory. madvise() can be used to commit arbitrary areas
of such regions after creating them.
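
As an illustrative userspace sketch (not part of this commit's diff; the
specific madvise() advice used to commit pages is kernel-defined and
elided here), a program can reserve a large sparse region like so:

#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

int main()
{
    const size_t size = 256 * 1024 * 1024; // a 256 MiB reservation

    // With MAP_NORESERVE, the kernel maps the range but does not commit
    // backing pages up front, so the call can succeed even when
    // committing 256 MiB outright would fail.
    void* base = mmap(nullptr, size, PROT_READ | PROT_WRITE,
        MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
    if (base == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    // Commit only the areas that will actually be touched, via madvise()
    // as described above (advice constant elided; see the kernel headers).

    munmap(base, size);
    return 0;
}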
Author: Tom, 2020-09-03 15:06:25 -06:00 (committed by Andreas Kling)
parent bc5d6992a4
commit c3451899bc
7 changed files with 30 additions and 15 deletions

@@ -146,8 +146,9 @@ Region* Process::allocate_region(const Range& range, const String& name, int pro
     ASSERT(range.is_valid());
     auto vmobject = AnonymousVMObject::create_with_size(range.size());
     auto region = Region::create_user_accessible(this, range, vmobject, 0, name, prot_to_region_access_flags(prot));
-    region->map(page_directory());
-    if (should_commit && !region->commit())
+    if (!region->map(page_directory()))
+        return nullptr;
+    if (should_commit && region->can_commit() && !region->commit())
         return nullptr;
     return &add_region(move(region));
 }
@@ -160,7 +161,7 @@ Region* Process::allocate_region(VirtualAddress vaddr, size_t size, const String
     return allocate_region(range, name, prot, should_commit);
 }
 
-Region* Process::allocate_region_with_vmobject(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot)
+Region* Process::allocate_region_with_vmobject(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot, bool should_commit)
 {
     ASSERT(range.is_valid());
     size_t end_in_vmobject = offset_in_vmobject + range.size();
@@ -178,16 +179,19 @@ Region* Process::allocate_region_with_vmobject(const Range& range, NonnullRefPtr
     }
     offset_in_vmobject &= PAGE_MASK;
     auto& region = add_region(Region::create_user_accessible(this, range, move(vmobject), offset_in_vmobject, name, prot_to_region_access_flags(prot)));
-    region.map(page_directory());
+    if (!region.map(page_directory()))
+        return nullptr;
+    if (should_commit && region.can_commit() && !region.commit())
+        return nullptr;
     return &region;
 }
 
-Region* Process::allocate_region_with_vmobject(VirtualAddress vaddr, size_t size, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot)
+Region* Process::allocate_region_with_vmobject(VirtualAddress vaddr, size_t size, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot, bool should_commit)
 {
     auto range = allocate_range(vaddr, size);
     if (!range.is_valid())
         return nullptr;
-    return allocate_region_with_vmobject(range, move(vmobject), offset_in_vmobject, name, prot);
+    return allocate_region_with_vmobject(range, move(vmobject), offset_in_vmobject, name, prot, should_commit);
 }
 
 bool Process::deallocate_region(Region& region)

@@ -436,9 +436,9 @@ public:
         return m_euid == 0;
     }
 
-    Region* allocate_region_with_vmobject(VirtualAddress, size_t, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot);
+    Region* allocate_region_with_vmobject(VirtualAddress, size_t, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot, bool should_commit = true);
     Region* allocate_region(VirtualAddress, size_t, const String& name, int prot = PROT_READ | PROT_WRITE, bool should_commit = true);
-    Region* allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot);
+    Region* allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot, bool should_commit = true);
     Region* allocate_region(const Range&, const String& name, int prot = PROT_READ | PROT_WRITE, bool should_commit = true);
     bool deallocate_region(Region& region);

@@ -120,6 +120,7 @@ void* Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
     bool map_private = flags & MAP_PRIVATE;
     bool map_stack = flags & MAP_STACK;
     bool map_fixed = flags & MAP_FIXED;
+    bool map_noreserve = flags & MAP_NORESERVE;
 
     if (map_shared && map_private)
         return (void*)-EINVAL;
@@ -148,10 +149,9 @@ void* Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
         if (!region && (!map_fixed && addr != 0))
             region = allocate_region_with_vmobject({}, size, vmobject, 0, !name.is_null() ? name : "mmap (purgeable)", prot);
     } else if (map_anonymous) {
-        region = allocate_region(range.value(), !name.is_null() ? name : "mmap", prot, false);
+        region = allocate_region(range.value(), !name.is_null() ? name : "mmap", prot, !map_noreserve);
         if (!region && (!map_fixed && addr != 0))
-            region = allocate_region(allocate_range({}, size), !name.is_null() ? name : "mmap", prot, false);
+            region = allocate_region(allocate_range({}, size), !name.is_null() ? name : "mmap", prot, !map_noreserve);
     } else {
         if (offset < 0)
             return (void*)-EINVAL;

@@ -93,6 +93,7 @@ enum {
 #define MAP_ANON MAP_ANONYMOUS
 #define MAP_STACK 0x40
 #define MAP_PURGEABLE 0x80
+#define MAP_NORESERVE 0x100
 
 #define PROT_READ 0x1
 #define PROT_WRITE 0x2

@@ -291,7 +291,7 @@ int PurgeableVMObject::purge_impl()
             } else {
                 klog() << "Purged " << purged_in_range << " pages from region " << region.name() << " (no ownership) at " << region.vaddr_from_page_index(range.base) << " - " << region.vaddr_from_page_index(range.base + range.count);
             }
-            region.remap_page_range(range.base, range.count);
+            region.remap_page_range(range.base, range.count, false);
         }
     });
 }

@@ -187,13 +187,18 @@ auto Region::set_volatile(VirtualAddress vaddr, size_t size, bool is_volatile, b
             // Attempt to remap the page range. We want to make sure we have
             // enough memory, if not we need to inform the caller of that
             // fact
-            if (!remap_page_range(first_page_index, last_page_index - first_page_index))
+            if (!remap_page_range(first_page_index, last_page_index - first_page_index, true))
                 return SetVolatileError::OutOfMemory;
         }
     }
     return SetVolatileError::Success;
 }
 
+bool Region::can_commit() const
+{
+    return vmobject().is_anonymous() || vmobject().is_purgeable();
+}
+
 bool Region::commit()
 {
     ScopedSpinLock lock(s_mm_lock);
@@ -339,7 +344,7 @@ bool Region::map_individual_page_impl(size_t page_index)
     return true;
 }
 
-bool Region::remap_page_range(size_t page_index, size_t page_count)
+bool Region::remap_page_range(size_t page_index, size_t page_count, bool do_commit)
 {
     bool success = true;
     ScopedSpinLock lock(s_mm_lock);
@@ -347,6 +352,10 @@ bool Region::remap_page_range(size_t page_index, size_t page_count)
     ScopedSpinLock page_lock(m_page_directory->get_lock());
     size_t index = page_index;
     while (index < page_index + page_count) {
+        if (do_commit && !commit(index)) {
+            success = false;
+            break;
+        }
         if (!map_individual_page_impl(index)) {
             success = false;
             break;

@@ -159,6 +159,7 @@ public:
         return m_offset_in_vmobject;
     }
 
+    bool can_commit() const;
     bool commit();
 
     size_t amount_resident() const;
@@ -193,7 +194,7 @@ public:
     void set_inherit_mode(InheritMode inherit_mode) { m_inherit_mode = inherit_mode; }
 
-    bool remap_page_range(size_t page_index, size_t page_count);
+    bool remap_page_range(size_t page_index, size_t page_count, bool do_commit);
 
     bool is_volatile(VirtualAddress vaddr, size_t size) const;
 
     enum class SetVolatileError {