Kernel: Remove old region from process' regions vector before splitting
This does not affect functionality right now, but it means that the regions vector will now never contain any overlapping regions, which will allow the use of balanced binary search trees instead of a vector in the future (since they require keys to be exclusive).
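
Why exclusive, non-overlapping keys matter: once the regions container can never hold two regions covering the same address, a balanced search tree keyed on each region's base address can answer "which region contains this address?" with a single ordered lookup. A minimal sketch of that idea, not SerenityOS code: std::map stands in for a kernel balanced tree, and Range, RegionTable and find_containing are illustrative names only.

// Illustration only: with non-overlapping regions keyed by base address,
// the containing region (if any) is always the last region starting at or
// before the queried address.
#include <cstddef>
#include <cstdint>
#include <map>

struct Range {
    uintptr_t base { 0 };
    size_t size { 0 };
    bool contains(uintptr_t addr) const { return addr >= base && addr < base + size; }
};

struct RegionTable {
    std::map<uintptr_t, Range> regions; // exclusive keys: one entry per region base address

    Range const* find_containing(uintptr_t addr) const
    {
        // Find the first region starting strictly after addr, then step back one
        // entry; since regions never overlap, that entry is the only candidate.
        auto it = regions.upper_bound(addr);
        if (it == regions.begin())
            return nullptr;
        --it;
        return it->second.contains(addr) ? &it->second : nullptr;
    }
};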
This commit is contained in:
parent f8a3da46fd
commit 497c759ab7
Notes:
sideshowbarker 2024-07-18 20:28:23 +09:00
Author: https://github.com/IdanHo
Commit: https://github.com/SerenityOS/serenity/commit/497c759ab77
Pull-request: https://github.com/SerenityOS/serenity/pull/6167
Reviewed-by: https://github.com/awesomekling
Reviewed-by: https://github.com/bgianfo
@@ -341,20 +341,24 @@ KResultOr<int> Process::sys$mprotect(Userspace<void*> addr, size_t size, int pro
             return EACCES;
         }
 
+        // Remove the old region from our regions tree, since we're going to add another region
+        // with the exact same start address, but don't deallocate it yet
+        auto region = space().take_region(*old_region);
+        VERIFY(region);
+
+        // Unmap the old region here, specifying that we *don't* want the VM deallocated.
+        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
+
         // This vector is the region(s) adjacent to our range.
         // We need to allocate a new region for the range we wanted to change permission bits on.
-        auto adjacent_regions = space().split_region_around_range(*old_region, range_to_mprotect);
+        auto adjacent_regions = space().split_region_around_range(*region, range_to_mprotect);
 
-        size_t new_range_offset_in_vmobject = old_region->offset_in_vmobject() + (range_to_mprotect.base().get() - old_region->range().base().get());
-        auto& new_region = space().allocate_split_region(*old_region, range_to_mprotect, new_range_offset_in_vmobject);
+        size_t new_range_offset_in_vmobject = region->offset_in_vmobject() + (range_to_mprotect.base().get() - region->range().base().get());
+        auto& new_region = space().allocate_split_region(*region, range_to_mprotect, new_range_offset_in_vmobject);
         new_region.set_readable(prot & PROT_READ);
         new_region.set_writable(prot & PROT_WRITE);
         new_region.set_executable(prot & PROT_EXEC);
 
-        // Unmap the old region here, specifying that we *don't* want the VM deallocated.
-        old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
-        space().deallocate_region(*old_region);
-
         // Map the new regions using our page directory (they were just allocated and don't have one).
         for (auto* adjacent_region : adjacent_regions) {
             adjacent_region->map(space().page_directory());
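
The reordering above takes the old region out of the process's regions container before split_region_around_range() and allocate_split_region() insert replacement regions that reuse the same start address, so the container never briefly holds two overlapping entries. As a rough idea of what splitting "around" a range produces, here is a hedged sketch, not the kernel implementation: the Range type and ranges_around() helper are made up for illustration, and the target is assumed to lie inside the source.

// Conceptual sketch: carving a target sub-range out of a source range leaves
// at most two pieces, one on each side of the target.
#include <cstddef>
#include <cstdint>
#include <vector>

struct Range {
    uintptr_t base { 0 };
    size_t size { 0 };
    uintptr_t end() const { return base + size; }
};

std::vector<Range> ranges_around(Range const& source, Range const& target)
{
    std::vector<Range> leftovers;
    if (target.base > source.base)
        leftovers.push_back({ source.base, target.base - source.base }); // piece to the left of target
    if (target.end() < source.end())
        leftovers.push_back({ target.end(), source.end() - target.end() }); // piece to the right of target
    return leftovers;
}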
@@ -475,11 +479,15 @@ KResultOr<int> Process::sys$munmap(Userspace<void*> addr, size_t size)
         if (!old_region->is_mmap())
             return EPERM;
 
-        auto new_regions = space().split_region_around_range(*old_region, range_to_unmap);
+        // Remove the old region from our regions tree, since we're going to add another region
+        // with the exact same start address, but don't deallocate it yet
+        auto region = space().take_region(*old_region);
+        VERIFY(region);
 
         // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-        old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
-        space().deallocate_region(*old_region);
+        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
+
+        auto new_regions = space().split_region_around_range(*region, range_to_unmap);
 
         // Instead we give back the unwanted VM manually.
         space().page_directory().range_allocator().deallocate(range_to_unmap);
@@ -512,13 +520,16 @@ KResultOr<int> Process::sys$munmap(Userspace<void*> addr, size_t size)
             continue;
         }
 
-        // otherwise just split the regions and collect them for future mapping
-        new_regions.append(space().split_region_around_range(*old_region, range_to_unmap));
+        // Remove the old region from our regions tree, since we're going to add another region
+        // with the exact same start address, but don't deallocate it yet
+        auto region = space().take_region(*old_region);
+        VERIFY(region);
 
         // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-        old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
-        bool res = space().deallocate_region(*old_region);
-        VERIFY(res);
+        region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
+
+        // otherwise just split the regions and collect them for future mapping
+        new_regions.append(space().split_region_around_range(*region, range_to_unmap));
     }
     // Instead we give back the unwanted VM manually at the end.
     space().page_directory().range_allocator().deallocate(range_to_unmap);
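
In both munmap paths the pattern is the same: take ownership of the region, unmap it without returning its virtual range, split it, and only then hand the unwanted range back to the range allocator in one step. The taken region stays alive throughout because take_region() returns an owning pointer. A small lifetime sketch, with std::unique_ptr standing in for the kernel's OwnPtr and the surrounding function invented for illustration:

// The object is moved out of the container first, stays usable for the rest of
// the loop body, and is destroyed when the owning pointer goes out of scope.
#include <memory>
#include <vector>

struct Region { };

void drain_regions(std::vector<std::unique_ptr<Region>>& regions)
{
    while (!regions.empty()) {
        std::unique_ptr<Region> region = std::move(regions.back());
        regions.pop_back(); // the container no longer references the region...

        // ...but it is still alive here, so it could be unmapped / split / copied from.
        (void)region;
    } // `region` is destroyed at the end of each iteration.
}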
@@ -559,7 +570,8 @@ KResultOr<FlatPtr> Process::sys$mremap(Userspace<const Syscall::SC_mremap_params
 
         // Unmap without deallocating the VM range since we're going to reuse it.
         old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
-        space().deallocate_region(*old_region);
+        bool success = space().deallocate_region(*old_region);
+        VERIFY(success);
 
         auto new_vmobject = PrivateInodeVMObject::create_with_inode(inode);
 
@@ -118,18 +118,21 @@ KResultOr<Region*> Space::allocate_region_with_vmobject(const Range& range, Nonn
 
 bool Space::deallocate_region(Region& region)
 {
-    OwnPtr<Region> region_protector;
+    return take_region(region);
+}
+
+OwnPtr<Region> Space::take_region(Region& region)
+{
     ScopedSpinLock lock(m_lock);
 
     if (m_region_lookup_cache.region.unsafe_ptr() == &region)
         m_region_lookup_cache.region = nullptr;
     for (size_t i = 0; i < m_regions.size(); ++i) {
         if (&m_regions[i] == &region) {
-            region_protector = m_regions.unstable_take(i);
-            return true;
+            return m_regions.unstable_take(i);
         }
     }
-    return false;
+    return {};
 }
 
 Region* Space::find_region_from_range(const Range& range)
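
With this change, deallocate_region() is a thin wrapper that takes the region and immediately drops the returned OwnPtr, so its bool result still reports whether the region was actually found; take_region() is the new primitive for callers that need the region to outlive its removal. A hypothetical caller, sketched against the declarations in this diff; the include paths and the Kernel namespace are assumed from where these types lived in the tree at the time.

#include <AK/OwnPtr.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/Space.h>

namespace Kernel {

// Illustrative only: the two ways of removing a region from a Space.
void remove_two_regions(Space& space, Region& doomed, Region& to_be_replaced)
{
    // We only care whether the region was found; it is gone either way.
    bool removed = space.deallocate_region(doomed);
    VERIFY(removed);

    // Here the region must outlive its removal, e.g. so it can be unmapped
    // (without freeing its VM range) and split before being replaced.
    auto region = space.take_region(to_be_replaced);
    VERIFY(region);
    region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
    // `region` is destroyed when it goes out of scope.
}

}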
@@ -58,6 +58,7 @@ public:
     KResultOr<Region*> allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot, bool shared);
     KResultOr<Region*> allocate_region(const Range&, const String& name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
     bool deallocate_region(Region& region);
+    OwnPtr<Region> take_region(Region& region);
 
     Region& allocate_split_region(const Region& source_region, const Range&, size_t offset_in_vmobject);
     Vector<Region*, 2> split_region_around_range(const Region& source_region, const Range&);