Kernel: Pad virtual address space allocations with guard pages

Put one unused page on each side of VM allocations to make invalid
accesses more likely to generate crashes.

Note that we will not add this guard padding for mmap() at a specific
memory address, only to "mmap it anywhere" requests.
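To make the layout concrete, here is a minimal standalone sketch of the padding arithmetic. SimpleRange, allocate_with_guards() and the hard-coded PAGE_SIZE are illustrative assumptions, not the kernel's RangeAllocator API; the real path lives in RangeAllocator::allocate_anywhere() shown in the diff below.

    // Sketch only: reserve `size` bytes plus one unused page on each side,
    // then hand back just the middle part. The guard pages are never mapped,
    // so an access just past either end of the allocation faults.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    static constexpr size_t PAGE_SIZE = 4096;

    struct SimpleRange {
        uintptr_t base { 0 };
        size_t size { 0 };
    };

    static bool allocate_with_guards(const SimpleRange& available, size_t size, SimpleRange& out)
    {
        size_t padded_size = size + PAGE_SIZE * 2;
        if (available.size < padded_size)
            return false;
        // Caller-visible range starts one page into the reserved region.
        out = { available.base + PAGE_SIZE, size };
        return true;
    }

    int main()
    {
        SimpleRange free_region { 0x20000000, 16 * PAGE_SIZE };
        SimpleRange allocation;
        if (allocate_with_guards(free_region, 4 * PAGE_SIZE, allocation)) {
            printf("reserved: %#lx-%#lx\n",
                (unsigned long)free_region.base,
                (unsigned long)(free_region.base + allocation.size + 2 * PAGE_SIZE));
            printf("usable:   %#lx-%#lx (one guard page on each side)\n",
                (unsigned long)allocation.base,
                (unsigned long)(allocation.base + allocation.size));
        }
        return 0;
    }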
Author: Andreas Kling, 2019-09-22 15:12:29 +02:00
Commit: 9c549c178a (parent: bd1e8bf166)

@@ -59,12 +59,14 @@ void RangeAllocator::carve_at_index(int index, const Range& range)
 Range RangeAllocator::allocate_anywhere(size_t size)
 {
+    // NOTE: We pad VM allocations with a guard page on each side.
+    size_t padded_size = size + PAGE_SIZE * 2;
     for (int i = 0; i < m_available_ranges.size(); ++i) {
         auto& available_range = m_available_ranges[i];
-        if (available_range.size() < size)
+        if (available_range.size() < padded_size)
             continue;
-        Range allocated_range(available_range.base(), size);
-        if (available_range.size() == size) {
+        Range allocated_range(available_range.base().offset(PAGE_SIZE), size);
+        if (available_range.size() == padded_size) {
 #ifdef VRA_DEBUG
             dbgprintf("VRA: Allocated perfect-fit anywhere(%u): %x\n", size, allocated_range.base().get());
 #endif
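
The payoff is easiest to see from user space. The following is a hypothetical demo, not part of this commit, assuming a POSIX-style mmap(): with a guard page placed right after an anonymous "map it anywhere" allocation, a one-byte overrun should hit unmapped memory and crash instead of silently scribbling on whatever mapping happens to sit next to it.

    // Hypothetical userland demo: write one byte past an anonymous mmap()
    // allocation. With a guard page in place the write hits an unmapped page
    // and the process is killed with a memory fault.
    #include <sys/mman.h>
    #include <stddef.h>
    #include <stdio.h>

    int main()
    {
        const size_t size = 4096;
        auto* p = (unsigned char*)mmap(nullptr, size, PROT_READ | PROT_WRITE,
            MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        p[size] = 0xff; // One byte past the end: expected to fault on the guard page.
        printf("No crash: the stray write landed in another mapping.\n");
        return 0;
    }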