LibJS: Never give back virtual memory once it belongs to a cell type

Instead of returning HeapBlock memory to the kernel (or a non-type-specific
shared cache), we now keep a BlockAllocator per CellAllocator and implement
"deallocation" by telling the kernel that we don't need the physical memory
right now, while keeping the virtual address range reserved.

This is done with MADV_FREE or MADV_DONTNEED if available, but for other
platforms (including SerenityOS) we munmap and then re-mmap the memory
to achieve the same effect. It's definitely clunky, so I've added a
FIXME about implementing the madvise options on SerenityOS too.
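
As a rough illustration of the mechanism (a standalone sketch, not the LibJS
code: the 4 KiB block size, the helper names and the plain POSIX mmap/madvise
calls are assumptions for the example), the "deallocate but keep the address"
idea looks like this:

    // Sketch: keep the block's virtual address range mapped forever, but let
    // the kernel reclaim the physical pages when the block is "deallocated".
    #include <cstddef>
    #include <cstdio>
    #include <sys/mman.h>

    static constexpr size_t block_size = 4096;

    static void decommit_block(void* block)
    {
    #if defined(MADV_FREE)
        // Preferred: pages may be reclaimed lazily, the mapping stays intact.
        if (madvise(block, block_size, MADV_FREE) < 0)
            perror("madvise(MADV_FREE)");
    #elif defined(MADV_DONTNEED)
        // Next best: pages are dropped and read back as zeroes on next touch.
        if (madvise(block, block_size, MADV_DONTNEED) < 0)
            perror("madvise(MADV_DONTNEED)");
    #else
        // Fallback (no madvise support): unmap, then immediately re-map the
        // same range with MAP_FIXED so nothing else can ever claim it.
        if (munmap(block, block_size) < 0)
            perror("munmap");
        if (mmap(block, block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0) != block)
            perror("mmap(MAP_FIXED)");
    #endif
    }

    int main()
    {
        void* block = mmap(nullptr, block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (block == MAP_FAILED)
            return 1;
        decommit_block(block);
        // The address stays reserved for this allocator; handing it out again
        // just means touching the freshly backed pages.
        return 0;
    }

The MAP_FIXED re-mapping in the fallback branch is also why the pledge
promises below gain "map_fixed".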

The important outcome of this change is that GC types that use a
type-specific allocator become immune to use-after-free type confusion
attacks, since their virtual addresses will only ever be re-used for
the same exact type again and again.
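
To make that guarantee concrete, here is a hedged, self-contained sketch
(illustrative class and type names, not the LibJS allocators) of why a
recycled block address can only ever come back for the same cell type:

    // Sketch of the reuse property only: every cell type owns its own block
    // cache, and a freed block is only ever handed out by that same cache.
    #include <cassert>
    #include <cstddef>
    #include <cstdlib>
    #include <vector>

    static constexpr size_t block_size = 4096;

    template<typename T>
    class TypedBlockAllocator {
    public:
        void* allocate()
        {
            if (!m_free_blocks.empty()) {
                void* block = m_free_blocks.back();
                m_free_blocks.pop_back();
                return block; // an address that has only ever held T cells
            }
            return aligned_alloc(block_size, block_size);
        }

        // "Deallocation" parks the block in this type's cache; it is never
        // returned to a shared pool where another type could pick it up.
        void deallocate(void* block) { m_free_blocks.push_back(block); }

    private:
        std::vector<void*> m_free_blocks;
    };

    struct Object { };
    struct BigInt { };

    int main()
    {
        TypedBlockAllocator<Object> object_blocks;
        TypedBlockAllocator<BigInt> bigint_blocks;

        void* first = object_blocks.allocate();
        object_blocks.deallocate(first);

        // A stale pointer into `first` can only ever alias another Object
        // block, never a BigInt block, which is what defeats type confusion.
        assert(object_blocks.allocate() == first);
        (void)bigint_blocks;
        return 0;
    }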

Fixes #22274

Commit: b6d4eea7ac (parent bcb1e548f1)
Author: Andreas Kling, 2023-12-31 11:36:18 +01:00
10 changed files with 47 additions and 36 deletions

@@ -146,7 +146,7 @@ private:
 ErrorOr<int> serenity_main(Main::Arguments arguments)
 {
-    TRY(Core::System::pledge("stdio recvfd sendfd rpath cpath unix proc exec thread"));
+    TRY(Core::System::pledge("stdio recvfd sendfd rpath cpath unix proc exec thread map_fixed"));
     Core::LockFile lockfile("/tmp/lock/assistant.lock");

@@ -23,7 +23,7 @@
 ErrorOr<int> serenity_main(Main::Arguments arguments)
 {
-    TRY(Core::System::pledge("stdio recvfd sendfd rpath fattr unix cpath wpath thread"));
+    TRY(Core::System::pledge("stdio recvfd sendfd rpath fattr unix cpath wpath thread map_fixed"));
     auto app = TRY(GUI::Application::create(arguments));

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2021-2023, Andreas Kling <kling@serenityos.org>
  *
  * SPDX-License-Identifier: BSD-2-Clause
  */
@@ -15,20 +15,24 @@
 #    include <sanitizer/asan_interface.h>
 #endif
+// FIXME: Implement MADV_FREE and/or MADV_DONTNEED on SerenityOS.
+#if defined(AK_OS_SERENITY) || (!defined(MADV_FREE) && !defined(MADV_DONTNEED))
+#    define USE_FALLBACK_BLOCK_DEALLOCATION
+#endif
 namespace JS {
+// NOTE: If this changes, we need to update the mmap() code to ensure correct alignment.
+static_assert(HeapBlock::block_size == 4096);
 BlockAllocator::~BlockAllocator()
 {
     for (auto* block : m_blocks) {
         ASAN_UNPOISON_MEMORY_REGION(block, HeapBlock::block_size);
-#ifdef AK_OS_SERENITY
         if (munmap(block, HeapBlock::block_size) < 0) {
             perror("munmap");
             VERIFY_NOT_REACHED();
         }
-#else
-        free(block);
-#endif
     }
 }
@@ -50,28 +54,38 @@ void* BlockAllocator::allocate_block([[maybe_unused]] char const* name)
 #ifdef AK_OS_SERENITY
     auto* block = (HeapBlock*)serenity_mmap(nullptr, HeapBlock::block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_RANDOMIZED | MAP_PRIVATE, 0, 0, HeapBlock::block_size, name);
-    VERIFY(block != MAP_FAILED);
 #else
-    auto* block = (HeapBlock*)aligned_alloc(HeapBlock::block_size, HeapBlock::block_size);
-    VERIFY(block);
+    auto* block = (HeapBlock*)mmap(nullptr, HeapBlock::block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
 #endif
+    VERIFY(block != MAP_FAILED);
     return block;
 }
 void BlockAllocator::deallocate_block(void* block)
 {
     VERIFY(block);
-    if (m_blocks.size() >= max_cached_blocks) {
-#ifdef AK_OS_SERENITY
-        if (munmap(block, HeapBlock::block_size) < 0) {
-            perror("munmap");
-            VERIFY_NOT_REACHED();
-        }
-#else
-        free(block);
-#endif
-        return;
+#if defined(USE_FALLBACK_BLOCK_DEALLOCATION)
+    // If we can't use any of the nicer techniques, unmap and remap the block to return the physical pages while keeping the VM.
+    if (munmap(block, HeapBlock::block_size) < 0) {
+        perror("munmap");
+        VERIFY_NOT_REACHED();
+    }
+    if (mmap(block, HeapBlock::block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, 0, 0) != block) {
+        perror("mmap");
+        VERIFY_NOT_REACHED();
+    }
+#elif defined(MADV_FREE)
+    if (madvise(block, HeapBlock::block_size, MADV_FREE) < 0) {
+        perror("madvise(MADV_FREE)");
+        VERIFY_NOT_REACHED();
+    }
+#elif defined(MADV_DONTNEED)
+    if (madvise(block, HeapBlock::block_size, MADV_DONTNEED) < 0) {
+        perror("madvise(MADV_DONTNEED)");
+        VERIFY_NOT_REACHED();
+    }
+#endif
     ASAN_POISON_MEMORY_REGION(block, HeapBlock::block_size);
     m_blocks.append(block);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2021-2023, Andreas Kling <kling@serenityos.org>
  *
  * SPDX-License-Identifier: BSD-2-Clause
  */
@@ -20,9 +20,7 @@ public:
     void deallocate_block(void*);
 private:
-    static constexpr size_t max_cached_blocks = 512;
-    Vector<void*, max_cached_blocks> m_blocks;
+    Vector<void*> m_blocks;
 };
 }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2020-2023, Andreas Kling <kling@serenityos.org>
  *
  * SPDX-License-Identifier: BSD-2-Clause
  */
@@ -37,11 +37,10 @@ Cell* CellAllocator::allocate_cell(Heap& heap)
 void CellAllocator::block_did_become_empty(Badge<Heap>, HeapBlock& block)
 {
-    auto& heap = block.heap();
     block.m_list_node.remove();
     // NOTE: HeapBlocks are managed by the BlockAllocator, so we don't want to `delete` the block here.
     block.~HeapBlock();
-    heap.block_allocator().deallocate_block(&block);
+    m_block_allocator.deallocate_block(&block);
 }
 void CellAllocator::block_did_become_usable(Badge<Heap>, HeapBlock& block)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2020-2023, Andreas Kling <kling@serenityos.org>
  *
  * SPDX-License-Identifier: BSD-2-Clause
  */
@@ -10,6 +10,7 @@
 #include <AK/NeverDestroyed.h>
 #include <AK/NonnullOwnPtr.h>
 #include <LibJS/Forward.h>
+#include <LibJS/Heap/BlockAllocator.h>
 #include <LibJS/Heap/HeapBlock.h>
 #define JS_DECLARE_ALLOCATOR(ClassName) \
@@ -49,9 +50,13 @@ public:
     IntrusiveListNode<CellAllocator> m_list_node;
     using List = IntrusiveList<&CellAllocator::m_list_node>;
+    BlockAllocator& block_allocator() { return m_block_allocator; }
 private:
     size_t const m_cell_size;
+    BlockAllocator m_block_allocator;
     using BlockList = IntrusiveList<&HeapBlock::m_list_node>;
     BlockList m_full_blocks;
     BlockList m_usable_blocks;

@@ -15,7 +15,6 @@
 #include <AK/Vector.h>
 #include <LibCore/Forward.h>
 #include <LibJS/Forward.h>
-#include <LibJS/Heap/BlockAllocator.h>
 #include <LibJS/Heap/Cell.h>
 #include <LibJS/Heap/CellAllocator.h>
 #include <LibJS/Heap/Handle.h>
@@ -83,8 +82,6 @@ public:
     void register_cell_allocator(Badge<CellAllocator>, CellAllocator&);
-    BlockAllocator& block_allocator() { return m_block_allocator; }
     void uproot_cell(Cell* cell);
 private:
@@ -154,8 +151,6 @@ private:
     Vector<GCPtr<Cell>> m_uprooted_cells;
-    BlockAllocator m_block_allocator;
     size_t m_gc_deferrals { 0 };
     bool m_should_gc_when_deferral_ends { false };

@@ -26,7 +26,7 @@ NonnullOwnPtr<HeapBlock> HeapBlock::create_with_cell_size(Heap& heap, CellAlloca
 #else
     char const* name = nullptr;
 #endif
-    auto* block = static_cast<HeapBlock*>(heap.block_allocator().allocate_block(name));
+    auto* block = static_cast<HeapBlock*>(cell_allocator.block_allocator().allocate_block(name));
     new (block) HeapBlock(heap, cell_allocator, cell_size);
     return NonnullOwnPtr<HeapBlock>(NonnullOwnPtr<HeapBlock>::Adopt, *block);
 }

@@ -27,7 +27,7 @@
 ErrorOr<int> serenity_main(Main::Arguments)
 {
     Core::EventLoop event_loop;
-    TRY(Core::System::pledge("stdio recvfd sendfd accept unix rpath thread proc"));
+    TRY(Core::System::pledge("stdio recvfd sendfd accept unix rpath thread proc map_fixed"));
     // This must be first; we can't check if /tmp/webdriver exists once we've unveiled other paths.
     auto webdriver_socket_path = ByteString::formatted("{}/webdriver", TRY(Core::StandardPaths::runtime_directory()));

@@ -531,7 +531,7 @@ private:
 ErrorOr<int> serenity_main(Main::Arguments arguments)
 {
-    TRY(Core::System::pledge("stdio rpath wpath cpath tty sigaction"));
+    TRY(Core::System::pledge("stdio rpath wpath cpath tty sigaction map_fixed"));
     bool gc_on_every_allocation = false;
     bool disable_syntax_highlight = false;