LibJS: Recycle up to 64 HeapBlocks to improve performance :^)

This patch adds a BlockAllocator to the GC heap where we now cache up to
64 HeapBlock-sized mmap regions that get recycled when allocating HeapBlocks.

This improves test-js runtime performance by ~35%, pretty cool! :^)
This commit is contained in:
Andreas Kling 2021-05-27 19:01:26 +02:00
parent d2149c153c
commit e9081a2644
Notes: sideshowbarker 2024-07-18 17:18:07 +09:00
6 changed files with 101 additions and 8 deletions

View File

@ -2,7 +2,8 @@ set(SOURCES
AST.cpp
Console.cpp
Heap/Allocator.cpp
Heap/Handle.cpp
Heap/BlockAllocator.cpp
Heap/Handle.cpp
Heap/HeapBlock.cpp
Heap/Heap.cpp
Interpreter.cpp

View File

@ -6,6 +6,8 @@
#include <AK/Badge.h>
#include <LibJS/Heap/Allocator.h>
#include <LibJS/Heap/BlockAllocator.h>
#include <LibJS/Heap/Heap.h>
#include <LibJS/Heap/HeapBlock.h>
namespace JS {
@ -36,8 +38,11 @@ Cell* Allocator::allocate_cell(Heap& heap)
void Allocator::block_did_become_empty(Badge<Heap>, HeapBlock& block)
{
auto& heap = block.heap();
block.m_list_node.remove();
delete &block;
// NOTE: HeapBlocks are managed by the BlockAllocator, so we don't want to `delete` the block here.
block.~HeapBlock();
heap.block_allocator().deallocate_block(&block);
}
void Allocator::block_did_become_usable(Badge<Heap>, HeapBlock& block)

View File

@ -0,0 +1,58 @@
/*
* Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Vector.h>
#include <LibJS/Forward.h>
#include <LibJS/Heap/BlockAllocator.h>
#include <LibJS/Heap/HeapBlock.h>
#include <stdlib.h>
#include <sys/mman.h>
namespace JS {
// Nothing to set up; the block cache starts out empty.
BlockAllocator::BlockAllocator() = default;
BlockAllocator::~BlockAllocator()
{
    // Release every block still sitting in the cache.
    for (auto* block : m_blocks) {
#ifdef __serenity__
        if (munmap(block, HeapBlock::block_size) < 0) {
            perror("munmap");
            VERIFY_NOT_REACHED();
        }
#else
        // On non-Serenity platforms blocks come from aligned_alloc()
        // (see allocate_block()), so they must be released with free(),
        // not munmap().
        free(block);
#endif
    }
}
// Hands out a HeapBlock::block_size-sized, block_size-aligned region.
// Recycles a cached block when available; otherwise maps/allocates a new one.
// `name` annotates the mapping (only used on SerenityOS).
void* BlockAllocator::allocate_block([[maybe_unused]] char const* name)
{
    // Fast path: reuse a previously deallocated block.
    if (!m_blocks.is_empty())
        return m_blocks.take_last();
#ifdef __serenity__
    auto* block = (HeapBlock*)serenity_mmap(nullptr, HeapBlock::block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_RANDOMIZED | MAP_PRIVATE, 0, 0, HeapBlock::block_size, name);
    VERIFY(block != MAP_FAILED);
#else
    auto* block = (HeapBlock*)aligned_alloc(HeapBlock::block_size, HeapBlock::block_size);
    // aligned_alloc() signals failure with nullptr, not MAP_FAILED.
    VERIFY(block);
#endif
    return block;
}
// Returns a block to the cache, or releases it when the cache is full.
void BlockAllocator::deallocate_block(void* block)
{
    VERIFY(block);
    if (m_blocks.size() >= max_cached_blocks) {
#ifdef __serenity__
        if (munmap(block, HeapBlock::block_size) < 0) {
            perror("munmap");
            VERIFY_NOT_REACHED();
        }
#else
        // Matches the aligned_alloc() in allocate_block(); munmap() would
        // be invalid for malloc-family memory.
        free(block);
#endif
        return;
    }
    m_blocks.append(block);
}
}

View File

@ -0,0 +1,28 @@
/*
* Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Vector.h>
#include <LibJS/Forward.h>
namespace JS {
// Caches up to max_cached_blocks HeapBlock-sized memory regions so that
// new HeapBlocks can reuse them instead of mapping/allocating fresh memory
// every time.
class BlockAllocator {
public:
    BlockAllocator();
    ~BlockAllocator();
    // Returns a block-sized region, recycled from the cache when possible.
    // `name` labels the mapping (used on SerenityOS only).
    void* allocate_block(char const* name);
    // Hands a block back; it is cached for reuse or released if the cache
    // already holds max_cached_blocks entries.
    void deallocate_block(void*);
private:
    static constexpr size_t max_cached_blocks = 64;
    Vector<void*> m_blocks; // Cached, currently-unused blocks.
};
}

View File

@ -14,6 +14,7 @@
#include <LibCore/Forward.h>
#include <LibJS/Forward.h>
#include <LibJS/Heap/Allocator.h>
#include <LibJS/Heap/BlockAllocator.h>
#include <LibJS/Heap/Cell.h>
#include <LibJS/Heap/Handle.h>
#include <LibJS/Runtime/Object.h>
@ -72,6 +73,8 @@ public:
void defer_gc(Badge<DeferGC>);
void undefer_gc(Badge<DeferGC>);
BlockAllocator& block_allocator() { return m_block_allocator; }
private:
Cell* allocate_cell(size_t);
@ -103,6 +106,8 @@ private:
HashTable<MarkedValueList*> m_marked_value_lists;
BlockAllocator m_block_allocator;
size_t m_gc_deferrals { 0 };
bool m_should_gc_when_deferral_ends { false };

View File

@ -6,6 +6,7 @@
#include <AK/Assertions.h>
#include <AK/NonnullOwnPtr.h>
#include <LibJS/Heap/Heap.h>
#include <LibJS/Heap/HeapBlock.h>
#include <stdio.h>
#include <sys/mman.h>
@ -16,12 +17,7 @@ NonnullOwnPtr<HeapBlock> HeapBlock::create_with_cell_size(Heap& heap, size_t cel
{
char name[64];
snprintf(name, sizeof(name), "LibJS: HeapBlock(%zu)", cell_size);
#ifdef __serenity__
auto* block = (HeapBlock*)serenity_mmap(nullptr, block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_RANDOMIZED | MAP_PRIVATE, 0, 0, block_size, name);
#else
auto* block = (HeapBlock*)aligned_alloc(block_size, block_size);
#endif
VERIFY(block != MAP_FAILED);
auto* block = static_cast<HeapBlock*>(heap.block_allocator().allocate_block(name));
new (block) HeapBlock(heap, cell_size);
return NonnullOwnPtr<HeapBlock>(NonnullOwnPtr<HeapBlock>::Adopt, *block);
}