/*
 * Copyright (c) 2021, Ali Mohammad Pur <mpfard@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Atomic.h>
#include <AK/StdLibExtras.h>
#include <AK/Types.h>
#include <AK/kmalloc.h>
#include <sys/mman.h>

namespace AK {

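// A bump ("arena") allocator: memory is handed out by bumping an offset within a
// chunk of backing memory (mmap'd or kmalloc'd, depending on `use_mmap`), and is
// only ever released wholesale via deallocate_all(); individual allocations cannot
// be freed. One most-recently-freed chunk is kept in a static cache shared by all
// allocators with the same template parameters, avoiding repeated map/unmap cycles.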
template<bool use_mmap = false, size_t chunk_size = use_mmap ? 4 * MiB : 4 * KiB>
class BumpAllocator {
public:
    BumpAllocator()
    {
        if constexpr (use_mmap)
            m_chunk_size = chunk_size;
        else
            m_chunk_size = kmalloc_good_size(chunk_size);
    }

    ~BumpAllocator()
    {
        deallocate_all();
    }

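    // Hands out `size` bytes aligned to `align`, bumping the offset within the current
    // chunk and switching to a fresh chunk when the current one cannot fit the request.
    // Returns nullptr only if a new chunk cannot be obtained.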
    void* allocate(size_t size, size_t align)
    {
        VERIFY(size < m_chunk_size - sizeof(ChunkHeader));
        if (!m_current_chunk) {
            if (!allocate_a_chunk())
                return nullptr;
        }

    allocate_again:;
        VERIFY(m_current_chunk != 0);

        auto aligned_ptr = align_up_to(m_byte_offset_into_current_chunk + m_current_chunk, align);
        auto next_offset = aligned_ptr + size - m_current_chunk;
        if (next_offset > m_chunk_size) {
            if (!allocate_a_chunk())
                return nullptr;
            goto allocate_again;
        }
        m_byte_offset_into_current_chunk = next_offset;
        return (void*)aligned_ptr;
    }

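    // Releases every chunk owned by this allocator. One chunk may instead be stashed
    // in s_unused_allocation_cache for reuse by a future allocator instance.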
    void deallocate_all()
    {
        if (!m_head_chunk)
            return;
        // Note that 'cache_filled' is just an educated guess, and we don't rely on it.
        // If we determine 'cache_filled=true' and the cache becomes empty in the meantime,
        // then we haven't lost much; it was a close call anyway.
        // If we determine 'cache_filled=false' and the cache becomes full in the meantime,
        // then we'll end up with a different chunk to munmap(), no big difference.
        bool cache_filled = s_unused_allocation_cache.load(MemoryOrder::memory_order_relaxed);
        for_each_chunk([&](auto chunk) {
            if (!cache_filled) {
                cache_filled = true;
                (reinterpret_cast<ChunkHeader*>(chunk))->next_chunk = 0;
                chunk = s_unused_allocation_cache.exchange(chunk);
                if (!chunk)
                    return;
                // The cache got filled in the meantime. Oh well, we have to call munmap() anyway.
            }

            if constexpr (use_mmap) {
                munmap((void*)chunk, m_chunk_size);
            } else {
                kfree_sized((void*)chunk, m_chunk_size);
            }
        });
    }

protected:
    template<typename TFn>
    void for_each_chunk(TFn&& fn)
    {
        auto head_chunk = m_head_chunk;
        while (head_chunk) {
            auto& chunk_header = *reinterpret_cast<ChunkHeader const*>(head_chunk);
            VERIFY(chunk_header.magic == chunk_magic);
            if (head_chunk == m_current_chunk)
                VERIFY(chunk_header.next_chunk == 0);
            auto next_chunk = chunk_header.next_chunk;
            fn(head_chunk);
            head_chunk = next_chunk;
        }
    }

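    // Fetches a chunk from the shared cache if one is available; otherwise maps or
    // kmallocs a fresh one. The new chunk is linked after the current chunk (or installed
    // as the head chunk) and becomes the current chunk. Returns false if no memory
    // could be obtained.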
    bool allocate_a_chunk()
    {
        // dbgln("Allocated {} entries in previous chunk and have {} unusable bytes", m_allocations_in_previous_chunk, m_chunk_size - m_byte_offset_into_current_chunk);
        // m_allocations_in_previous_chunk = 0;
        void* new_chunk = reinterpret_cast<void*>(s_unused_allocation_cache.exchange(0));
        if (!new_chunk) {
            if constexpr (use_mmap) {
#ifdef AK_OS_SERENITY
                new_chunk = serenity_mmap(nullptr, m_chunk_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_RANDOMIZED | MAP_PRIVATE, 0, 0, m_chunk_size, "BumpAllocator Chunk");
#else
                new_chunk = mmap(nullptr, m_chunk_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif
                if (new_chunk == MAP_FAILED)
                    return false;
            } else {
                new_chunk = kmalloc(m_chunk_size);
                if (!new_chunk)
                    return false;
            }
        }

        auto& new_header = *reinterpret_cast<ChunkHeader*>(new_chunk);
        new_header.magic = chunk_magic;
        new_header.next_chunk = 0;
        m_byte_offset_into_current_chunk = sizeof(ChunkHeader);

        if (!m_head_chunk) {
            VERIFY(!m_current_chunk);
            m_head_chunk = reinterpret_cast<FlatPtr>(new_chunk);
            m_current_chunk = reinterpret_cast<FlatPtr>(new_chunk);
            return true;
        }

        VERIFY(m_current_chunk);
        auto& old_header = *reinterpret_cast<ChunkHeader*>(m_current_chunk);
        VERIFY(old_header.magic == chunk_magic);
        VERIFY(old_header.next_chunk == 0);
        old_header.next_chunk = reinterpret_cast<FlatPtr>(new_chunk);
        m_current_chunk = reinterpret_cast<FlatPtr>(new_chunk);
        return true;
    }

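    // Every chunk begins with a ChunkHeader: `magic` guards against treating arbitrary
    // memory as a chunk, and `next_chunk` links the chunks into a singly-linked list.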
    constexpr static FlatPtr chunk_magic = explode_byte(0xdf);
    struct ChunkHeader {
        FlatPtr magic;
        FlatPtr next_chunk;
    };
    FlatPtr m_head_chunk { 0 };
    FlatPtr m_current_chunk { 0 };
    size_t m_byte_offset_into_current_chunk { 0 };
    size_t m_chunk_size { 0 };
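    // A single spare chunk shared by all BumpAllocators with the same template arguments,
    // so that destroying one allocator and creating another can skip a map/unmap cycle.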
    static Atomic<FlatPtr> s_unused_allocation_cache;
};

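// Typed convenience wrapper: constructs `T` objects in place inside the underlying
// BumpAllocator and runs their destructors in destroy_all() (also called on destruction).
//
// A minimal usage sketch (illustrative only; `Node` is a hypothetical type):
//
//     UniformBumpAllocator<Node> allocator;
//     auto* node = allocator.allocate(42); // placement-new of Node { 42 }
//     // ... use node; all Nodes are destroyed and their chunks released
//     // when `allocator` goes out of scope.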
template<typename T, bool use_mmap = false, size_t chunk_size = use_mmap ? 4 * MiB : 4 * KiB>
class UniformBumpAllocator : protected BumpAllocator<use_mmap, chunk_size> {
    using Allocator = BumpAllocator<use_mmap, chunk_size>;

public:
    UniformBumpAllocator() = default;
    ~UniformBumpAllocator()
    {
        destroy_all();
    }

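    // Constructs a T from the given arguments in bump-allocated storage.
    // Returns nullptr if the underlying allocator could not provide memory.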
    template<typename... Args>
    T* allocate(Args&&... args)
    {
        auto ptr = (T*)Allocator::allocate(sizeof(T), alignof(T));
        if (!ptr)
            return nullptr;
        return new (ptr) T { forward<Args>(args)... };
    }

    void deallocate_all()
    {
        destroy_all();
        Allocator::deallocate_all();
    }

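    // Runs ~T() on every object handed out so far; the chunks themselves remain
    // allocated until deallocate_all() is called (or the allocator is destroyed).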
    void destroy_all()
    {
        this->for_each_chunk([&](auto chunk) {
            auto base_ptr = align_up_to(chunk + sizeof(typename Allocator::ChunkHeader), alignof(T));
            // Compute the offset of the first byte *after* this chunk:
            FlatPtr end_offset = base_ptr + this->m_chunk_size - chunk - sizeof(typename Allocator::ChunkHeader);
            if (chunk == this->m_current_chunk)
                end_offset = this->m_byte_offset_into_current_chunk;
            // Compute the offset of the first byte *after* the last valid object, in case the end of the chunk does not align with the end of an object:
            end_offset = (end_offset / sizeof(T)) * sizeof(T);
            for (; base_ptr - chunk < end_offset; base_ptr += sizeof(T))
                reinterpret_cast<T*>(base_ptr)->~T();
        });
    }
};

template<bool use_mmap, size_t size>
inline Atomic<FlatPtr> BumpAllocator<use_mmap, size>::s_unused_allocation_cache { 0 };

}

#if USING_AK_GLOBALLY
using AK::BumpAllocator;
using AK::UniformBumpAllocator;
#endif