/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/Memory.h>
#include <Kernel/Heap/SlabAllocator.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/Sections.h>
#include <Kernel/SpinLock.h>
#include <Kernel/VM/Region.h>
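
// When SANITIZE_SLABS is defined, slabs handed out by alloc() are scrubbed
// with SLAB_ALLOC_SCRUB_BYTE and slabs returned through dealloc() are scrubbed
// with SLAB_DEALLOC_SCRUB_BYTE (see the memset() calls below), which makes use
// of uninitialized or freed slab memory easier to spot.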
#define SANITIZE_SLABS

namespace Kernel {
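
// A SlabAllocator hands out fixed-size slabs carved from one contiguous region
// that init() obtains from kmalloc_eternal(). Free slabs are kept on a singly
// linked freelist threaded through the slabs themselves; alloc() pops and
// dealloc() pushes using lock-free compare_exchange_strong() loops inside a
// ScopedCritical section. If the freelist runs dry, alloc() transparently
// falls back to the general kmalloc() heap, and dealloc() routes any pointer
// outside [m_base, m_end) back to kfree().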
template<size_t templated_slab_size>
class SlabAllocator {
public:
    SlabAllocator() = default;
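
    // Carves the region into slabs and links them all onto the freelist,
    // back to front: slabs[0] terminates the list and m_freelist starts out
    // pointing at the last slab.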
    void init(size_t size)
    {
        m_base = kmalloc_eternal(size);
        m_end = (u8*)m_base + size;
        FreeSlab* slabs = (FreeSlab*)m_base;
        m_slab_count = size / templated_slab_size;
        for (size_t i = 1; i < m_slab_count; ++i) {
            slabs[i].next = &slabs[i - 1];
        }
        slabs[0].next = nullptr;
        m_freelist = &slabs[m_slab_count - 1];
        m_num_allocated = 0;
    }

    constexpr size_t slab_size() const { return templated_slab_size; }
    size_t slab_count() const { return m_slab_count; }
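
    // Pops a slab off the lock-free freelist. If the slab region is exhausted,
    // the allocation is transparently serviced by kmalloc() instead.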
    void* alloc()
    {
        FreeSlab* free_slab;
        {
            // We want to avoid being swapped out in the middle of this
            ScopedCritical critical;
            FreeSlab* next_free;
            free_slab = m_freelist.load(AK::memory_order_consume);
            do {
                if (!free_slab)
                    return kmalloc(slab_size());
                // It's possible another processor is doing the same thing at
                // the same time, so next_free *can* be a bogus pointer. However,
                // in that case compare_exchange_strong would fail and we would
                // try again.
                next_free = free_slab->next;
            } while (!m_freelist.compare_exchange_strong(free_slab, next_free, AK::memory_order_acq_rel));

            m_num_allocated++;
        }

#ifdef SANITIZE_SLABS
        memset(free_slab, SLAB_ALLOC_SCRUB_BYTE, slab_size());
#endif
        return free_slab;
    }
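
    // Pushes a slab back onto the freelist. Pointers outside this allocator's
    // region were allocated by the kmalloc() fallback in alloc() above, so
    // they are handed to kfree() instead.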
    void dealloc(void* ptr)
    {
        VERIFY(ptr);
        if (ptr < m_base || ptr >= m_end) {
            kfree(ptr);
            return;
        }
        FreeSlab* free_slab = (FreeSlab*)ptr;
#ifdef SANITIZE_SLABS
        if (slab_size() > sizeof(FreeSlab*))
            memset(free_slab->padding, SLAB_DEALLOC_SCRUB_BYTE, sizeof(FreeSlab::padding));
#endif

        // We want to avoid being swapped out in the middle of this
        ScopedCritical critical;
        FreeSlab* next_free = m_freelist.load(AK::memory_order_consume);
        do {
            free_slab->next = next_free;
        } while (!m_freelist.compare_exchange_strong(next_free, free_slab, AK::memory_order_acq_rel));

        m_num_allocated--;
    }

    size_t num_allocated() const { return m_num_allocated; }
    size_t num_free() const { return m_slab_count - m_num_allocated; }

private:
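
    // Free slabs double as freelist nodes: the first word is the next pointer,
    // and the padding brings the struct up to exactly templated_slab_size
    // bytes (enforced by the static_assert below).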
    struct FreeSlab {
        FreeSlab* next;
        char padding[templated_slab_size - sizeof(FreeSlab*)];
    };

    Atomic<FreeSlab*> m_freelist { nullptr };
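    // Statistics-only counter; relaxed memory ordering appears sufficient here
    // since the value is only read back for reporting in num_allocated() and
    // num_free().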
    Atomic<size_t, AK::MemoryOrder::memory_order_relaxed> m_num_allocated;
    size_t m_slab_count;
    void* m_base { nullptr };
    void* m_end { nullptr };

    static_assert(sizeof(FreeSlab) == templated_slab_size);
};
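
// One allocator per supported size class; slab_alloc()/slab_dealloc() below
// dispatch to the smallest class that fits the requested size.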
static SlabAllocator<16> s_slab_allocator_16;
static SlabAllocator<32> s_slab_allocator_32;
static SlabAllocator<64> s_slab_allocator_64;
static SlabAllocator<128> s_slab_allocator_128;
static SlabAllocator<256> s_slab_allocator_256;

#if ARCH(I386)
static_assert(sizeof(Region) <= s_slab_allocator_128.slab_size());
#endif

template<typename Callback>
void for_each_allocator(Callback callback)
{
    callback(s_slab_allocator_16);
    callback(s_slab_allocator_32);
    callback(s_slab_allocator_64);
    callback(s_slab_allocator_128);
    callback(s_slab_allocator_256);
}
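
// Called during early kernel initialization; each size class gets a fixed
// budget of eternal memory, and UNMAP_AFTER_INIT lets the function's code be
// unmapped once initialization is done.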
UNMAP_AFTER_INIT void slab_alloc_init()
{
    s_slab_allocator_16.init(128 * KiB);
    s_slab_allocator_32.init(128 * KiB);
    s_slab_allocator_64.init(512 * KiB);
    s_slab_allocator_128.init(512 * KiB);
    s_slab_allocator_256.init(128 * KiB);
}
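
// Routes an allocation request to the smallest size class that can hold it.
// A hypothetical call site might look like this (the caller must remember the
// size it asked for and pass the same value to slab_dealloc()):
//
//     void* ptr = slab_alloc(24);     // served by the 32-byte allocator
//     ...
//     slab_dealloc(ptr, 24);          // dispatches back to the same allocator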
void* slab_alloc(size_t slab_size)
{
    if (slab_size <= 16)
        return s_slab_allocator_16.alloc();
    if (slab_size <= 32)
        return s_slab_allocator_32.alloc();
    if (slab_size <= 64)
        return s_slab_allocator_64.alloc();
    if (slab_size <= 128)
        return s_slab_allocator_128.alloc();
    if (slab_size <= 256)
        return s_slab_allocator_256.alloc();
    VERIFY_NOT_REACHED();
}
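
// slab_size must map to the same size class as the original slab_alloc() call
// so that the pointer is returned to the allocator that produced it.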
void slab_dealloc(void* ptr, size_t slab_size)
{
    if (slab_size <= 16)
        return s_slab_allocator_16.dealloc(ptr);
    if (slab_size <= 32)
        return s_slab_allocator_32.dealloc(ptr);
    if (slab_size <= 64)
        return s_slab_allocator_64.dealloc(ptr);
    if (slab_size <= 128)
        return s_slab_allocator_128.dealloc(ptr);
    if (slab_size <= 256)
        return s_slab_allocator_256.dealloc(ptr);
    VERIFY_NOT_REACHED();
}
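
// Reports the slab size, allocated count, and free count of every size class
// to the provided callback.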
void slab_alloc_stats(Function<void(size_t slab_size, size_t allocated, size_t free)> callback)
{
    for_each_allocator([&](auto& allocator) {
        auto num_allocated = allocator.num_allocated();
        auto num_free = allocator.slab_count() - num_allocated;
        callback(allocator.slab_size(), num_allocated, num_free);
    });
}

}