/*
 * Copyright (c) 2018-2020, Andreas Kling
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Assertions.h>
#include <AK/Memory.h>
#include <Kernel/Heap/SlabAllocator.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/SpinLock.h>
#include <Kernel/VM/Region.h>

#define SANITIZE_SLABS

namespace Kernel {

template<size_t templated_slab_size>
class SlabAllocator {
public:
    SlabAllocator() { }

    void init(size_t size)
    {
        m_base = kmalloc_eternal(size);
        m_end = (u8*)m_base + size;

        // Carve the backing allocation into a singly-linked freelist of slabs.
        FreeSlab* slabs = (FreeSlab*)m_base;
        size_t slab_count = size / templated_slab_size;
        for (size_t i = 1; i < slab_count; ++i) {
            slabs[i].next = &slabs[i - 1];
        }
        slabs[0].next = nullptr;
        m_freelist = &slabs[slab_count - 1];
        m_num_allocated.store(0, AK::MemoryOrder::memory_order_release);
        m_num_free.store(slab_count, AK::MemoryOrder::memory_order_release);
    }

    constexpr size_t slab_size() const { return templated_slab_size; }

    void* alloc()
    {
        ScopedSpinLock lock(m_lock);
        // Fall back to kmalloc() when the slab pool is exhausted.
        if (!m_freelist)
            return kmalloc(slab_size());
        ASSERT(m_freelist);
        void* ptr = m_freelist;
        m_freelist = m_freelist->next;
        m_num_allocated.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
        m_num_free.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
#ifdef SANITIZE_SLABS
        memset(ptr, SLAB_ALLOC_SCRUB_BYTE, slab_size());
#endif
        return ptr;
    }

    void dealloc(void* ptr)
    {
        ScopedSpinLock lock(m_lock);
        ASSERT(ptr);
        // Pointers outside the slab region came from the kmalloc() fallback path.
        if (ptr < m_base || ptr >= m_end) {
            kfree(ptr);
            return;
        }
        ((FreeSlab*)ptr)->next = m_freelist;
#ifdef SANITIZE_SLABS
        if (slab_size() > sizeof(FreeSlab*))
            memset(((FreeSlab*)ptr)->padding, SLAB_DEALLOC_SCRUB_BYTE, sizeof(FreeSlab::padding));
#endif
        m_freelist = (FreeSlab*)ptr;
        m_num_allocated.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
        m_num_free.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
    }

    size_t num_allocated() const { return m_num_allocated.load(AK::MemoryOrder::memory_order_consume); }
    size_t num_free() const { return m_num_free.load(AK::MemoryOrder::memory_order_consume); }

private:
    struct FreeSlab {
        FreeSlab* next { nullptr };
        char padding[templated_slab_size - sizeof(FreeSlab*)];
    };

    FreeSlab* m_freelist { nullptr };
    Atomic<size_t> m_num_allocated;
    Atomic<size_t> m_num_free;
    void* m_base { nullptr };
    void* m_end { nullptr };
    SpinLock<u8> m_lock;

    static_assert(sizeof(FreeSlab) == templated_slab_size);
};

static SlabAllocator<16> s_slab_allocator_16;
static SlabAllocator<32> s_slab_allocator_32;
static SlabAllocator<64> s_slab_allocator_64;
static SlabAllocator<128> s_slab_allocator_128;
static_assert(sizeof(Region) <= s_slab_allocator_64.slab_size());

template<typename Callback>
void for_each_allocator(Callback callback)
{
    callback(s_slab_allocator_16);
    callback(s_slab_allocator_32);
    callback(s_slab_allocator_64);
    callback(s_slab_allocator_128);
}

void slab_alloc_init()
{
    s_slab_allocator_16.init(128 * KiB);
    s_slab_allocator_32.init(128 * KiB);
    s_slab_allocator_64.init(512 * KiB);
    s_slab_allocator_128.init(512 * KiB);
}

void* slab_alloc(size_t slab_size)
{
    if (slab_size <= 16)
        return s_slab_allocator_16.alloc();
    if (slab_size <= 32)
        return s_slab_allocator_32.alloc();
    if (slab_size <= 64)
        return s_slab_allocator_64.alloc();
    if (slab_size <= 128)
        return s_slab_allocator_128.alloc();
    ASSERT_NOT_REACHED();
}

void slab_dealloc(void* ptr, size_t slab_size)
{
    if (slab_size <= 16)
        return s_slab_allocator_16.dealloc(ptr);
    if (slab_size <= 32)
        return s_slab_allocator_32.dealloc(ptr);
    if (slab_size <= 64)
        return s_slab_allocator_64.dealloc(ptr);
    if (slab_size <= 128)
        return s_slab_allocator_128.dealloc(ptr);
    ASSERT_NOT_REACHED();
}

void slab_alloc_stats(Function<void(size_t slab_size, size_t num_allocated, size_t num_free)> callback)
{
    for_each_allocator([&](auto& allocator) {
        callback(allocator.slab_size(), allocator.num_allocated(), allocator.num_free());
    });
}

}
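
// Usage sketch (illustrative assumption, not taken from this file): a kernel type
// whose instances fit in one of the slab sizes can route its allocations through
// slab_alloc()/slab_dealloc(), for example via class-specific operator new/delete.
// "HypotheticalNode" is a made-up name used only for illustration.
//
//     class HypotheticalNode {
//     public:
//         void* operator new(size_t size) { return Kernel::slab_alloc(size); }
//         void operator delete(void* ptr) { Kernel::slab_dealloc(ptr, sizeof(HypotheticalNode)); }
//         // ... members totalling at most 128 bytes ...
//     };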