/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * Really really *really* Q&D malloc() and free() implementations
 * just to get going. Don't ever let anyone see this shit. :^)
*/
#include <AK/Assertions.h>
#include <AK/Bitmap.h>
#include <AK/Optional.h>
#include <AK/Types.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/KSyms.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <Kernel/SpinLock.h>
#include <Kernel/StdLib.h>
#include <Kernel/VM/MemoryManager.h>
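// With SANITIZE_KMALLOC defined, fresh allocations are scrubbed with
// KMALLOC_SCRUB_BYTE and freed memory with KFREE_SCRUB_BYTE (see below),
// which makes use of uninitialized or freed memory easier to spot.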
#define SANITIZE_KMALLOC
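// Every kmalloc() allocation is prefixed with this header; data[0] is a
// zero-length array (GNU extension) marking where the caller's bytes begin.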
struct AllocationHeader {
    size_t allocation_size_in_chunks;
    u8 data[0];
};
#define CHUNK_SIZE 32
#define POOL_SIZE (3 * MiB)
#define ETERNAL_RANGE_SIZE (2 * MiB)
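
// Illustrative sanity check (an addition sketched here, assuming MiB is an
// integral constant from AK): the pool is carved into 32-byte chunks, each
// tracked by one bit in alloc_map below, so the bitmap itself costs
// POOL_SIZE / CHUNK_SIZE / 8 = 12 KiB.
static_assert(POOL_SIZE % (CHUNK_SIZE * 8) == 0, "POOL_SIZE must map to whole bytes of alloc_map");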
// We need to make sure to not stomp on global variables or other parts
// of the kernel image!
extern u32 end_of_kernel_bss;
#define ETERNAL_BASE_PHYSICAL ((u8*)PAGE_ROUND_UP(&end_of_kernel_bss))
#define BASE_PHYSICAL (ETERNAL_BASE_PHYSICAL + ETERNAL_RANGE_SIZE)
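
// Resulting physical layout, right after the kernel image:
// [ eternal range (ETERNAL_RANGE_SIZE) | kmalloc pool (POOL_SIZE) ]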
static u8 alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
size_t g_kmalloc_bytes_allocated = 0;
size_t g_kmalloc_bytes_free = POOL_SIZE;
size_t g_kmalloc_bytes_eternal = 0;
size_t g_kmalloc_call_count;
size_t g_kfree_call_count;
bool g_dump_kmalloc_stacks;
static u8* s_next_eternal_ptr;
static u8* s_end_of_eternal_range;
static RecursiveSpinLock s_lock; // needs to be recursive because of dump_backtrace()
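// Called once at boot: clears the allocation bitmap and the pool itself,
// resets the statistics, and sets up the bump pointer for eternal allocations.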
void kmalloc_init()
{
    memset(&alloc_map, 0, sizeof(alloc_map));
    memset((void*)BASE_PHYSICAL, 0, POOL_SIZE);
    s_lock.initialize();
    g_kmalloc_bytes_eternal = 0;
    g_kmalloc_bytes_allocated = 0;
    g_kmalloc_bytes_free = POOL_SIZE;
    s_next_eternal_ptr = (u8*)ETERNAL_BASE_PHYSICAL;
    s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE;
}
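// Bump-allocates memory that can never be freed: no header, no bitmap, just
// an advancing pointer that must stay inside the eternal range.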
void* kmalloc_eternal(size_t size)
{
    ScopedSpinLock lock(s_lock);
    void* ptr = s_next_eternal_ptr;
    s_next_eternal_ptr += size;
    ASSERT(s_next_eternal_ptr < s_end_of_eternal_range);
    g_kmalloc_bytes_eternal += size;
    return ptr;
}
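// Over-allocates by (alignment + sizeof(void*)), rounds the result up to the
// requested alignment, and stashes the original kmalloc() pointer in the word
// just before the aligned address so kfree_aligned() can recover it.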
void* kmalloc_aligned(size_t size, size_t alignment)
{
    void* ptr = kmalloc(size + alignment + sizeof(void*));
    size_t max_addr = (size_t)ptr + alignment;
    void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
    ((void**)aligned_ptr)[-1] = ptr;
    return aligned_ptr;
}
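// Recovers the original pointer stored by kmalloc_aligned() and frees it.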
void kfree_aligned(void* ptr)
{
    kfree(((void**)ptr)[-1]);
}
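
// Usage sketch (illustrative): aligned allocations must be released with
// kfree_aligned(), never plain kfree(), since the true block start is hidden:
//
//     void* p = kmalloc_aligned(64, 16); // 64 usable bytes, 16-byte aligned
//     kfree_aligned(p);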
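// Convenience wrapper: a PAGE_SIZE-aligned allocation, with a paranoid check
// that the result really is page-aligned.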
void* kmalloc_page_aligned(size_t size)
{
    void* ptr = kmalloc_aligned(size, PAGE_SIZE);
    size_t d = (size_t)ptr;
    ASSERT((d & PAGE_MASK) == d);
    return ptr;
}
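// Commits an allocation whose chunk range was already found by the caller:
// writes the header, marks the chunks as used in the bitmap, updates the
// statistics, and returns the payload pointer.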
inline void* kmalloc_allocate(size_t first_chunk, size_t chunks_needed)
{
    auto* a = (AllocationHeader*)(BASE_PHYSICAL + (first_chunk * CHUNK_SIZE));
    u8* ptr = a->data;
    a->allocation_size_in_chunks = chunks_needed;

    Bitmap bitmap_wrapper = Bitmap::wrap(alloc_map, POOL_SIZE / CHUNK_SIZE);
    bitmap_wrapper.set_range(first_chunk, chunks_needed, true);

    g_kmalloc_bytes_allocated += a->allocation_size_in_chunks * CHUNK_SIZE;
    g_kmalloc_bytes_free -= a->allocation_size_in_chunks * CHUNK_SIZE;
#ifdef SANITIZE_KMALLOC
    memset(ptr, KMALLOC_SCRUB_BYTE, (a->allocation_size_in_chunks * CHUNK_SIZE) - sizeof(AllocationHeader));
#endif
    return ptr;
}
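// The core allocator: under s_lock, finds a run of free chunks in the bitmap
// (first fit for small requests, best fit for large ones) and halts the
// machine if the pool cannot satisfy the request.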
void* kmalloc_impl(size_t size)
{
    ScopedSpinLock lock(s_lock);
    ++g_kmalloc_call_count;

if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available) {
        dbg() << "kmalloc(" << size << ")";
        Kernel::dump_backtrace();
}
// We need space for the AllocationHeader at the head of the block.
size_t real_size = size + sizeof(AllocationHeader);
if (g_kmalloc_bytes_free < real_size) {
        Kernel::dump_backtrace();
        klog() << "kmalloc(): PANIC! Out of memory\nbytes_free=" << g_kmalloc_bytes_free << ", real_size=" << real_size;
        Processor::halt();
}
size_t chunks_needed = (real_size + CHUNK_SIZE - 1) / CHUNK_SIZE;
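    // Example: on i386, sizeof(AllocationHeader) is 4, so kmalloc(100) needs
    // 104 bytes and therefore (104 + 31) / 32 = 4 chunks.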
Bitmap bitmap_wrapper = Bitmap::wrap(alloc_map, POOL_SIZE / CHUNK_SIZE);
Optional<size_t> first_chunk;
    // Choose the right allocation policy: first fit is cheap and good enough
    // for small requests, while best fit limits fragmentation for large ones.
    constexpr u32 best_fit_threshold = 128;
    if (chunks_needed < best_fit_threshold) {
        first_chunk = bitmap_wrapper.find_first_fit(chunks_needed);
    } else {
        first_chunk = bitmap_wrapper.find_best_fit(chunks_needed);
    }
if (!first_chunk.has_value()) {
        klog() << "kmalloc(): PANIC! Out of memory (no suitable block for size " << size << ")";
        Kernel::dump_backtrace();
        Processor::halt();
}
return kmalloc_allocate(first_chunk.value(), chunks_needed);
}
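// Frees without taking the lock; callers (kfree() and krealloc()) must
// already hold s_lock.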
static inline void kfree_impl(void* ptr)
{
    ++g_kfree_call_count;

    auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
    FlatPtr start = ((FlatPtr)a - (FlatPtr)BASE_PHYSICAL) / CHUNK_SIZE;

    Bitmap bitmap_wrapper = Bitmap::wrap(alloc_map, POOL_SIZE / CHUNK_SIZE);
    bitmap_wrapper.set_range(start, a->allocation_size_in_chunks, false);

    g_kmalloc_bytes_allocated -= a->allocation_size_in_chunks * CHUNK_SIZE;
    g_kmalloc_bytes_free += a->allocation_size_in_chunks * CHUNK_SIZE;

#ifdef SANITIZE_KMALLOC
    memset(a, KFREE_SCRUB_BYTE, a->allocation_size_in_chunks * CHUNK_SIZE);
#endif
}
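// Public free: tolerates null, then takes the lock and delegates to kfree_impl().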
void kfree(void* ptr)
{
    if (!ptr)
        return;

    ScopedSpinLock lock(s_lock);
    kfree_impl(ptr);
}
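// Resizes an allocation: a null ptr degenerates to kmalloc(); if the new size
// exactly matches the old chunk-rounded size the pointer is returned as-is;
// otherwise the data is copied into a fresh block and the old one is freed.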
void* krealloc(void* ptr, size_t new_size)
{
    if (!ptr)
        return kmalloc(new_size);

    ScopedSpinLock lock(s_lock);

    auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
    size_t old_size = a->allocation_size_in_chunks * CHUNK_SIZE;

    if (old_size == new_size)
        return ptr;

    auto* new_ptr = kmalloc(new_size);
    memcpy(new_ptr, ptr, min(old_size, new_size));
    kfree_impl(ptr);
    return new_ptr;
}
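// The kernel's C++ allocation operators forward straight to kmalloc().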
void* operator new(size_t size)
{
    return kmalloc(size);
}
void* operator new[](size_t size)
{
    return kmalloc(size);
}