/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Really really *really* Q&D malloc() and free() implementations
 * just to get going. Don't ever let anyone see this shit. :^)
 */
#include <AK/Assertions.h>
#include <AK/Types.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/KSyms.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <LibBareMetal/StdLib.h>

#define SANITIZE_KMALLOC

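// Every allocation in the pool is prefixed with this header, recording the
// first chunk of the block and how many chunks it spans.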
struct [[gnu::packed]] allocation_t
{
    size_t start;
    size_t nchunk;
};

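// Address-space layout for this allocator: the "eternal" range covers the
// 2 MB starting at 0xc0000000 + 2 MB, and the chunked pool covers the 3 MB
// starting at 0xc0000000 + 4 MB.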
#define BASE_PHYSICAL (0xc0000000 + (4 * MB))
#define CHUNK_SIZE 8
#define POOL_SIZE (3 * MB)

#define ETERNAL_BASE_PHYSICAL (0xc0000000 + (2 * MB))
#define ETERNAL_RANGE_SIZE (2 * MB)

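// One bit per CHUNK_SIZE-byte chunk of the pool; a set bit means the chunk is in use.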
static u8 alloc_map[POOL_SIZE / CHUNK_SIZE / 8];

volatile size_t sum_alloc = 0;
volatile size_t sum_free = POOL_SIZE;
volatile size_t kmalloc_sum_eternal = 0;

u32 g_kmalloc_call_count;
u32 g_kfree_call_count;
bool g_dump_kmalloc_stacks;

static u8* s_next_eternal_ptr;
static u8* s_end_of_eternal_range;

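// Reset the bitmap, scrub the pool, and reinitialize all accounting state.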
void kmalloc_init()
{
    memset(&alloc_map, 0, sizeof(alloc_map));
    memset((void*)BASE_PHYSICAL, 0, POOL_SIZE);

    kmalloc_sum_eternal = 0;
    sum_alloc = 0;
    sum_free = POOL_SIZE;

    s_next_eternal_ptr = (u8*)ETERNAL_BASE_PHYSICAL;
    s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE;
}

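// Bump-pointer allocator for the eternal range: allocations made here are
// never freed, so there is no header and no bitmap bookkeeping.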
void* kmalloc_eternal(size_t size)
{
    void* ptr = s_next_eternal_ptr;
    s_next_eternal_ptr += size;
    ASSERT(s_next_eternal_ptr < s_end_of_eternal_range);
    kmalloc_sum_eternal += size;
    return ptr;
}

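// Over-allocate by (alignment + sizeof(void*)), round up to the requested
// alignment, and stash the original kmalloc() pointer in the word just below
// the aligned address so that kfree_aligned() can recover it.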
void* kmalloc_aligned(size_t size, size_t alignment)
{
    void* ptr = kmalloc(size + alignment + sizeof(void*));
    size_t max_addr = (size_t)ptr + alignment;
    void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
    ((void**)aligned_ptr)[-1] = ptr;
    return aligned_ptr;
}

void kfree_aligned(void* ptr)
{
    kfree(((void**)ptr)[-1]);
}

void* kmalloc_page_aligned(size_t size)
{
    void* ptr = kmalloc_aligned(size, PAGE_SIZE);
    size_t d = (size_t)ptr;
    ASSERT((d & PAGE_MASK) == d);
    return ptr;
}

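// First-fit scan of the chunk bitmap: walk it byte by byte, counting runs of
// consecutive free chunks until one is long enough for the header plus the
// requested size.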
void* kmalloc_impl(size_t size)
{
    Kernel::InterruptDisabler disabler;
    ++g_kmalloc_call_count;

    if (g_dump_kmalloc_stacks && Kernel::ksyms_ready) {
        dbgprintf("kmalloc(%u)\n", size);
        Kernel::dump_backtrace();
    }
    // We need space for the allocation_t structure at the head of the block.
    size_t real_size = size + sizeof(allocation_t);

    if (sum_free < real_size) {
        Kernel::dump_backtrace();
        kprintf("%s(%u) kmalloc(): PANIC! Out of memory (sucks, dude)\nsum_free=%u, real_size=%u\n", Kernel::Process::current->name().characters(), Kernel::Process::current->pid(), sum_free, real_size);
        Kernel::hang();
    }
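    // Round the real size up to whole chunks; e.g. on i386, kmalloc(24) needs
    // 24 + sizeof(allocation_t) = 32 bytes, i.e. exactly 4 chunks.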
    size_t chunks_needed = real_size / CHUNK_SIZE;
    if (real_size % CHUNK_SIZE)
        ++chunks_needed;

    size_t chunks_here = 0;
    size_t first_chunk = 0;

    for (size_t i = 0; i < (POOL_SIZE / CHUNK_SIZE / 8); ++i) {
        if (alloc_map[i] == 0xff) {
            // Skip over completely full bucket.
            chunks_here = 0;
            continue;
        }
        // FIXME: This scan can be optimized further with LZCNT.
        for (size_t j = 0; j < 8; ++j) {
            if (!(alloc_map[i] & (1 << j))) {
                if (chunks_here == 0) {
                    // Mark where potential allocation starts.
                    first_chunk = i * 8 + j;
                }

                ++chunks_here;

                if (chunks_here == chunks_needed) {
                    auto* a = (allocation_t*)(BASE_PHYSICAL + (first_chunk * CHUNK_SIZE));
                    u8* ptr = (u8*)a;
                    ptr += sizeof(allocation_t);
                    a->nchunk = chunks_needed;
                    a->start = first_chunk;

                    for (size_t k = first_chunk; k < (first_chunk + chunks_needed); ++k) {
                        alloc_map[k / 8] |= 1 << (k % 8);
                    }

                    sum_alloc += a->nchunk * CHUNK_SIZE;
                    sum_free -= a->nchunk * CHUNK_SIZE;
#ifdef SANITIZE_KMALLOC
                    memset(ptr, KMALLOC_SCRUB_BYTE, (a->nchunk * CHUNK_SIZE) - sizeof(allocation_t));
#endif
                    return ptr;
                }
            } else {
                // This is in use, so restart chunks_here counter.
                chunks_here = 0;
            }
        }
    }
    kprintf("%s(%u) kmalloc(): PANIC! Out of memory (no suitable block for size %u)\n", Kernel::Process::current->name().characters(), Kernel::Process::current->pid(), size);
    Kernel::dump_backtrace();
    Kernel::hang();
}

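// Recover the allocation_t header just below the pointer, clear its chunks in
// the bitmap, and update the accounting sums.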
void kfree(void* ptr)
{
    if (!ptr)
        return;

    Kernel::InterruptDisabler disabler;
    ++g_kfree_call_count;

    auto* a = (allocation_t*)((u8*)ptr - sizeof(allocation_t));

    for (size_t k = a->start; k < (a->start + a->nchunk); ++k)
        alloc_map[k / 8] &= ~(1 << (k % 8));

    sum_alloc -= a->nchunk * CHUNK_SIZE;
    sum_free += a->nchunk * CHUNK_SIZE;

#ifdef SANITIZE_KMALLOC
    memset(a, KFREE_SCRUB_BYTE, a->nchunk * CHUNK_SIZE);
#endif
}

void* krealloc(void* ptr, size_t new_size)
{
    if (!ptr)
        return kmalloc(new_size);

    Kernel::InterruptDisabler disabler;

    auto* a = (allocation_t*)((u8*)ptr - sizeof(allocation_t));
    size_t old_size = a->nchunk * CHUNK_SIZE;

    if (old_size == new_size)
        return ptr;

    auto* new_ptr = kmalloc(new_size);
    memcpy(new_ptr, ptr, min(old_size, new_size));
    kfree(ptr);
    return new_ptr;
}

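// Route the kernel's global operator new and operator new[] through kmalloc().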
void* operator new(size_t size)
{
    return kmalloc(size);
}

void* operator new[](size_t size)
{
    return kmalloc(size);
}