/*
 * Really really *really* Q&D malloc() and free() implementations
 * just to get going. Don't ever let anyone see this shit. :^)
 */

#include <AK/Assertions.h>
#include <AK/Types.h>
#include <Kernel/KSyms.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <Kernel/StdLib.h>
#include <Kernel/i386.h>
#include <Kernel/kmalloc.h>

#define SANITIZE_KMALLOC
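// Each allocation is prefixed with an allocation_t header recording the index
// of its first chunk and how many chunks it spans.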
struct [[gnu::packed]] allocation_t {
    size_t start;
    size_t nchunk;
};
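// Pool layout: a bump-allocated "eternal" range at ETERNAL_BASE_PHYSICAL,
// followed by the chunked pool at BASE_PHYSICAL. alloc_map holds one bit per
// CHUNK_SIZE-byte chunk of the pool.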
#define CHUNK_SIZE 32
#define POOL_SIZE (1024 * 1024)

#define ETERNAL_BASE_PHYSICAL 0x100000
#define ETERNAL_RANGE_SIZE 0x100000

#define BASE_PHYSICAL 0x200000
#define RANGE_SIZE 0x100000

static byte alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
volatile size_t sum_alloc = 0;
volatile size_t sum_free = POOL_SIZE;
volatile size_t kmalloc_sum_eternal = 0;

dword g_kmalloc_call_count;
dword g_kfree_call_count;
bool g_dump_kmalloc_stacks;

static byte* s_next_eternal_ptr;
static byte* s_end_of_eternal_range;
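// Returns true if ptr falls inside either the eternal range or the chunked pool.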
bool is_kmalloc_address(const void* ptr)
{
    if (ptr >= (byte*)ETERNAL_BASE_PHYSICAL && ptr < s_next_eternal_ptr)
        return true;
    return (size_t)ptr >= BASE_PHYSICAL && (size_t)ptr <= (BASE_PHYSICAL + POOL_SIZE);
}
void kmalloc_init()
{
    memset(&alloc_map, 0, sizeof(alloc_map));
    memset((void*)BASE_PHYSICAL, 0, POOL_SIZE);

    kmalloc_sum_eternal = 0;
    sum_alloc = 0;
    sum_free = POOL_SIZE;

    s_next_eternal_ptr = (byte*)ETERNAL_BASE_PHYSICAL;
    s_end_of_eternal_range = s_next_eternal_ptr + ETERNAL_RANGE_SIZE;
}
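// kmalloc_eternal: a trivial bump allocator for memory that is never freed.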
void* kmalloc_eternal(size_t size)
{
    void* ptr = s_next_eternal_ptr;
    s_next_eternal_ptr += size;
    ASSERT(s_next_eternal_ptr < s_end_of_eternal_range);
    kmalloc_sum_eternal += size;
    return ptr;
}
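// kmalloc_aligned over-allocates, rounds the returned pointer up to the
// requested alignment, and stashes the original kmalloc() pointer just before
// the aligned pointer so kfree_aligned() can recover it.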
void* kmalloc_aligned(size_t size, size_t alignment)
{
    void* ptr = kmalloc(size + alignment + sizeof(void*));
    size_t max_addr = (size_t)ptr + alignment;
    void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
    ((void**)aligned_ptr)[-1] = ptr;
    return aligned_ptr;
}

void kfree_aligned(void* ptr)
{
    kfree(((void**)ptr)[-1]);
}
void* kmalloc_page_aligned(size_t size)
{
    void* ptr = kmalloc_aligned(size, PAGE_SIZE);
    size_t d = (size_t)ptr;
    ASSERT((d & PAGE_MASK) == d);
    return ptr;
}
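// kmalloc_impl: first-fit scan over the chunk bitmap, done with interrupts
// disabled since it mutates global allocator state.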
void* kmalloc_impl(size_t size)
{
    InterruptDisabler disabler;
    ++g_kmalloc_call_count;

    if (g_dump_kmalloc_stacks && ksyms_ready) {
        dbgprintf("kmalloc(%u)\n", size);
        dump_backtrace();
    }
    // We need space for the allocation_t structure at the head of the block.
    size_t real_size = size + sizeof(allocation_t);

    if (sum_free < real_size) {
        dump_backtrace();
        kprintf("%s(%u) kmalloc(): PANIC! Out of memory (sucks, dude)\nsum_free=%u, real_size=%u\n", current->process().name().characters(), current->pid(), sum_free, real_size);
        hang();
    }

    size_t chunks_needed = real_size / CHUNK_SIZE;
    if (real_size % CHUNK_SIZE)
        ++chunks_needed;
    size_t chunks_here = 0;
    size_t first_chunk = 0;

    for (size_t i = 0; i < (POOL_SIZE / CHUNK_SIZE / 8); ++i) {
        if (alloc_map[i] == 0xff) {
            // Skip over completely full bucket.
            chunks_here = 0;
            continue;
        }
        // FIXME: This scan can be optimized further with LZCNT.
        for (size_t j = 0; j < 8; ++j) {
            if (!(alloc_map[i] & (1 << j))) {
                if (chunks_here == 0) {
                    // Mark where potential allocation starts.
                    first_chunk = i * 8 + j;
                }

                ++chunks_here;

                if (chunks_here == chunks_needed) {
                    auto* a = (allocation_t*)(BASE_PHYSICAL + (first_chunk * CHUNK_SIZE));
                    byte* ptr = (byte*)a;
                    ptr += sizeof(allocation_t);
                    a->nchunk = chunks_needed;
                    a->start = first_chunk;

                    for (size_t k = first_chunk; k < (first_chunk + chunks_needed); ++k) {
                        alloc_map[k / 8] |= 1 << (k % 8);
                    }

                    sum_alloc += a->nchunk * CHUNK_SIZE;
                    sum_free -= a->nchunk * CHUNK_SIZE;
#ifdef SANITIZE_KMALLOC
                    memset(ptr, 0xbb, (a->nchunk * CHUNK_SIZE) - sizeof(allocation_t));
#endif
                    return ptr;
                }
            } else {
                // This is in use, so restart chunks_here counter.
                chunks_here = 0;
            }
        }
    }
    kprintf("%s(%u) kmalloc(): PANIC! Out of memory (no suitable block for size %u)\n", current->process().name().characters(), current->pid(), size);
    dump_backtrace();
    hang();
}
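// kfree: recover the allocation_t header just before ptr, clear the
// corresponding bits in alloc_map, and scrub the memory when
// SANITIZE_KMALLOC is enabled.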
void kfree(void* ptr)
{
    if (!ptr)
        return;

    InterruptDisabler disabler;
    ++g_kfree_call_count;

    auto* a = (allocation_t*)(((byte*)ptr) - sizeof(allocation_t));

    for (size_t k = a->start; k < (a->start + a->nchunk); ++k)
        alloc_map[k / 8] &= ~(1 << (k % 8));

    sum_alloc -= a->nchunk * CHUNK_SIZE;
    sum_free += a->nchunk * CHUNK_SIZE;

#ifdef SANITIZE_KMALLOC
    memset(a, 0xaa, a->nchunk * CHUNK_SIZE);
#endif
}
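// Route the kernel's global operator new/delete through kmalloc()/kfree().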
void* operator new(size_t size)
{
    return kmalloc(size);
}

void* operator new[](size_t size)
{
    return kmalloc(size);
}

void operator delete(void* ptr)
{
    return kfree(ptr);
}

void operator delete[](void* ptr)
{
    return kfree(ptr);
}

void operator delete(void* ptr, size_t)
{
    return kfree(ptr);
}

void operator delete[](void* ptr, size_t)
{
    return kfree(ptr);
}