Kernel: Allow preventing kmalloc and kfree

For "destructive" disallowance of allocations throughout the system,
Thread gains a member that controls whether allocations are currently
allowed or not. kmalloc checks this member on both allocations and
deallocations (with the exception of early boot) and panics the kernel
if allocations are disabled. This will allow for critical sections that
can't be allowed to allocate to fail-fast, making for easier debugging.
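
A minimal usage sketch of the new API (not part of this commit; the
AllocationDisabler guard and do_critical_work() below are hypothetical
illustrations of the intended fail-fast pattern):

    // RAII guard that forbids allocation on the current thread for its lifetime.
    class AllocationDisabler {
    public:
        AllocationDisabler()
            : m_thread(Thread::current())
        {
            if (m_thread) {
                m_was_enabled = m_thread->is_allocation_enabled();
                m_thread->set_allocation_enabled(false);
            }
        }
        ~AllocationDisabler()
        {
            if (m_thread)
                m_thread->set_allocation_enabled(m_was_enabled);
        }

    private:
        Thread* m_thread { nullptr };
        bool m_was_enabled { true };
    };

    void do_critical_work()
    {
        AllocationDisabler disabler;
        // Any kmalloc()/kfree_sized() reached from here now fails the
        // VERIFY(current_thread->is_allocation_enabled()) check and
        // panics the kernel, pinpointing the offending allocation.
    }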

PS: My first proper Kernel commit :^)
kleines Filmröllchen, 2022-01-08 23:36:13 +01:00, committed by Linus Groh
parent b8d640c3f9
commit e2c9578390
Notes: sideshowbarker 2024-07-17 21:14:34 +09:00
2 changed files with 12 additions and 2 deletions

Kernel/Heap/kmalloc.cpp

@@ -362,8 +362,12 @@ void* kmalloc(size_t size)
     Thread* current_thread = Thread::current();
     if (!current_thread)
         current_thread = Processor::idle_thread();
-    if (current_thread)
+    if (current_thread) {
+        // FIXME: By the time we check this, we have already allocated above.
+        // This means that in the case of an infinite recursion, we can't catch it this way.
+        VERIFY(current_thread->is_allocation_enabled());
         PerformanceManager::add_kmalloc_perf_event(*current_thread, size, (FlatPtr)ptr);
+    }
 
     return ptr;
 }
@@ -384,8 +388,10 @@ void kfree_sized(void* ptr, size_t size)
         Thread* current_thread = Thread::current();
         if (!current_thread)
             current_thread = Processor::idle_thread();
-        if (current_thread)
+        if (current_thread) {
+            VERIFY(current_thread->is_allocation_enabled());
             PerformanceManager::add_kfree_perf_event(*current_thread, 0, (FlatPtr)ptr);
+        }
     }
 
     g_kmalloc_global->deallocate(ptr, size);
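
The FIXME in the first hunk points out a limitation: the flag is checked
only after kmalloc has already performed the allocation, so an allocation
made while servicing a forbidden allocation (infinite recursion) slips
through. A hedged sketch of the reordering it hints at, where
kmalloc_impl() is a hypothetical stand-in for the actual allocation path:

    void* kmalloc(size_t size)
    {
        Thread* current_thread = Thread::current();
        if (!current_thread)
            current_thread = Processor::idle_thread();
        // Verify before touching the allocator, so even a recursive
        // forbidden allocation trips the check. (Hypothetical rearrangement,
        // not what this commit does.)
        if (current_thread)
            VERIFY(current_thread->is_allocation_enabled());

        void* ptr = kmalloc_impl(size); // hypothetical: the real allocation
        if (current_thread)
            PerformanceManager::add_kmalloc_perf_event(*current_thread, size, (FlatPtr)ptr);
        return ptr;
    }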

Kernel/Thread.h

@@ -1244,6 +1244,9 @@ public:
     bool is_promise_violation_pending() const { return m_is_promise_violation_pending; }
     void set_promise_violation_pending(bool value) { m_is_promise_violation_pending = value; }
 
+    bool is_allocation_enabled() const { return m_allocation_enabled; }
+    void set_allocation_enabled(bool value) { m_allocation_enabled = value; }
+
     String backtrace();
 
 private:
@@ -1348,6 +1351,7 @@ private:
     u32 m_lock_requested_count { 0 };
     IntrusiveListNode<Thread> m_blocked_threads_list_node;
     LockRank m_lock_rank_mask { LockRank::None };
+    bool m_allocation_enabled { true };
 
 #if LOCK_DEBUG
     struct HoldingLockInfo {