Kernel: Use the Function class for deferred_call_queue()

This avoids allocations for deferred_call_queue().
This commit is contained in:
Gunnar Beutner 2021-05-20 01:30:36 +02:00 committed by Andreas Kling
parent 7557f2db90
commit cac7a8ced9
Notes: sideshowbarker 2024-07-18 17:43:24 +09:00
2 changed files with 25 additions and 55 deletions

View File

@@ -2234,6 +2234,7 @@ UNMAP_AFTER_INIT void Processor::deferred_call_pool_init()
for (size_t i = 0; i < pool_count; i++) { for (size_t i = 0; i < pool_count; i++) {
auto& entry = m_deferred_call_pool[i]; auto& entry = m_deferred_call_pool[i];
entry.next = i < pool_count - 1 ? &m_deferred_call_pool[i + 1] : nullptr; entry.next = i < pool_count - 1 ? &m_deferred_call_pool[i + 1] : nullptr;
new (entry.handler_storage) DeferredCallEntry::HandlerFunction;
entry.was_allocated = false; entry.was_allocated = false;
} }
m_pending_deferred_calls = nullptr; m_pending_deferred_calls = nullptr;
@@ -2245,6 +2246,8 @@ void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
VERIFY(m_in_critical); VERIFY(m_in_critical);
VERIFY(!entry->was_allocated); VERIFY(!entry->was_allocated);
entry->handler_value() = {};
entry->next = m_free_deferred_call_pool_entry; entry->next = m_free_deferred_call_pool_entry;
m_free_deferred_call_pool_entry = entry; m_free_deferred_call_pool_entry = entry;
} }
@@ -2262,6 +2265,7 @@ DeferredCallEntry* Processor::deferred_call_get_free()
} }
auto* entry = new DeferredCallEntry; auto* entry = new DeferredCallEntry;
new (entry->handler_storage) DeferredCallEntry::HandlerFunction;
entry->was_allocated = true; entry->was_allocated = true;
return entry; return entry;
} }
@@ -2290,20 +2294,14 @@ void Processor::deferred_call_execute_pending()
pending_list = reverse_list(pending_list); pending_list = reverse_list(pending_list);
do { do {
// Call the appropriate callback handler pending_list->invoke_handler();
if (pending_list->have_data) {
pending_list->callback_with_data.handler(pending_list->callback_with_data.data);
if (pending_list->callback_with_data.free)
pending_list->callback_with_data.free(pending_list->callback_with_data.data);
} else {
pending_list->callback.handler();
}
// Return the entry back to the pool, or free it // Return the entry back to the pool, or free it
auto* next = pending_list->next; auto* next = pending_list->next;
if (pending_list->was_allocated) if (pending_list->was_allocated) {
pending_list->handler_value().~Function();
delete pending_list; delete pending_list;
else } else
deferred_call_return_to_pool(pending_list); deferred_call_return_to_pool(pending_list);
pending_list = next; pending_list = next;
} while (pending_list); } while (pending_list);
@@ -2316,7 +2314,7 @@ void Processor::deferred_call_queue_entry(DeferredCallEntry* entry)
m_pending_deferred_calls = entry; m_pending_deferred_calls = entry;
} }
void Processor::deferred_call_queue(void (*callback)()) void Processor::deferred_call_queue(Function<void()> callback)
{ {
// NOTE: If we are called outside of a critical section and outside // NOTE: If we are called outside of a critical section and outside
// of an irq handler, the function will be executed before we return! // of an irq handler, the function will be executed before we return!
@@ -2324,24 +2322,7 @@ void Processor::deferred_call_queue(void (*callback)())
auto& cur_proc = Processor::current(); auto& cur_proc = Processor::current();
auto* entry = cur_proc.deferred_call_get_free(); auto* entry = cur_proc.deferred_call_get_free();
entry->have_data = false; entry->handler_value() = move(callback);
entry->callback.handler = callback;
cur_proc.deferred_call_queue_entry(entry);
}
void Processor::deferred_call_queue(void (*callback)(void*), void* data, void (*free_data)(void*))
{
// NOTE: If we are called outside of a critical section and outside
// of an irq handler, the function will be executed before we return!
ScopedCritical critical;
auto& cur_proc = Processor::current();
auto* entry = cur_proc.deferred_call_get_free();
entry->have_data = true;
entry->callback_with_data.handler = callback;
entry->callback_with_data.data = data;
entry->callback_with_data.free = free_data;
cur_proc.deferred_call_queue_entry(entry); cur_proc.deferred_call_queue_entry(entry);
} }

View File

@@ -9,6 +9,7 @@
#include <AK/Atomic.h> #include <AK/Atomic.h>
#include <AK/Badge.h> #include <AK/Badge.h>
#include <AK/Concepts.h> #include <AK/Concepts.h>
#include <AK/Function.h>
#include <AK/Noncopyable.h> #include <AK/Noncopyable.h>
#include <AK/Vector.h> #include <AK/Vector.h>
@@ -608,19 +609,21 @@ struct ProcessorMessageEntry {
}; };
struct DeferredCallEntry { struct DeferredCallEntry {
using HandlerFunction = Function<void()>;
DeferredCallEntry* next; DeferredCallEntry* next;
union { alignas(HandlerFunction) u8 handler_storage[sizeof(HandlerFunction)];
struct {
void (*handler)();
} callback;
struct {
void* data;
void (*handler)(void*);
void (*free)(void*);
} callback_with_data;
};
bool have_data;
bool was_allocated; bool was_allocated;
HandlerFunction& handler_value()
{
return *bit_cast<HandlerFunction*>(&handler_storage);
}
void invoke_handler()
{
handler_value()();
}
}; };
class Processor; class Processor;
@@ -975,21 +978,7 @@ public:
static void smp_broadcast_flush_tlb(const PageDirectory*, VirtualAddress, size_t); static void smp_broadcast_flush_tlb(const PageDirectory*, VirtualAddress, size_t);
static u32 smp_wake_n_idle_processors(u32 wake_count); static u32 smp_wake_n_idle_processors(u32 wake_count);
template<typename Callback> static void deferred_call_queue(Function<void()> callback);
static void deferred_call_queue(Callback callback)
{
auto* data = new Callback(move(callback));
deferred_call_queue(
[](void* data) {
(*reinterpret_cast<Callback*>(data))();
},
data,
[](void* data) {
delete reinterpret_cast<Callback*>(data);
});
}
static void deferred_call_queue(void (*callback)());
static void deferred_call_queue(void (*callback)(void*), void* data, void (*free_data)(void*));
ALWAYS_INLINE bool has_feature(CPUFeature f) const ALWAYS_INLINE bool has_feature(CPUFeature f) const
{ {