From 01c75e3a34c463230583ac7979164159ad8ed306 Mon Sep 17 00:00:00 2001
From: Gunnar Beutner <gbeutner@serenityos.org>
Date: Sun, 30 May 2021 16:24:53 +0200
Subject: [PATCH] Kernel: Don't log profile data before/after the
 process/thread lifetime

There were a few cases where we could end up logging profiling events
before or after the associated process or thread exists in the profile:

After enabling profiling we might end up with CPU samples before we
had a chance to synthesize process/thread creation events.

After a thread exits we would still log associated kmalloc/kfree
events. Instead we now just ignore those events.
---
 Kernel/Heap/kmalloc.cpp       | 20 ++++++++++----------
 Kernel/PerformanceManager.h   | 22 ++++++++++++++++++----
 Kernel/Scheduler.cpp          |  5 -----
 Kernel/Scheduler.h            |  1 -
 Kernel/Syscalls/exit.cpp      |  1 +
 Kernel/Syscalls/profiling.cpp |  6 ++++--
 Kernel/Syscalls/thread.cpp    |  1 +
 Kernel/Thread.h               |  5 +++++
 8 files changed, 39 insertions(+), 22 deletions(-)

diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index 5c245fc7a7f..4562e829362 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -257,11 +257,11 @@ void* kmalloc(size_t size)
         PANIC("kmalloc: Out of memory (requested size: {})", size);
     }
 
-    Process* current_process = Process::current();
-    if (!current_process && Scheduler::colonel_initialized())
-        current_process = Scheduler::colonel();
-    if (current_process)
-        PerformanceManager::add_kmalloc_perf_event(*current_process, size, (FlatPtr)ptr);
+    Thread* current_thread = Thread::current();
+    if (!current_thread)
+        current_thread = Processor::idle_thread();
+    if (current_thread)
+        PerformanceManager::add_kmalloc_perf_event(*current_thread, size, (FlatPtr)ptr);
 
     return ptr;
 }
@@ -277,11 +277,11 @@ void kfree(void* ptr)
     ++g_nested_kfree_calls;
 
     if (g_nested_kfree_calls == 1) {
-        Process* current_process = Process::current();
-        if (!current_process && Scheduler::colonel_initialized())
-            current_process = Scheduler::colonel();
-        if (current_process)
-            PerformanceManager::add_kfree_perf_event(*current_process, 0, (FlatPtr)ptr);
+        Thread* current_thread = Thread::current();
+        if (!current_thread)
+            current_thread = Processor::idle_thread();
+        if (current_thread)
+            PerformanceManager::add_kfree_perf_event(*current_thread, 0, (FlatPtr)ptr);
     }
 
     g_kmalloc_global->m_heap.deallocate(ptr);
diff --git a/Kernel/PerformanceManager.h b/Kernel/PerformanceManager.h
index 6ef78ab7fe3..2f480c05900 100644
--- a/Kernel/PerformanceManager.h
+++ b/Kernel/PerformanceManager.h
@@ -40,6 +40,8 @@ public:
 
     inline static void add_thread_created_event(Thread& thread)
     {
+        if (thread.is_profiling_suppressed())
+            return;
         if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
             [[maybe_unused]] auto rc = event_buffer->append(PERF_EVENT_THREAD_CREATE, thread.tid().value(), 0, nullptr, &thread);
         }
@@ -47,6 +49,8 @@ public:
 
     inline static void add_thread_exit_event(Thread& thread)
    {
+        // As an exception this doesn't check whether profiling is suppressed for
+        // the thread so we can record the thread_exit event anyway.
         if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
             [[maybe_unused]] auto rc = event_buffer->append(PERF_EVENT_THREAD_EXIT, thread.tid().value(), 0, nullptr, &thread);
         }
@@ -54,6 +58,8 @@ public:
 
     inline static void add_cpu_sample_event(Thread& current_thread, const RegisterState& regs, u32 lost_time)
     {
+        if (current_thread.is_profiling_suppressed())
+            return;
         if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
             [[maybe_unused]] auto rc = event_buffer->append_with_eip_and_ebp(
                 current_thread.pid(), current_thread.tid(),
@@ -77,27 +83,35 @@ public:
 
     inline static void add_context_switch_perf_event(Thread& current_thread, Thread& next_thread)
     {
+        if (current_thread.is_profiling_suppressed())
+            return;
         if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
             [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_CONTEXT_SWITCH, next_thread.pid().value(), next_thread.tid().value(), nullptr);
         }
     }
 
-    inline static void add_kmalloc_perf_event(Process& current_process, size_t size, FlatPtr ptr)
+    inline static void add_kmalloc_perf_event(Thread& current_thread, size_t size, FlatPtr ptr)
     {
-        if (auto* event_buffer = current_process.current_perf_events_buffer()) {
+        if (current_thread.is_profiling_suppressed())
+            return;
+        if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
             [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_KMALLOC, size, ptr, nullptr);
         }
     }
 
-    inline static void add_kfree_perf_event(Process& current_process, size_t size, FlatPtr ptr)
+    inline static void add_kfree_perf_event(Thread& current_thread, size_t size, FlatPtr ptr)
     {
-        if (auto* event_buffer = current_process.current_perf_events_buffer()) {
+        if (current_thread.is_profiling_suppressed())
+            return;
+        if (auto* event_buffer = current_thread.process().current_perf_events_buffer()) {
             [[maybe_unused]] auto res = event_buffer->append(PERF_EVENT_KFREE, size, ptr, nullptr);
         }
     }
 
     inline static void add_page_fault_event(Thread& thread, const RegisterState& regs)
     {
+        if (thread.is_profiling_suppressed())
+            return;
         if (auto* event_buffer = thread.process().current_perf_events_buffer()) {
             [[maybe_unused]] auto rc = event_buffer->append_with_eip_and_ebp(
                 thread.pid(), thread.tid(),
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 7788ece9fcc..04f9d0a18eb 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -445,11 +445,6 @@ void Scheduler::prepare_for_idle_loop()
     scheduler_data.m_in_scheduler = true;
 }
 
-bool Scheduler::colonel_initialized()
-{
-    return !!s_colonel_process;
-}
-
 Process* Scheduler::colonel()
 {
     VERIFY(s_colonel_process);
diff --git a/Kernel/Scheduler.h b/Kernel/Scheduler.h
index 766c27c91ad..ffd2cf0456c 100644
--- a/Kernel/Scheduler.h
+++ b/Kernel/Scheduler.h
@@ -43,7 +43,6 @@ public:
     static void leave_on_first_switch(u32 flags);
     static void prepare_after_exec();
     static void prepare_for_idle_loop();
-    static bool colonel_initialized();
     static Process* colonel();
     static void idle_loop(void*);
     static void invoke_async();
diff --git a/Kernel/Syscalls/exit.cpp b/Kernel/Syscalls/exit.cpp
index fa2faf357ba..3dcaea120f9 100644
--- a/Kernel/Syscalls/exit.cpp
+++ b/Kernel/Syscalls/exit.cpp
@@ -19,6 +19,7 @@ void Process::sys$exit(int status)
     }
 
     auto* current_thread = Thread::current();
+    current_thread->set_profiling_suppressed();
     PerformanceManager::add_thread_exit_event(*current_thread);
 
     die();
diff --git a/Kernel/Syscalls/profiling.cpp b/Kernel/Syscalls/profiling.cpp
index 81b8e4690d3..0b29e4ae51d 100644
--- a/Kernel/Syscalls/profiling.cpp
+++ b/Kernel/Syscalls/profiling.cpp
@@ -25,7 +25,7 @@ KResultOr<int> Process::sys$profiling_enable(pid_t pid, u64 event_mask)
         if (!is_superuser())
             return EPERM;
         ScopedCritical critical;
-        g_profiling_event_mask = event_mask;
+        g_profiling_event_mask = PERF_EVENT_PROCESS_CREATE | PERF_EVENT_THREAD_CREATE | PERF_EVENT_MMAP;
         if (g_global_perf_events)
             g_global_perf_events->clear();
         else
@@ -40,6 +40,7 @@ KResultOr<int> Process::sys$profiling_enable(pid_t pid, u64 event_mask)
             PerformanceManager::add_process_created_event(process);
             return IterationDecision::Continue;
         });
+        g_profiling_event_mask = event_mask;
         return 0;
     }
 
@@ -51,12 +52,13 @@ KResultOr<int> Process::sys$profiling_enable(pid_t pid, u64 event_mask)
         return ESRCH;
     if (!is_superuser() && process->uid() != euid())
         return EPERM;
-    g_profiling_event_mask = event_mask;
+    g_profiling_event_mask = PERF_EVENT_PROCESS_CREATE | PERF_EVENT_THREAD_CREATE | PERF_EVENT_MMAP;
     process->set_profiling(true);
     if (!process->create_perf_events_buffer_if_needed()) {
         process->set_profiling(false);
         return ENOMEM;
     }
+    g_profiling_event_mask = event_mask;
     if (!TimeManagement::the().enable_profile_timer()) {
         process->set_profiling(false);
         return ENOTSUP;
diff --git a/Kernel/Syscalls/thread.cpp b/Kernel/Syscalls/thread.cpp
index 3dc4852ac4f..a57fc30e05b 100644
--- a/Kernel/Syscalls/thread.cpp
+++ b/Kernel/Syscalls/thread.cpp
@@ -88,6 +88,7 @@ void Process::sys$exit_thread(Userspace<void*> exit_value, Userspace<void*> stack_location, size_t stack_size)
     }
 
     auto current_thread = Thread::current();
+    current_thread->set_profiling_suppressed();
     PerformanceManager::add_thread_exit_event(*current_thread);
 
     if (stack_location) {
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 028237538e3..4f6e2bc7587 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -1131,6 +1131,9 @@ public:
         return m_nested_profiler_calls.fetch_sub(1, AK::MemoryOrder::memory_order_acquire);
     }
 
+    bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
+    void set_profiling_suppressed() { m_is_profiling_suppressed = true; }
+
 private:
     Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Region>, NonnullRefPtr<Timer>);
 
@@ -1273,6 +1276,8 @@ private:
 
     RefPtr<Timer> m_block_timer;
 
+    bool m_is_profiling_suppressed { false };
+
     void yield_without_holding_big_lock();
     void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
     void yield_while_not_holding_big_lock();
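
To make the intent of the patch concrete outside of kernel context, below is a
minimal, self-contained C++ sketch of the two mechanisms it combines: a global
event mask kept narrow while creation events are synthesized, and a one-way
per-thread suppression flag set just before the final thread_exit event. It is
illustrative only: FakeThread, g_event_mask and emit() are hypothetical
stand-ins for Thread, g_profiling_event_mask and the PerformanceManager
helpers, not SerenityOS APIs, and the real add_thread_exit_event records the
exit by simply never checking the flag.

// Minimal userspace sketch (not kernel code) of the two ideas in this patch.
#include <atomic>
#include <cstdint>
#include <cstdio>

enum : uint64_t {
    EVENT_PROCESS_CREATE = 1u << 0,
    EVENT_THREAD_CREATE = 1u << 1,
    EVENT_MMAP = 1u << 2,
    EVENT_SAMPLE = 1u << 3,
    EVENT_THREAD_EXIT = 1u << 4,
};

// Idea 1: a global event mask that stays narrow while creation events for
// already-running processes/threads are synthesized, so concurrent samples
// can't land in the buffer before their process exists in the profile.
static std::atomic<uint64_t> g_event_mask { 0 };

struct FakeThread {
    // Idea 2: a one-way per-thread kill switch, set just before the final
    // thread_exit event, so kmalloc/kfree during teardown are dropped.
    bool profiling_suppressed { false };

    void emit(uint64_t type, const char* name)
    {
        // The real add_thread_exit_event simply doesn't check the flag;
        // here we model that with an explicit exception for THREAD_EXIT.
        if (profiling_suppressed && type != EVENT_THREAD_EXIT)
            return;
        if (!(g_event_mask.load() & type))
            return;
        printf("recorded: %s\n", name);
    }
};

int main()
{
    FakeThread thread;

    // Enabling profiling: only creation/mmap events pass until the
    // synthesized process/thread creation events are in the buffer.
    g_event_mask = EVENT_PROCESS_CREATE | EVENT_THREAD_CREATE | EVENT_MMAP;
    thread.emit(EVENT_THREAD_CREATE, "thread_create"); // recorded
    thread.emit(EVENT_SAMPLE, "early cpu sample");     // dropped
    g_event_mask = ~0ull; // now widen to the caller's requested mask

    thread.emit(EVENT_SAMPLE, "cpu sample"); // recorded

    // Exiting: suppress the thread, but still record the exit itself.
    thread.profiling_suppressed = true;
    thread.emit(EVENT_THREAD_EXIT, "thread_exit"); // recorded (exception)
    thread.emit(EVENT_SAMPLE, "late kmalloc");     // dropped
    return 0;
}

The same shape appears twice in the patch itself: sys$profiling_enable narrows
g_profiling_event_mask around the creation-event synthesis loop before
restoring the caller's mask, and sys$exit/sys$exit_thread call
set_profiling_suppressed() immediately before emitting the final
thread_exit event.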