diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 64d929e33b0..31cd4eb898b 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -167,7 +167,7 @@ bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
     });
 }
 
-void Scheduler::queue_runnable_thread(Thread& thread)
+void Scheduler::enqueue_runnable_thread(Thread& thread)
 {
     VERIFY(g_scheduler_lock.own_lock());
     if (thread.is_idle_thread())
diff --git a/Kernel/Scheduler.h b/Kernel/Scheduler.h
index ed35ad94490..e7ec2da65f0 100644
--- a/Kernel/Scheduler.h
+++ b/Kernel/Scheduler.h
@@ -50,7 +50,7 @@ public:
     static Thread& pull_next_runnable_thread();
     static Thread* peek_next_runnable_thread();
     static bool dequeue_runnable_thread(Thread&, bool = false);
-    static void queue_runnable_thread(Thread&);
+    static void enqueue_runnable_thread(Thread&);
     static void dump_scheduler_state(bool = false);
     static bool is_initialized();
     static TotalTimeScheduled get_total_time_scheduled();
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index efc76f2d6a6..8dd4a499515 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -1120,7 +1120,7 @@ void Thread::set_state(State new_state, u8 stop_signal)
     }
 
     if (m_state == Runnable) {
-        Scheduler::queue_runnable_thread(*this);
+        Scheduler::enqueue_runnable_thread(*this);
         Processor::smp_wake_n_idle_processors(1);
     } else if (m_state == Stopped) {
         // We don't want to restore to Running state, only Runnable!