/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <AK/QuickSort.h>
#include <AK/ScopeGuard.h>
#include <AK/TemporaryChange.h>
#include <AK/Time.h>
#include <Kernel/Process.h>
#include <Kernel/Profiling.h>
#include <Kernel/RTC.h>
#include <Kernel/Scheduler.h>
Kernel: Introduce the new Time management subsystem This new subsystem includes better abstractions of how time will be handled in the OS. We take advantage of the existing RTC timer to aid in keeping time synchronized. This is standing in contrast to how we handled time-keeping in the kernel, where the PIT was responsible for that function in addition to update the scheduler about ticks. With that new advantage, we can easily change the ticking dynamically and still keep the time synchronized. In the process context, we no longer use a fixed declaration of TICKS_PER_SECOND, but we call the TimeManagement singleton class to provide us the right value. This allows us to use dynamic ticking in the future, a feature known as tickless kernel. The scheduler no longer does by himself the calculation of real time (Unix time), and just calls the TimeManagment singleton class to provide the value. Also, we can use 2 new boot arguments: - the "time" boot argument accpets either the value "modern", or "legacy". If "modern" is specified, the time management subsystem will try to setup HPET. Otherwise, for "legacy" value, the time subsystem will revert to use the PIT & RTC, leaving HPET disabled. If this boot argument is not specified, the default pattern is to try to setup HPET. - the "hpet" boot argumet accepts either the value "periodic" or "nonperiodic". If "periodic" is specified, the HPET will scan for periodic timers, and will assert if none are found. If only one is found, that timer will be assigned for the time-keeping task. If more than one is found, both time-keeping task & scheduler-ticking task will be assigned to periodic timers. If this boot argument is not specified, the default pattern is to try to scan for HPET periodic timers. This boot argument has no effect if HPET is disabled. In hardware context, PIT & RealTimeClock classes are merely inheriting from the HardwareTimer class, and they allow to use the old i8254 (PIT) and RTC devices, managing them via IO ports. By default, the RTC will be programmed to a frequency of 1024Hz. The PIT will be programmed to a frequency close to 1000Hz. About HPET, depending if we need to scan for periodic timers or not, we try to set a frequency close to 1000Hz for the time-keeping timer and scheduler-ticking timer. Also, if possible, we try to enable the Legacy replacement feature of the HPET. This feature if exists, instructs the chipset to disconnect both i8254 (PIT) and RTC. This behavior is observable on QEMU, and was verified against the source code: https://github.com/qemu/qemu/commit/ce967e2f33861b0e17753f97fa4527b5943c94b6 The HPETComparator class is inheriting from HardwareTimer class, and is responsible for an individual HPET comparator, which is essentially a timer. Therefore, it needs to call the singleton HPET class to perform HPET-related operations. The new abstraction of Hardware timers brings an opportunity of more new features in the foreseeable future. For example, we can change the callback function of each hardware timer, thus it makes it possible to swap missions between hardware timers, or to allow to use a hardware timer for other temporary missions (e.g. calibrating the LAPIC timer, measuring the CPU frequency, etc).
2020-03-09 18:03:27 +03:00
#include <Kernel/Time/TimeManagement.h>
#include <Kernel/TimerQueue.h>
//#define LOG_EVERY_CONTEXT_SWITCH
//#define SCHEDULER_DEBUG
//#define SCHEDULER_RUNNABLE_DEBUG
namespace Kernel {
class SchedulerPerProcessorData {
AK_MAKE_NONCOPYABLE(SchedulerPerProcessorData);
AK_MAKE_NONMOVABLE(SchedulerPerProcessorData);
public:
SchedulerPerProcessorData() = default;
WeakPtr<Thread> m_pending_beneficiary;
const char* m_pending_donate_reason { nullptr };
bool m_in_scheduler { true };
};
SchedulerData* g_scheduler_data;
RecursiveSpinLock g_scheduler_lock;
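// Register a freshly created thread with the scheduler by placing it on the
// global non-runnable list; it becomes eligible for scheduling once its state
// changes to Runnable.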
void Scheduler::init_thread(Thread& thread)
{
ASSERT(g_scheduler_data);
g_scheduler_data->m_nonrunnable_threads.append(thread);
}
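// Length of a thread's time slice in ticks. The idle thread gets a minimal
// slice so it is preempted as soon as real work shows up; every other thread
// currently gets a fixed 10ms.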
static u32 time_slice_for(const Thread& thread)
{
// One time slice unit == 1ms
if (&thread == Processor::current().idle_thread())
return 1;
return 10;
}
Thread* g_finalizer;
WaitQueue* g_finalizer_wait_queue;
Atomic<bool> g_finalizer_has_work { false };
static Process* s_colonel_process;
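// Kick off scheduling on the current processor. This turns the calling context
// into the processor's idle thread and hands control to it via
// initialize_context_switching(); it never returns.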
void Scheduler::start()
{
ASSERT_INTERRUPTS_DISABLED();
// We need to acquire our scheduler lock, which will be released
// by the idle thread once control is transferred there
g_scheduler_lock.lock();
auto& processor = Processor::current();
processor.set_scheduler_data(*new SchedulerPerProcessorData());
ASSERT(processor.is_initialized());
auto& idle_thread = *processor.idle_thread();
ASSERT(processor.current_thread() == &idle_thread);
ASSERT(processor.idle_thread() == &idle_thread);
idle_thread.set_ticks_left(time_slice_for(idle_thread));
idle_thread.did_schedule();
idle_thread.set_initialized(true);
processor.init_context(idle_thread, false);
idle_thread.set_state(Thread::Running);
ASSERT(idle_thread.affinity() == (1u << processor.id()));
processor.initialize_context_switching(idle_thread);
ASSERT_NOT_REACHED();
}
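// Pick the next thread to run on this processor and switch to it. A pending
// priority donation takes precedence if the beneficiary is still runnable;
// otherwise the runnable thread with the highest effective priority wins,
// falling back to the idle thread. Returns true if a context switch happened.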
bool Scheduler::pick_next()
{
ASSERT_INTERRUPTS_DISABLED();
auto current_thread = Thread::current();
// Set the m_in_scheduler flag before acquiring the spinlock. This
// prevents a recursive call into Scheduler::invoke_async upon
// leaving the scheduler lock.
ScopedCritical critical;
auto& scheduler_data = Processor::current().get_scheduler_data();
scheduler_data.m_in_scheduler = true;
ScopeGuard guard(
[]() {
// We may be on a different processor after we got switched
// back to this thread!
auto& scheduler_data = Processor::current().get_scheduler_data();
ASSERT(scheduler_data.m_in_scheduler);
scheduler_data.m_in_scheduler = false;
});
ScopedSpinLock lock(g_scheduler_lock);
if (current_thread->should_die() && current_thread->state() == Thread::Running) {
// Rather than immediately killing threads, yanking the kernel stack
// away from them (which can lead to e.g. reference leaks), we always
// allow Thread::wait_on to return. This allows the kernel stack to
// clean up and eventually we'll get here shortly before transitioning
// back to user mode (from Processor::exit_trap). At this point we
// no longer want to schedule this thread. We can't wait until
// Scheduler::enter_current because we don't want to allow it to
// transition back to user mode.
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler[" << Processor::current().id() << "]: Thread " << *current_thread << " is dying";
#endif
current_thread->set_state(Thread::Dying);
}
#ifdef SCHEDULER_RUNNABLE_DEBUG
dbg() << "Scheduler[" << Processor::current().id() << "]: Non-runnables:";
Scheduler::for_each_nonrunnable([&](Thread& thread) -> IterationDecision {
if (thread.state() == Thread::Dying)
dbg() << " " << String::format("%-12s", thread.state_string()) << " " << thread << " @ " << String::format("%w", thread.tss().cs) << ":" << String::format("%x", thread.tss().eip) << " Finalizable: " << thread.is_finalizable();
else
dbg() << " " << String::format("%-12s", thread.state_string()) << " " << thread << " @ " << String::format("%w", thread.tss().cs) << ":" << String::format("%x", thread.tss().eip);
return IterationDecision::Continue;
});
dbg() << "Scheduler[" << Processor::current().id() << "]: Runnables:";
Scheduler::for_each_runnable([](Thread& thread) -> IterationDecision {
dbg() << " " << String::format("%3u", thread.effective_priority()) << "/" << String::format("%2u", thread.priority()) << " " << String::format("%-12s", thread.state_string()) << " " << thread << " @ " << String::format("%w", thread.tss().cs) << ":" << String::format("%x", thread.tss().eip);
return IterationDecision::Continue;
});
#endif
Thread* thread_to_schedule = nullptr;
auto pending_beneficiary = scheduler_data.m_pending_beneficiary.strong_ref();
Vector<Thread*, 128> sorted_runnables;
for_each_runnable([&](auto& thread) {
if ((thread.affinity() & (1u << Processor::current().id())) == 0)
return IterationDecision::Continue;
if (thread.state() == Thread::Running && &thread != current_thread)
return IterationDecision::Continue;
sorted_runnables.append(&thread);
if (&thread == pending_beneficiary) {
thread_to_schedule = &thread;
return IterationDecision::Break;
}
return IterationDecision::Continue;
});
if (thread_to_schedule) {
// The thread we're supposed to donate to still exists
const char* reason = scheduler_data.m_pending_donate_reason;
scheduler_data.m_pending_beneficiary = nullptr;
scheduler_data.m_pending_donate_reason = nullptr;
// We need to leave our first critical section before switching context,
// but since we're still holding the scheduler lock we're still in a critical section
critical.leave();
#ifdef SCHEDULER_DEBUG
dbg() << "Processing pending donate to " << *thread_to_schedule << " reason=" << reason;
#endif
return donate_to_and_switch(thread_to_schedule, reason);
}
// Either we're not donating or the beneficiary disappeared.
// Either way clear any pending information
scheduler_data.m_pending_beneficiary = nullptr;
scheduler_data.m_pending_donate_reason = nullptr;
quick_sort(sorted_runnables, [](auto& a, auto& b) { return a->effective_priority() >= b->effective_priority(); });
for (auto* thread : sorted_runnables) {
if (thread->process().exec_tid() && thread->process().exec_tid() != thread->tid())
continue;
ASSERT(thread->state() == Thread::Runnable || thread->state() == Thread::Running);
if (!thread_to_schedule) {
thread->m_extra_priority = 0;
thread_to_schedule = thread;
} else {
thread->m_extra_priority++;
}
}
if (!thread_to_schedule)
thread_to_schedule = Processor::current().idle_thread();
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler[" << Processor::current().id() << "]: Switch to " << *thread_to_schedule << " @ " << String::format("%04x:%08x", thread_to_schedule->tss().cs, thread_to_schedule->tss().eip);
#endif
// We need to leave our first critical section before switching context,
// but since we're still holding the scheduler lock we're still in a critical section
critical.leave();
return context_switch(thread_to_schedule);
}
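// Voluntarily give up the CPU. If we are currently in an IRQ handler or a
// critical section, the switch cannot happen right away; it is deferred via
// invoke_scheduler_async() and false is returned.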
bool Scheduler::yield()
{
InterruptDisabler disabler;
auto& proc = Processor::current();
auto& scheduler_data = proc.get_scheduler_data();
// Clear any pending beneficiary
scheduler_data.m_pending_beneficiary = nullptr;
scheduler_data.m_pending_donate_reason = nullptr;
auto current_thread = Thread::current();
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler[" << proc.id() << "]: yielding thread " << *current_thread << " in_irq: " << proc.in_irq();
#endif
ASSERT(current_thread != nullptr);
if (proc.in_irq() || proc.in_critical()) {
// If we're handling an IRQ we can't switch contexts, and if we're in
// a critical section we don't want to. In either case, defer the
// switch until we exit the trap or critical section.
proc.invoke_scheduler_async();
return false;
}
if (!Scheduler::pick_next())
return false;
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler[" << Processor::current().id() << "]: yield returns to thread " << *current_thread << " in_irq: " << Processor::current().in_irq();
#endif
return true;
}
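// Hand the remainder of the current time slice to the beneficiary and switch
// to it immediately. Falls back to a plain yield if the beneficiary is gone,
// no longer runnable, or there is nothing left worth donating.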
bool Scheduler::donate_to_and_switch(Thread* beneficiary, const char* reason)
{
ASSERT(g_scheduler_lock.own_lock());
auto& proc = Processor::current();
ASSERT(proc.in_critical() == 1);
(void)reason;
unsigned ticks_left = Thread::current()->ticks_left();
if (!beneficiary || beneficiary->state() != Thread::Runnable || ticks_left <= 1)
return Scheduler::yield();
unsigned ticks_to_donate = min(ticks_left - 1, time_slice_for(*beneficiary));
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler[" << proc.id() << "]: Donating " << ticks_to_donate << " ticks to " << *beneficiary << ", reason=" << reason;
#endif
beneficiary->set_ticks_left(ticks_to_donate);
return Scheduler::context_switch(beneficiary);
}
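// Request that the rest of the current thread's time slice be donated to
// the beneficiary. If we are nested more than one level deep in critical
// sections, the donation is recorded as pending and handled on the next
// scheduler invocation instead of switching here.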
bool Scheduler::donate_to(RefPtr<Thread>& beneficiary, const char* reason)
{
ASSERT(beneficiary);
if (beneficiary == Thread::current())
return Scheduler::yield();
// Set the m_in_scheduler flag before acquiring the spinlock. This
// prevents a recursive call into Scheduler::invoke_async upon
// leaving the scheduler lock.
ScopedCritical critical;
auto& proc = Processor::current();
auto& scheduler_data = proc.get_scheduler_data();
scheduler_data.m_in_scheduler = true;
ScopeGuard guard(
[]() {
// We may be on a different processor after we got switched
// back to this thread!
auto& scheduler_data = Processor::current().get_scheduler_data();
ASSERT(scheduler_data.m_in_scheduler);
scheduler_data.m_in_scheduler = false;
});
ASSERT(!proc.in_irq());
if (proc.in_critical() > 1) {
scheduler_data.m_pending_beneficiary = beneficiary; // Save the beneficiary
scheduler_data.m_pending_donate_reason = reason;
proc.invoke_scheduler_async();
return false;
}
ScopedSpinLock lock(g_scheduler_lock);
// "Leave" the critical section before switching context. Since we
// still hold the scheduler lock, we're not actually leaving it.
// Processor::switch_context expects Processor::in_critical() to be 1
critical.leave();
donate_to_and_switch(beneficiary, reason);
return false;
}
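// Switch from the current thread to the given thread. Returns false if it is
// already the current thread; otherwise performs the low-level switch and
// returns true once this thread is eventually scheduled again.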
bool Scheduler::context_switch(Thread* thread)
{
thread->set_ticks_left(time_slice_for(*thread));
thread->did_schedule();
auto from_thread = Thread::current();
if (from_thread == thread)
return false;
if (from_thread) {
// If the last thread hasn't blocked (it's still marked Running),
// mark it as Runnable for the next round.
if (from_thread->state() == Thread::Running)
from_thread->set_state(Thread::Runnable);
#ifdef LOG_EVERY_CONTEXT_SWITCH
dbg() << "Scheduler[" << Processor::current().id() << "]: " << *from_thread << " -> " << *thread << " [" << thread->priority() << "] " << String::format("%w", thread->tss().cs) << ":" << String::format("%x", thread->tss().eip);
#endif
}
auto& proc = Processor::current();
if (!thread->is_initialized()) {
proc.init_context(*thread, false);
thread->set_initialized(true);
}
thread->set_state(Thread::Running);
// Mark it as active because we are using this thread. This is similar
// to comparing it with Processor::current_thread, but when there are
// multiple processors there's no easy way to check whether the thread
// is actually still needed. This prevents accidental finalization when
// a thread is no longer in Running state, but running on another core.
thread->set_active(true);
proc.switch_context(from_thread, thread);
// NOTE: from_thread at this point reflects the thread we were
// switched from, and thread reflects Thread::current()
enter_current(*from_thread, false);
ASSERT(thread == Thread::current());
return true;
}
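// Runs in the context of the thread we just switched to, with the scheduler
// lock still held. Deactivates the previous thread, hands it to the finalizer
// if it is dying, and otherwise checks for pending signals to deliver.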
void Scheduler::enter_current(Thread& prev_thread, bool is_first)
{
ASSERT(g_scheduler_lock.own_lock());
prev_thread.set_active(false);
if (prev_thread.state() == Thread::Dying) {
// If the thread we switched from is marked as dying, then notify
// the finalizer. Note that as soon as we leave the scheduler lock
// the finalizer may free prev_thread!
notify_finalizer();
} else if (!is_first) {
// Check if we have any signals we should deliver (even if we don't
// end up switching to another thread).
auto current_thread = Thread::current();
if (!current_thread->is_in_block()) {
ScopedSpinLock lock(current_thread->get_lock());
if (current_thread->state() == Thread::Running && current_thread->pending_signals_for_state()) {
current_thread->dispatch_one_pending_signal();
}
}
}
}
void Scheduler::leave_on_first_switch(u32 flags)
{
// This is called when a thread is switched into for the first time.
// At this point, enter_current has already been called, but because
// Scheduler::context_switch is not on the call stack we need to
// clean up and release locks manually here.
g_scheduler_lock.unlock(flags);
auto& scheduler_data = Processor::current().get_scheduler_data();
ASSERT(scheduler_data.m_in_scheduler);
scheduler_data.m_in_scheduler = false;
}
void Scheduler::prepare_after_exec()
{
// This is called after exec() when doing a context "switch" into
// the new process. This is called from Processor::assume_context
ASSERT(g_scheduler_lock.own_lock());
auto& scheduler_data = Processor::current().get_scheduler_data();
ASSERT(!scheduler_data.m_in_scheduler);
scheduler_data.m_in_scheduler = true;
}
void Scheduler::prepare_for_idle_loop()
{
// This is called when the CPU has finished setting up the idle loop
// and is about to run it. We need to acquire the scheduler lock.
ASSERT(!g_scheduler_lock.own_lock());
g_scheduler_lock.lock();
auto& scheduler_data = Processor::current().get_scheduler_data();
ASSERT(!scheduler_data.m_in_scheduler);
scheduler_data.m_in_scheduler = true;
}
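// The colonel is the kernel's own process; it owns the idle threads.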
Process* Scheduler::colonel()
{
ASSERT(s_colonel_process);
return s_colonel_process;
}
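// One-time scheduler setup on the boot processor: allocate the global
// scheduler data and finalizer wait queue, create the colonel process along
// with the first idle thread, and install that thread as CPU #0's idle and
// current thread.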
void Scheduler::initialize()
{
ASSERT(&Processor::current() != nullptr); // sanity check
RefPtr<Thread> idle_thread;
g_scheduler_data = new SchedulerData;
g_finalizer_wait_queue = new WaitQueue;
g_finalizer_has_work.store(false, AK::MemoryOrder::memory_order_release);
s_colonel_process = &Process::create_kernel_process(idle_thread, "colonel", idle_loop, nullptr, 1).leak_ref();
ASSERT(s_colonel_process);
ASSERT(idle_thread);
idle_thread->set_priority(THREAD_PRIORITY_MIN);
idle_thread->set_name(StringView("idle thread #0"));
set_idle_thread(idle_thread);
}
void Scheduler::set_idle_thread(Thread* idle_thread)
{
Processor::current().set_idle_thread(*idle_thread);
Processor::current().set_current_thread(*idle_thread);
}
Thread* Scheduler::create_ap_idle_thread(u32 cpu)
{
ASSERT(cpu != 0);
// This function is called on the BSP, but creates an idle thread for another AP
ASSERT(Processor::current().id() == 0);
ASSERT(s_colonel_process);
Thread* idle_thread = s_colonel_process->create_kernel_thread(idle_loop, nullptr, THREAD_PRIORITY_MIN, String::format("idle thread #%u", cpu), 1 << cpu, false);
ASSERT(idle_thread);
return idle_thread;
}
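// Invoked from the timer interrupt. Takes a profiling sample if the current
// process is being profiled, ticks down the current thread's time slice, and
// requests an asynchronous scheduler invocation once the slice is used up.
// For now only the BSP (CPU #0) drives scheduling from here.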
void Scheduler::timer_tick(const RegisterState& regs)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(Processor::current().in_irq());
auto current_thread = Processor::current().current_thread();
if (!current_thread)
return;
bool is_bsp = Processor::current().id() == 0;
if (!is_bsp)
return; // TODO: This prevents scheduling on other CPUs!
if (current_thread->process().is_profiling()) {
SmapDisabler disabler;
auto backtrace = current_thread->raw_backtrace(regs.ebp, regs.eip);
auto& sample = Profiling::next_sample_slot();
sample.pid = current_thread->process().pid();
sample.tid = current_thread->tid();
sample.timestamp = TimeManagement::the().uptime_ms();
for (size_t i = 0; i < min(backtrace.size(), Profiling::max_stack_frame_count); ++i) {
sample.frames[i] = backtrace[i];
}
}
if (current_thread->tick())
return;
ASSERT_INTERRUPTS_DISABLED();
ASSERT(Processor::current().in_irq());
Processor::current().invoke_scheduler_async();
}
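// Entry point for a previously requested asynchronous scheduler invocation,
// e.g. after leaving an IRQ handler or the outermost critical section.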
void Scheduler::invoke_async()
{
ASSERT_INTERRUPTS_DISABLED();
auto& proc = Processor::current();
ASSERT(!proc.in_irq());
// Since this function is called when leaving critical sections (such
// as a SpinLock), we need to check that we're not already inside the
// scheduler to prevent recursion.
if (!proc.get_scheduler_data().m_in_scheduler)
pick_next();
}
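// Yield while holding a critical section: flag the context switch, then
// temporarily drop the critical depth so the deferred switch can take place
// before restoring it. We may wake up on a different CPU afterwards.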
void Scheduler::yield_from_critical()
{
auto& proc = Processor::current();
ASSERT(proc.in_critical());
ASSERT(!proc.in_irq());
yield(); // Flag a context switch
u32 prev_flags;
u32 prev_crit = Processor::current().clear_critical(prev_flags, false);
// Note, we may now be on a different CPU!
Processor::current().restore_critical(prev_crit, prev_flags);
}
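// Wake the finalizer only on the false -> true transition of
// g_finalizer_has_work, so back-to-back notifications don't cause
// redundant wake-ups.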
void Scheduler::notify_finalizer()
{
if (g_finalizer_has_work.exchange(true, AK::MemoryOrder::memory_order_acq_rel) == false)
g_finalizer_wait_queue->wake_all();
}
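// Body of every idle thread: halt until the next interrupt, then yield
// (currently only on CPU #0, matching the limitation in timer_tick) so that
// newly runnable threads get picked up.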
void Scheduler::idle_loop(void*)
{
dbg() << "Scheduler[" << Processor::current().id() << "]: idle loop running";
ASSERT(are_interrupts_enabled());
for (;;) {
asm("hlt");
if (Processor::current().id() == 0)
yield();
}
}
}