Kernel: Share Processor class (and others) across architectures

About half of the Processor code is common across architectures, so
let's share it via a templated base class. Other code that can be
shared to some extent, like the FPUState and TrapFrame functions, is
adjusted here as well. Functions which cannot be shared trivially
(i.e., without internal refactoring) are left alone for now.
Author: kleines Filmröllchen, 2023-09-18 21:45:14 +02:00
Committed by: Andrew Kaster
parent 0b824ab7a6
commit 398d271a46
Notes: sideshowbarker, 2024-07-17 08:59:18 +09:00
26 changed files with 943 additions and 860 deletions
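The commit message's "templated base class" is the curiously recurring template pattern (CRTP): shared logic lives in ProcessorBase<ProcessorT>, each architecture's Processor derives from ProcessorBase<Processor>, and the shared definitions are compiled once and explicitly instantiated, as the diff below shows. A minimal self-contained sketch of that shape, with illustrative member names only:

// Shared code, written once against the ProcessorT interface:
template<typename ProcessorT>
class ProcessorBase {
public:
    void check_invoke_scheduler()
    {
        // Reach architecture-specific behavior through the derived type.
        static_cast<ProcessorT*>(this)->arch_specific_hook(); // hypothetical hook
    }
};

// One concrete Processor per architecture:
class Processor final : public ProcessorBase<Processor> {
public:
    void arch_specific_hook() { /* per-architecture implementation */ }
};

// Emit the shared members for the one concrete type, as the diff does
// at the end of Kernel/Arch/Processor.h:
template class ProcessorBase<Processor>;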

Kernel/Arch/CPUID.h (new file)

@ -0,0 +1,17 @@
/*
* Copyright (c) 2023, kleines Filmröllchen <filmroellchen@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Platform.h>
#if ARCH(X86_64)
# include <Kernel/Arch/x86_64/CPUID.h>
#elif ARCH(AARCH64)
# include <Kernel/Arch/aarch64/CPUID.h>
#else
# error "Unknown architecture"
#endif

Kernel/Arch/FPUState.h (new file)

@ -0,0 +1,21 @@
/*
* Copyright (c) 2023, kleines Filmröllchen <filmroellchen@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Platform.h>
namespace Kernel {
struct FPUState;
}
#if ARCH(X86_64)
# include <Kernel/Arch/x86_64/FPUState.h>
#elif ARCH(AARCH64)
# include <Kernel/Arch/aarch64/FPUState.h>
#else
# error "Unknown architecture"
#endif

Kernel/Arch/Processor.cpp

@ -5,21 +5,132 @@
*/
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/TrapFrame.h>
#include <Kernel/Interrupts/InterruptDisabler.h>
#include <Kernel/Sections.h>
#include <Kernel/Tasks/Scheduler.h>
#include <Kernel/Tasks/Thread.h>
namespace Kernel {
// FIXME: Move the InterruptsState related functions inside the Processor class, when we have a generic Processor base class.
InterruptsState processor_interrupts_state()
READONLY_AFTER_INIT FPUState s_clean_fpu_state;
READONLY_AFTER_INIT Atomic<u32> g_total_processors;
template<typename T>
void ProcessorBase<T>::check_invoke_scheduler()
{
return Processor::are_interrupts_enabled() ? InterruptsState::Enabled : InterruptsState::Disabled;
VERIFY_INTERRUPTS_DISABLED();
VERIFY(!m_in_irq);
VERIFY(!m_in_critical);
VERIFY(&Processor::current() == this);
if (m_invoke_scheduler_async && m_scheduler_initialized) {
m_invoke_scheduler_async = false;
Scheduler::invoke_async();
}
}
template void ProcessorBase<Processor>::check_invoke_scheduler();
template<typename T>
void ProcessorBase<T>::deferred_call_queue(Function<void()> callback)
{
// NOTE: If we are called outside of a critical section and outside
// of an irq handler, the function will be executed before we return!
ScopedCritical critical;
auto& cur_proc = Processor::current();
auto* entry = cur_proc.m_deferred_call_pool.get_free();
entry->handler_value() = move(callback);
cur_proc.m_deferred_call_pool.queue_entry(entry);
}
template void ProcessorBase<Processor>::deferred_call_queue(Function<void()>);
template<typename T>
void ProcessorBase<T>::enter_trap(TrapFrame& trap, bool raise_irq)
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(&Processor::current() == this);
#if ARCH(X86_64)
// FIXME: Figure out if we need prev_irq_level
trap.prev_irq_level = m_in_irq;
#endif
if (raise_irq)
m_in_irq++;
auto* current_thread = Processor::current_thread();
if (current_thread) {
auto& current_trap = current_thread->current_trap();
trap.next_trap = current_trap;
current_trap = &trap;
auto new_previous_mode = trap.regs->previous_mode();
if (current_thread->set_previous_mode(new_previous_mode)) {
current_thread->update_time_scheduled(TimeManagement::scheduler_current_time(), new_previous_mode == ExecutionMode::Kernel, false);
}
} else {
trap.next_trap = nullptr;
}
}
template void ProcessorBase<Processor>::enter_trap(TrapFrame&, bool);
template<typename T>
u64 ProcessorBase<T>::time_spent_idle() const
{
return m_idle_thread->time_in_user() + m_idle_thread->time_in_kernel();
}
template u64 ProcessorBase<Processor>::time_spent_idle() const;
template<typename T>
void ProcessorBase<T>::leave_critical()
{
InterruptDisabler disabler;
current().do_leave_critical();
}
template void ProcessorBase<Processor>::leave_critical();
template<typename T>
void ProcessorBase<T>::do_leave_critical()
{
VERIFY(m_in_critical > 0);
if (m_in_critical == 1) {
if (m_in_irq == 0) {
m_deferred_call_pool.execute_pending();
VERIFY(m_in_critical == 1);
}
m_in_critical = 0;
if (m_in_irq == 0)
check_invoke_scheduler();
} else {
m_in_critical = m_in_critical - 1;
}
}
template void ProcessorBase<Processor>::do_leave_critical();
void exit_kernel_thread(void)
{
Thread::current()->exit();
}
void restore_processor_interrupts_state(InterruptsState interrupts_state)
void do_context_first_init(Thread* from_thread, Thread* to_thread)
{
if (interrupts_state == InterruptsState::Enabled)
Processor::enable_interrupts();
else
Processor::disable_interrupts();
VERIFY(!Processor::are_interrupts_enabled());
VERIFY(Processor::is_kernel_mode());
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
VERIFY(to_thread == Thread::current());
Scheduler::enter_current(*from_thread);
auto in_critical = to_thread->saved_critical();
VERIFY(in_critical > 0);
Processor::restore_critical(in_critical);
// Since we got here and don't have Scheduler::context_switch in the
// call stack (because this is the first time we switched into this
// context), we need to notify the scheduler so that it can release
// the scheduler lock. We don't want to enable interrupts at this point
// as we're still in the middle of a context switch. Doing so could
// trigger a context switch within a context switch, leading to a crash.
Scheduler::leave_on_first_switch(InterruptsState::Disabled);
}
}
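Given the NOTE inside deferred_call_queue() above, a call made outside any critical section and outside an IRQ handler executes its callback before returning: the ScopedCritical's release path drains the deferred-call pool via do_leave_critical(). A hedged usage sketch:

// Illustrative only: queue work for the next safe point on this processor.
// Outside critical sections and IRQ handlers, this runs synchronously.
Processor::deferred_call_queue([] {
    dbgln("deferred work running");
});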

Kernel/Arch/Processor.h

@ -8,18 +8,201 @@
#pragma once
#include <AK/Function.h>
#include <Kernel/Arch/CPUID.h>
#include <Kernel/Arch/DeferredCallEntry.h>
#include <Kernel/Arch/DeferredCallPool.h>
#include <Kernel/Arch/FPUState.h>
#include <Kernel/Arch/ProcessorSpecificDataID.h>
#include <Kernel/Memory/VirtualAddress.h>
#if ARCH(X86_64)
# include <Kernel/Arch/x86_64/DescriptorTable.h>
#endif
namespace Kernel {
// FIXME: Move the InterruptsState enum and related functions inside the Processor class.
enum class InterruptsState {
Enabled,
Disabled
};
InterruptsState processor_interrupts_state();
void restore_processor_interrupts_state(InterruptsState);
namespace Memory {
class PageDirectory;
}
struct TrapFrame;
class Thread;
class Processor;
extern Atomic<u32> g_total_processors;
extern FPUState s_clean_fpu_state;
// context_first_init is an architecture-specific detail with various properties.
// All variants eventually call into the common code here.
void do_context_first_init(Thread* from_thread, Thread* to_thread);
extern "C" void exit_kernel_thread(void);
extern "C" void thread_context_first_enter(void);
extern "C" void do_assume_context(Thread* thread, u32 flags);
extern "C" FlatPtr do_init_context(Thread* thread, u32) __attribute__((used));
template<typename ProcessorT>
class ProcessorBase {
public:
template<typename T>
T* get_specific()
{
return static_cast<T*>(m_processor_specific_data[static_cast<size_t>(T::processor_specific_data_id())]);
}
void set_specific(ProcessorSpecificDataID specific_id, void* ptr)
{
m_processor_specific_data[static_cast<size_t>(specific_id)] = ptr;
}
static bool is_smp_enabled();
static void smp_enable();
static u32 smp_wake_n_idle_processors(u32 wake_count);
static void flush_tlb_local(VirtualAddress vaddr, size_t page_count);
static void flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
void early_initialize(u32 cpu);
void initialize(u32 cpu);
ALWAYS_INLINE static bool is_initialized();
[[noreturn]] static void halt();
void wait_for_interrupt() const;
ALWAYS_INLINE static void pause();
ALWAYS_INLINE static void wait_check();
ALWAYS_INLINE static ProcessorT& current();
static Processor& by_id(u32);
ALWAYS_INLINE u32 id() const
{
// NOTE: This variant should only be used when iterating over all
// Processor instances, or when it's guaranteed that the thread
// cannot move to another processor in between calling Processor::current
// and Processor::id, or if this fact is not important.
// All other cases should use Processor::current_id instead!
return m_cpu;
}
ALWAYS_INLINE static u32 current_id();
ALWAYS_INLINE static bool is_bootstrap_processor();
ALWAYS_INLINE bool has_nx() const;
ALWAYS_INLINE bool has_pat() const;
ALWAYS_INLINE bool has_feature(CPUFeature::Type const& feature) const
{
return m_features.has_flag(feature);
}
static StringView platform_string();
static u32 count()
{
// NOTE: because this value never changes once all APs are booted,
// we can safely bypass loading it atomically.
// NOTE: This does not work on aarch64, since the variable is never written.
return *g_total_processors.ptr();
}
void enter_trap(TrapFrame& trap, bool raise_irq);
void exit_trap(TrapFrame& trap);
static void flush_entire_tlb_local();
ALWAYS_INLINE static Thread* current_thread();
ALWAYS_INLINE static void set_current_thread(Thread& current_thread);
ALWAYS_INLINE static Thread* idle_thread();
ALWAYS_INLINE static u32 in_critical();
ALWAYS_INLINE static void enter_critical();
static void leave_critical();
void do_leave_critical();
static u32 clear_critical();
ALWAYS_INLINE static void restore_critical(u32 prev_critical);
ALWAYS_INLINE static void verify_no_spinlocks_held()
{
VERIFY(!ProcessorBase::in_critical());
}
static InterruptsState interrupts_state();
static void restore_interrupts_state(InterruptsState);
static bool are_interrupts_enabled();
ALWAYS_INLINE static void enable_interrupts();
ALWAYS_INLINE static void disable_interrupts();
ALWAYS_INLINE static FlatPtr current_in_irq();
ALWAYS_INLINE static bool is_kernel_mode();
ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
{
m_idle_thread = &idle_thread;
}
void idle_begin() const;
void idle_end() const;
u64 time_spent_idle() const;
ALWAYS_INLINE static u64 read_cpu_counter();
void check_invoke_scheduler();
void invoke_scheduler_async() { m_invoke_scheduler_async = true; }
ALWAYS_INLINE static bool current_in_scheduler();
ALWAYS_INLINE static void set_current_in_scheduler(bool value);
ALWAYS_INLINE bool is_in_scheduler() const { return m_in_scheduler; }
ALWAYS_INLINE u8 physical_address_bit_width() const
{
return m_physical_address_bit_width;
}
ALWAYS_INLINE u8 virtual_address_bit_width() const
{
return m_virtual_address_bit_width;
}
ALWAYS_INLINE static FPUState const& clean_fpu_state() { return s_clean_fpu_state; }
static void deferred_call_queue(Function<void()> callback);
static void set_thread_specific_data(VirtualAddress thread_specific_data);
[[noreturn]] void initialize_context_switching(Thread& initial_thread);
NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
[[noreturn]] static void assume_context(Thread& thread, InterruptsState new_interrupts_state);
FlatPtr init_context(Thread& thread, bool leave_crit);
static ErrorOr<Vector<FlatPtr, 32>> capture_stack_trace(Thread& thread, size_t max_frames = 0);
protected:
ProcessorT* m_self;
CPUFeature::Type m_features;
Atomic<bool> m_halt_requested;
u8 m_physical_address_bit_width;
u8 m_virtual_address_bit_width;
private:
void* m_processor_specific_data[static_cast<size_t>(ProcessorSpecificDataID::__Count)];
Thread* m_idle_thread;
Thread* m_current_thread;
u32 m_cpu { 0 };
// FIXME: On aarch64, once there is code in place to differentiate IRQs from synchronous exceptions (syscalls),
// this member should be incremented. Also this member shouldn't be a FlatPtr.
FlatPtr m_in_irq { 0 };
volatile u32 m_in_critical;
// NOTE: Since these variables are accessed with atomic magic on x86 (through GP with a single load instruction),
// they need to be FlatPtrs or everything becomes highly unsound and breaks. They are actually just booleans.
FlatPtr m_in_scheduler;
FlatPtr m_invoke_scheduler_async;
FlatPtr m_scheduler_initialized;
DeferredCallPool m_deferred_call_pool {};
};
template class ProcessorBase<Processor>;
}
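The NOTE above about FlatPtr-sized flags is an x86_64 detail: the GS segment base points at the current core's Processor, so a register-sized field can be read with one gs-relative mov, which cannot be torn by preemption or migration mid-access. A sketch of that accessor, modeled on the kernel's read_gs_ptr (treat the exact constraints as illustrative):

// Single-instruction read of a pointer-sized per-CPU field at the given
// offset into the Processor that %gs points at.
ALWAYS_INLINE static FlatPtr read_gs_ptr(FlatPtr offset)
{
    FlatPtr val;
    asm volatile("movq %%gs:%a[off], %[val]"
                 : [val] "=r"(val)
                 : [off] "ir"(offset));
    return val;
}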
@ -33,8 +216,25 @@ void restore_processor_interrupts_state(InterruptsState);
namespace Kernel {
namespace Memory {
class PageDirectory;
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::is_bootstrap_processor()
{
return current_id() == 0;
}
template<typename T>
InterruptsState ProcessorBase<T>::interrupts_state()
{
return Processor::are_interrupts_enabled() ? InterruptsState::Enabled : InterruptsState::Disabled;
}
template<typename T>
void ProcessorBase<T>::restore_interrupts_state(InterruptsState interrupts_state)
{
if (interrupts_state == InterruptsState::Enabled)
Processor::enable_interrupts();
else
Processor::disable_interrupts();
}
struct ProcessorMessageEntry;
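interrupts_state() and restore_interrupts_state() above replace the old free functions processor_interrupts_state()/restore_processor_interrupts_state() and form the usual save/restore bracket; a minimal usage sketch:

// Illustrative: remember the interrupt flag, work with IRQs masked, restore.
InterruptsState const previous_state = Processor::interrupts_state();
Processor::disable_interrupts();
// ... code that must not be interrupted ...
Processor::restore_interrupts_state(previous_state);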

Kernel/Arch/ProcessorFunctions.include (new file)

@ -0,0 +1,36 @@
/*
* Copyright (c) 2023, kleines Filmröllchen <filmroellchen@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Arch/Processor.h>
// This header instantiates all functions of ProcessorBase that are architecture-specific.
namespace Kernel {
template bool ProcessorBase<Processor>::is_smp_enabled();
template void ProcessorBase<Processor>::idle_begin() const;
template void ProcessorBase<Processor>::idle_end() const;
template void ProcessorBase<Processor>::smp_enable();
template void ProcessorBase<Processor>::flush_tlb_local(VirtualAddress vaddr, size_t page_count);
template void ProcessorBase<Processor>::flush_entire_tlb_local();
template void ProcessorBase<Processor>::flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
template void ProcessorBase<Processor>::early_initialize(u32 cpu);
template void ProcessorBase<Processor>::initialize(u32 cpu);
template void ProcessorBase<Processor>::halt();
template void ProcessorBase<Processor>::exit_trap(TrapFrame& trap);
template u32 ProcessorBase<Processor>::clear_critical();
template bool ProcessorBase<Processor>::are_interrupts_enabled();
template void ProcessorBase<Processor>::wait_for_interrupt() const;
template Processor& ProcessorBase<Processor>::by_id(u32 id);
template StringView ProcessorBase<Processor>::platform_string();
template void ProcessorBase<Processor>::set_thread_specific_data(VirtualAddress thread_specific_data);
template void ProcessorBase<Processor>::initialize_context_switching(Thread& initial_thread);
template void ProcessorBase<Processor>::switch_context(Thread*& from_thread, Thread*& to_thread);
template void ProcessorBase<Processor>::assume_context(Thread& thread, InterruptsState new_interrupts_state);
template FlatPtr ProcessorBase<Processor>::init_context(Thread& thread, bool leave_crit);
template ErrorOr<Vector<FlatPtr, 32>> ProcessorBase<Processor>::capture_stack_trace(Thread& thread, size_t max_frames);
template u32 ProcessorBase<Processor>::smp_wake_n_idle_processors(u32 wake_count);
}
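This file leans on explicit instantiation: a template member may be defined in a single translation unit and explicitly instantiated there for the known concrete type; every other TU merely links against those symbols. A standalone sketch of the mechanism, with illustrative names:

// base.h — declaration only; most TUs never see the definition.
template<typename T>
struct Base {
    void hello();
};

// derived.cpp — the one TU that defines and instantiates:
struct Derived : Base<Derived> { };
template<typename T>
void Base<T>::hello() { /* body visible only here */ }
template void Base<Derived>::hello(); // the symbol is emitted in this TU

// Any other TU can now call Derived{}.hello() and resolve it at link time.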

Kernel/Arch/TrapFrame.cpp

@ -1,11 +1,12 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2023, Idan Horowitz <idan.horowitz@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/x86_64/TrapFrame.h>
#include <Kernel/Arch/TrapFrame.h>
#include <Kernel/Interrupts/InterruptDisabler.h>
namespace Kernel {

Kernel/Arch/TrapFrame.h

@ -8,6 +8,13 @@
#include <AK/Platform.h>
// FIXME: There's only a minor difference between x86 and Aarch64/RISC-V trap frames; the prev_irq member.
// This seems to be unnecessary (see FIXME in Processor::enter_trap),
// so investigate whether we need it and either:
// (1) Remove the member and corresponding code from x86
// (2) Implement prev_irq in the assembly stubs of Aarch64 and RISC-V
// and then use the same TrapFrame on all architectures.
#if ARCH(X86_64)
# include <Kernel/Arch/x86_64/TrapFrame.h>
#elif ARCH(AARCH64)
@ -15,3 +22,11 @@
#else
# error "Unknown architecture"
#endif
namespace Kernel {
extern "C" void enter_trap_no_irq(TrapFrame* trap) __attribute__((used));
extern "C" void enter_trap(TrapFrame*) __attribute__((used));
extern "C" void exit_trap(TrapFrame*) __attribute__((used));
}
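These extern "C" functions are the shims the architecture's assembly stubs call; each forwards the TrapFrame to the current Processor. The deleted aarch64 TrapFrame.cpp further down shows the exact shape:

// Forwarding shim between the assembly interrupt epilogue and C++:
extern "C" void exit_trap(TrapFrame* trap)
{
    return Processor::current().exit_trap(*trap);
}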

Kernel/Arch/aarch64/ASM_wrapper.h

@ -9,7 +9,7 @@
#pragma once
#include <Kernel/Arch/aarch64/Processor.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/aarch64/Registers.h>
namespace Kernel::Aarch64::Asm {
@ -145,12 +145,3 @@ inline void flush_data_cache(FlatPtr start, size_t size)
}
}
namespace Kernel {
inline bool are_interrupts_enabled()
{
return Processor::are_interrupts_enabled();
}
}

Kernel/Arch/aarch64/CPU.cpp

@ -4,9 +4,9 @@
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/aarch64/ASM_wrapper.h>
#include <Kernel/Arch/aarch64/CPU.h>
#include <Kernel/Arch/aarch64/Processor.h>
#include <Kernel/Arch/aarch64/Registers.h>
#include <Kernel/Library/Panic.h>

Kernel/Arch/aarch64/FPUState.h (new file)

@ -0,0 +1,19 @@
/*
* Copyright (c) 2023, kleines Filmröllchen <filmroellchen@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
VALIDATE_IS_AARCH64()
namespace Kernel {
struct [[gnu::aligned(16)]] FPUState {
u8 buffer[512];
};
}
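A hedged aside on the size: 512 bytes matches the AArch64 FPSIMD register file, the 32 vector registers q0..q31 at 16 bytes each, which is what the store_fpu_state()/load_fpu_state() routines in Kernel/Arch/aarch64/Processor.cpp spill into this buffer. A compile-time check stating that assumption:

// Assumption: the buffer holds q0..q31 and nothing else.
static_assert(sizeof(Kernel::FPUState) == 32 * 16, "q0..q31 at 16 bytes each");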

Kernel/Arch/aarch64/Processor.cpp

@ -22,15 +22,10 @@
namespace Kernel {
extern "C" void thread_context_first_enter(void);
extern "C" void exit_kernel_thread(void);
extern "C" void do_assume_context(Thread* thread, u32 new_interrupts_state);
extern "C" void context_first_init(Thread* from_thread, Thread* to_thread) __attribute__((used));
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread) __attribute__((used));
extern "C" FlatPtr do_init_context(Thread* thread, u32 new_interrupts_state) __attribute__((used));
Processor* g_current_processor;
READONLY_AFTER_INIT FPUState Processor::s_clean_fpu_state;
static void store_fpu_state(FPUState* fpu_state)
{
@ -78,7 +73,8 @@ static void load_fpu_state(FPUState* fpu_state)
"\n" ::[fpu_state] "r"(fpu_state));
}
void Processor::early_initialize(u32 cpu)
template<typename T>
void ProcessorBase<T>::early_initialize(u32 cpu)
{
VERIFY(g_current_processor == nullptr);
m_cpu = cpu;
@ -86,10 +82,11 @@ void Processor::early_initialize(u32 cpu)
m_physical_address_bit_width = detect_physical_address_bit_width();
m_virtual_address_bit_width = detect_virtual_address_bit_width();
g_current_processor = this;
g_current_processor = static_cast<Processor*>(this);
}
void Processor::initialize(u32)
template<typename T>
void ProcessorBase<T>::initialize(u32)
{
m_deferred_call_pool.init();
@ -102,14 +99,16 @@ void Processor::initialize(u32)
store_fpu_state(&s_clean_fpu_state);
}
[[noreturn]] void Processor::halt()
template<typename T>
[[noreturn]] void ProcessorBase<T>::halt()
{
disable_interrupts();
for (;;)
asm volatile("wfi");
}
void Processor::flush_tlb_local(VirtualAddress, size_t)
template<typename T>
void ProcessorBase<T>::flush_tlb_local(VirtualAddress, size_t)
{
// FIXME: Figure out how to flush a single page
asm volatile("dsb ishst");
@ -118,7 +117,8 @@ void Processor::flush_tlb_local(VirtualAddress, size_t)
asm volatile("isb");
}
void Processor::flush_entire_tlb_local()
template<typename T>
void ProcessorBase<T>::flush_entire_tlb_local()
{
asm volatile("dsb ishst");
asm volatile("tlbi vmalle1");
@ -126,34 +126,14 @@ void Processor::flush_entire_tlb_local()
asm volatile("isb");
}
void Processor::flush_tlb(Memory::PageDirectory const*, VirtualAddress vaddr, size_t page_count)
template<typename T>
void ProcessorBase<T>::flush_tlb(Memory::PageDirectory const*, VirtualAddress vaddr, size_t page_count)
{
flush_tlb_local(vaddr, page_count);
}
void Processor::leave_critical()
{
InterruptDisabler disabler;
current().do_leave_critical();
}
void Processor::do_leave_critical()
{
VERIFY(m_in_critical > 0);
if (m_in_critical == 1) {
if (m_in_irq == 0) {
m_deferred_call_pool.execute_pending();
VERIFY(m_in_critical == 1);
}
m_in_critical = 0;
if (m_in_irq == 0)
check_invoke_scheduler();
} else {
m_in_critical = m_in_critical - 1;
}
}
u32 Processor::clear_critical()
template<typename T>
u32 ProcessorBase<T>::clear_critical()
{
InterruptDisabler disabler;
auto prev_critical = in_critical();
@ -164,19 +144,16 @@ u32 Processor::clear_critical()
return prev_critical;
}
u64 Processor::time_spent_idle() const
{
return m_idle_thread->time_in_user() + m_idle_thread->time_in_kernel();
}
u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
template<typename T>
u32 ProcessorBase<T>::smp_wake_n_idle_processors(u32 wake_count)
{
(void)wake_count;
// FIXME: Actually wake up other cores when SMP is supported for aarch64.
return 0;
}
void Processor::initialize_context_switching(Thread& initial_thread)
template<typename T>
void ProcessorBase<T>::initialize_context_switching(Thread& initial_thread)
{
VERIFY(initial_thread.process().is_kernel_process());
@ -203,7 +180,8 @@ void Processor::initialize_context_switching(Thread& initial_thread)
VERIFY_NOT_REACHED();
}
void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
template<typename T>
void ProcessorBase<T>::switch_context(Thread*& from_thread, Thread*& to_thread)
{
VERIFY(!m_in_irq);
VERIFY(m_in_critical == 1);
@ -307,7 +285,9 @@ extern "C" FlatPtr do_init_context(Thread* thread, u32 new_interrupts_state)
return Processor::current().init_context(*thread, true);
}
void Processor::assume_context(Thread& thread, InterruptsState new_interrupts_state)
// FIXME: Share this code with other architectures.
template<typename T>
void ProcessorBase<T>::assume_context(Thread& thread, InterruptsState new_interrupts_state)
{
dbgln_if(CONTEXT_SWITCH_DEBUG, "Assume context for thread {} {}", VirtualAddress(&thread), thread);
@ -322,7 +302,8 @@ void Processor::assume_context(Thread& thread, InterruptsState new_interrupts_st
VERIFY_NOT_REACHED();
}
FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
template<typename T>
FlatPtr ProcessorBase<T>::init_context(Thread& thread, bool leave_crit)
{
VERIFY(g_scheduler_lock.is_locked());
if (leave_crit) {
@ -381,28 +362,9 @@ FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
return stack_top;
}
void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(&Processor::current() == this);
// FIXME: Figure out if we need prev_irq_level, see duplicated code in Kernel/Arch/x86/common/Processor.cpp
if (raise_irq)
m_in_irq++;
auto* current_thread = Processor::current_thread();
if (current_thread) {
auto& current_trap = current_thread->current_trap();
trap.next_trap = current_trap;
current_trap = &trap;
auto new_previous_mode = trap.regs->previous_mode();
if (current_thread->set_previous_mode(new_previous_mode)) {
current_thread->update_time_scheduled(TimeManagement::scheduler_current_time(), new_previous_mode == ExecutionMode::Kernel, false);
}
} else {
trap.next_trap = nullptr;
}
}
void Processor::exit_trap(TrapFrame& trap)
// FIXME: Figure out if we can fully share this code with x86.
template<typename T>
void ProcessorBase<T>::exit_trap(TrapFrame& trap)
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(&Processor::current() == this);
@ -449,7 +411,8 @@ void Processor::exit_trap(TrapFrame& trap)
check_invoke_scheduler();
}
ErrorOr<Vector<FlatPtr, 32>> Processor::capture_stack_trace(Thread& thread, size_t max_frames)
template<typename T>
ErrorOr<Vector<FlatPtr, 32>> ProcessorBase<T>::capture_stack_trace(Thread& thread, size_t max_frames)
{
(void)thread;
(void)max_frames;
@ -457,18 +420,6 @@ ErrorOr<Vector<FlatPtr, 32>> Processor::capture_stack_trace(Thread& thread, size
return Vector<FlatPtr, 32> {};
}
void Processor::check_invoke_scheduler()
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(!m_in_irq);
VERIFY(!m_in_critical);
VERIFY(&Processor::current() == this);
if (m_invoke_scheduler_async && m_scheduler_initialized) {
m_invoke_scheduler_async = false;
Scheduler::invoke_async();
}
}
NAKED void thread_context_first_enter(void)
{
asm(
@ -498,32 +449,9 @@ NAKED void do_assume_context(Thread*, u32)
// clang-format on
}
void exit_kernel_thread(void)
extern "C" void context_first_init(Thread* from_thread, Thread* to_thread)
{
Thread::current()->exit();
}
extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread)
{
VERIFY(!are_interrupts_enabled());
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
VERIFY(to_thread == Thread::current());
Scheduler::enter_current(*from_thread);
auto in_critical = to_thread->saved_critical();
VERIFY(in_critical > 0);
Processor::restore_critical(in_critical);
// Since we got here and don't have Scheduler::context_switch in the
// call stack (because this is the first time we switched into this
// context), we need to notify the scheduler so that it can release
// the scheduler lock. We don't want to enable interrupts at this point
// as we're still in the middle of a context switch. Doing so could
// trigger a context switch within a context switch, leading to a crash.
Scheduler::leave_on_first_switch(InterruptsState::Disabled);
do_context_first_init(from_thread, to_thread);
}
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
@ -553,27 +481,31 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
load_fpu_state(&to_thread->fpu_state());
}
StringView Processor::platform_string()
template<typename T>
StringView ProcessorBase<T>::platform_string()
{
return "aarch64"sv;
}
void Processor::set_thread_specific_data(VirtualAddress thread_specific_data)
template<typename T>
void ProcessorBase<T>::set_thread_specific_data(VirtualAddress thread_specific_data)
{
Aarch64::Asm::set_tpidr_el0(thread_specific_data.get());
}
void Processor::deferred_call_queue(Function<void()> callback)
template<typename T>
void ProcessorBase<T>::wait_for_interrupt() const
{
// NOTE: If we are called outside of a critical section and outside
// of an irq handler, the function will be executed before we return!
ScopedCritical critical;
auto& cur_proc = Processor::current();
asm("wfi");
}
auto* entry = cur_proc.m_deferred_call_pool.get_free();
entry->handler_value() = move(callback);
cur_proc.m_deferred_call_pool.queue_entry(entry);
template<typename T>
Processor& ProcessorBase<T>::by_id(u32 id)
{
(void)id;
TODO_AARCH64();
}
}
#include <Kernel/Arch/ProcessorFunctions.include>

Kernel/Arch/aarch64/Processor.h

@ -18,6 +18,9 @@
#include <Kernel/Arch/aarch64/Registers.h>
#include <Kernel/Memory/VirtualAddress.h>
#include <AK/Platform.h>
VALIDATE_IS_AARCH64()
namespace Kernel {
namespace Memory {
@ -29,205 +32,16 @@ class Processor;
struct TrapFrame;
enum class InterruptsState;
// FIXME This needs to go behind some sort of platform abstraction
// it is used between Thread and Processor.
struct [[gnu::aligned(16)]] FPUState {
u8 buffer[512];
};
template<typename ProcessorT>
class ProcessorBase;
// FIXME: Remove this once we support SMP in aarch64
extern Processor* g_current_processor;
constexpr size_t MAX_CPU_COUNT = 1;
class Processor {
void* m_processor_specific_data[static_cast<size_t>(ProcessorSpecificDataID::__Count)];
class Processor final : public ProcessorBase<Processor> {
public:
Processor() = default;
void early_initialize(u32 cpu);
void initialize(u32 cpu);
template<typename T>
T* get_specific()
{
return static_cast<T*>(m_processor_specific_data[static_cast<size_t>(T::processor_specific_data_id())]);
}
void set_specific(ProcessorSpecificDataID specific_id, void* ptr)
{
m_processor_specific_data[static_cast<size_t>(specific_id)] = ptr;
}
void idle_begin() const
{
// FIXME: Implement this when SMP for aarch64 is supported.
}
void idle_end() const
{
// FIXME: Implement this when SMP for aarch64 is supported.
}
void wait_for_interrupt() const
{
asm("wfi");
}
ALWAYS_INLINE static void pause()
{
asm volatile("isb sy");
}
ALWAYS_INLINE static void wait_check()
{
asm volatile("yield");
// FIXME: Process SMP messages once we support SMP on aarch64; cf. x86_64
}
ALWAYS_INLINE u8 physical_address_bit_width() const
{
return m_physical_address_bit_width;
}
ALWAYS_INLINE u8 virtual_address_bit_width() const
{
return m_virtual_address_bit_width;
}
ALWAYS_INLINE static bool is_initialized()
{
return g_current_processor != nullptr;
}
static void flush_tlb_local(VirtualAddress vaddr, size_t page_count);
static void flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
ALWAYS_INLINE u32 id() const
{
// NOTE: This variant should only be used when iterating over all
// Processor instances, or when it's guaranteed that the thread
// cannot move to another processor in between calling Processor::current
// and Processor::get_id, or if this fact is not important.
// All other cases should use Processor::id instead!
return 0;
}
// FIXME: When aarch64 supports multiple cores, return the correct core id here.
ALWAYS_INLINE static u32 current_id()
{
return 0;
}
ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
{
m_idle_thread = &idle_thread;
}
ALWAYS_INLINE static Thread* current_thread()
{
return current().m_current_thread;
}
ALWAYS_INLINE bool has_nx() const
{
return true;
}
ALWAYS_INLINE bool has_pat() const
{
return false;
}
ALWAYS_INLINE bool has_feature(CPUFeature::Type const& feature) const
{
return m_features.has_flag(feature);
}
ALWAYS_INLINE static FlatPtr current_in_irq()
{
return current().m_in_irq;
}
ALWAYS_INLINE static u64 read_cpu_counter()
{
TODO_AARCH64();
return 0;
}
ALWAYS_INLINE static bool are_interrupts_enabled()
{
auto daif = Aarch64::DAIF::read();
return !daif.I;
}
ALWAYS_INLINE static void enable_interrupts()
{
Aarch64::DAIF::clear_I();
}
ALWAYS_INLINE static void disable_interrupts()
{
Aarch64::DAIF::set_I();
}
void check_invoke_scheduler();
void invoke_scheduler_async() { m_invoke_scheduler_async = true; }
ALWAYS_INLINE static bool current_in_scheduler()
{
return current().m_in_scheduler;
}
ALWAYS_INLINE static void set_current_in_scheduler(bool value)
{
current().m_in_scheduler = value;
}
// FIXME: Share the critical functions with x86/Processor.h
ALWAYS_INLINE static void enter_critical()
{
auto& current_processor = current();
current_processor.m_in_critical = current_processor.m_in_critical + 1;
}
static void leave_critical();
static u32 clear_critical();
ALWAYS_INLINE static void restore_critical(u32 prev_critical)
{
current().m_in_critical = prev_critical;
}
ALWAYS_INLINE static u32 in_critical()
{
return current().m_in_critical;
}
ALWAYS_INLINE static void verify_no_spinlocks_held()
{
VERIFY(!Processor::in_critical());
}
ALWAYS_INLINE static FPUState const& clean_fpu_state()
{
return s_clean_fpu_state;
}
ALWAYS_INLINE static void set_current_thread(Thread& current_thread)
{
current().m_current_thread = &current_thread;
}
ALWAYS_INLINE static Thread* idle_thread()
{
return current().m_idle_thread;
}
ALWAYS_INLINE static Processor& current()
{
return *g_current_processor;
}
template<IteratorFunction<Processor&> Callback>
static inline IterationDecision for_each(Callback callback)
{
@ -244,65 +58,162 @@ public:
callback(*g_current_processor);
return IterationDecision::Continue;
}
static u32 count()
{
TODO_AARCH64();
}
// FIXME: Move this into generic Processor class, when there is such a class.
ALWAYS_INLINE static bool is_bootstrap_processor()
{
return Processor::current_id() == 0;
}
static void deferred_call_queue(Function<void()>);
u64 time_spent_idle() const;
static u32 smp_wake_n_idle_processors(u32 wake_count);
[[noreturn]] static void halt();
[[noreturn]] void initialize_context_switching(Thread& initial_thread);
NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
[[noreturn]] static void assume_context(Thread& thread, InterruptsState new_interrupts_state);
FlatPtr init_context(Thread& thread, bool leave_crit);
static ErrorOr<Vector<FlatPtr, 32>> capture_stack_trace(Thread& thread, size_t max_frames = 0);
void enter_trap(TrapFrame& trap, bool raise_irq);
void exit_trap(TrapFrame& trap);
static StringView platform_string();
static void set_thread_specific_data(VirtualAddress thread_specific_data);
static void flush_entire_tlb_local();
private:
Processor(Processor const&) = delete;
void do_leave_critical();
DeferredCallPool m_deferred_call_pool {};
u32 m_cpu;
CPUFeature::Type m_features;
u8 m_physical_address_bit_width;
u8 m_virtual_address_bit_width;
Thread* m_current_thread;
Thread* m_idle_thread;
u32 m_in_critical { 0 };
static FPUState s_clean_fpu_state;
// FIXME: Once there is code in place to differentiate IRQs from synchronous exceptions (syscalls),
// this member should be incremented. Also this member shouldn't be a FlatPtr.
FlatPtr m_in_irq { 0 };
bool m_in_scheduler { false };
bool m_invoke_scheduler_async { false };
bool m_scheduler_initialized { false };
};
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::is_initialized()
{
return g_current_processor != nullptr;
}
template<typename T>
ALWAYS_INLINE Thread* ProcessorBase<T>::idle_thread()
{
return current().m_idle_thread;
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::set_current_thread(Thread& current_thread)
{
current().m_current_thread = &current_thread;
}
// FIXME: When aarch64 supports multiple cores, return the correct core id here.
template<typename T>
ALWAYS_INLINE u32 ProcessorBase<T>::current_id()
{
return 0;
}
template<typename T>
ALWAYS_INLINE u32 ProcessorBase<T>::in_critical()
{
return current().m_in_critical;
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::enter_critical()
{
auto& current_processor = current();
current_processor.m_in_critical = current_processor.m_in_critical + 1;
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::restore_critical(u32 prev_critical)
{
current().m_in_critical = prev_critical;
}
template<typename T>
ALWAYS_INLINE T& ProcessorBase<T>::current()
{
return *g_current_processor;
}
template<typename T>
void ProcessorBase<T>::idle_begin() const
{
// FIXME: Implement this when SMP for aarch64 is supported.
}
template<typename T>
void ProcessorBase<T>::idle_end() const
{
// FIXME: Implement this when SMP for aarch64 is supported.
}
template<typename T>
void ProcessorBase<T>::smp_enable()
{
// FIXME: Implement this when SMP for aarch64 is supported.
}
template<typename T>
bool ProcessorBase<T>::is_smp_enabled()
{
return false;
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::are_interrupts_enabled()
{
auto daif = Aarch64::DAIF::read();
return !daif.I;
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::enable_interrupts()
{
Aarch64::DAIF::clear_I();
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::disable_interrupts()
{
Aarch64::DAIF::set_I();
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::is_kernel_mode()
{
// FIXME: Implement this correctly.
return true;
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::current_in_scheduler()
{
return current().m_in_scheduler;
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::set_current_in_scheduler(bool value)
{
current().m_in_scheduler = value;
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::has_nx() const
{
return true;
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::has_pat() const
{
return false;
}
template<typename T>
ALWAYS_INLINE FlatPtr ProcessorBase<T>::current_in_irq()
{
return current().m_in_irq;
}
template<typename T>
ALWAYS_INLINE Thread* ProcessorBase<T>::current_thread()
{
return current().m_current_thread;
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::pause()
{
asm volatile("isb sy");
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::wait_check()
{
asm volatile("yield");
// FIXME: Process SMP messages once we support SMP on aarch64; cf. x86_64
}
template<typename T>
ALWAYS_INLINE u64 ProcessorBase<T>::read_cpu_counter()
{
TODO_AARCH64();
return 0;
}
}
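The inline definitions above are the whole interrupt-masking story on AArch64: the I bit of the DAIF system register masks IRQs, toggled via the dedicated daifset/daifclr immediates. A freestanding sketch of the same operations in raw assembly (bare-metal only; the kernel goes through its Aarch64::DAIF helpers instead):

// daifset/daifclr immediates map D=8, A=4, I=2, F=1; in mrs-read form
// the I flag sits at bit 7.
static inline void irqs_disable() { asm volatile("msr daifset, #2"); }
static inline void irqs_enable() { asm volatile("msr daifclr, #2"); }
static inline bool irqs_enabled()
{
    unsigned long daif;
    asm volatile("mrs %0, daif" : "=r"(daif));
    return (daif & (1ul << 7)) == 0; // I clear => IRQs unmasked
}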

Kernel/Arch/aarch64/TrapFrame.cpp (deleted)

@ -1,18 +0,0 @@
/*
* Copyright (c) 2023, Idan Horowitz <idan.horowitz@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/aarch64/TrapFrame.h>
#include <Kernel/Interrupts/InterruptDisabler.h>
namespace Kernel {
extern "C" void exit_trap(TrapFrame* trap)
{
return Processor::current().exit_trap(*trap);
}
}

Kernel/Arch/aarch64/TrapFrame.h

@ -27,6 +27,4 @@ struct TrapFrame {
#define TRAP_FRAME_SIZE (2 * 8)
static_assert(AssertSize<TrapFrame, TRAP_FRAME_SIZE>());
extern "C" void exit_trap(TrapFrame*) __attribute__((used));
}

Kernel/Arch/x86_64/ASM_wrapper.h

@ -105,15 +105,6 @@ void write_dr6(FlatPtr);
FlatPtr read_dr7();
void write_dr7(FlatPtr);
ALWAYS_INLINE static bool is_kernel_mode()
{
u16 cs;
asm volatile(
"mov %%cs, %[cs] \n"
: [cs] "=g"(cs));
return (cs & 3) == 0;
}
ALWAYS_INLINE void read_tsc(u32& lsw, u32& msw)
{
asm volatile("rdtsc"

Kernel/Arch/x86_64/CurrentTime.cpp

@ -5,8 +5,8 @@
*/
#include <Kernel/Arch/CurrentTime.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/x86_64/ASM_wrapper.h>
#include <Kernel/Arch/x86_64/Processor.h>
namespace Kernel {

Kernel/Arch/x86_64/DebugOutput.cpp

@ -5,9 +5,9 @@
*/
#include <Kernel/Arch/DebugOutput.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/x86_64/BochsDebugOutput.h>
#include <Kernel/Arch/x86_64/IO.h>
#include <Kernel/Arch/x86_64/Processor.h>
namespace Kernel {

Kernel/Arch/x86_64/FPUState.h (new file)

@ -0,0 +1,27 @@
/*
* Copyright (c) 2023, kleines Filmröllchen <filmroellchen@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Platform.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86_64/SIMDState.h>
VALIDATE_IS_X86()
namespace Kernel {
struct [[gnu::aligned(64), gnu::packed]] FPUState {
SIMD::LegacyRegion legacy_region;
SIMD::Header xsave_header;
// FIXME: This should be dynamically allocated! For now, we only save the `YMM` registers here,
// so this will do for now. The size of the area is queried via CPUID(EAX=0dh, ECX=2):EAX.
// https://www.intel.com/content/dam/develop/external/us/en/documents/36945
u8 ext_save_area[256];
};
}
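The FIXME's sizing query refers to CPUID leaf 0Dh: with ECX=2, EAX returns the size in bytes of the AVX (YMM) state component and EBX its offset within the XSAVE area. A hosted userspace sketch of that query (not kernel code):

#include <cpuid.h>
#include <cstdio>

int main()
{
    unsigned eax = 0, ebx = 0, ecx = 0, edx = 0;
    // Leaf 0xD ("Processor Extended State Enumeration"), sub-leaf 2 = YMM.
    if (__get_cpuid_count(0xD, 2, &eax, &ebx, &ecx, &edx))
        std::printf("YMM state: %u bytes at XSAVE offset %u\n", eax, ebx);
    return 0;
}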

Kernel/Arch/x86_64/init.cpp

@ -275,7 +275,7 @@ extern "C" UNMAP_AFTER_INIT void pre_init_finished(void)
// to this point
// The target flags will get restored upon leaving the trap
Scheduler::leave_on_first_switch(processor_interrupts_state());
Scheduler::leave_on_first_switch(Processor::interrupts_state());
}
extern "C" UNMAP_AFTER_INIT void post_init_finished(void)

Kernel/Arch/x86_64/Processor.cpp

@ -35,23 +35,18 @@
namespace Kernel {
READONLY_AFTER_INIT FPUState Processor::s_clean_fpu_state;
READONLY_AFTER_INIT static ProcessorContainer s_processors {};
READONLY_AFTER_INIT Atomic<u32> Processor::g_total_processors;
READONLY_AFTER_INIT static bool volatile s_smp_enabled;
static Atomic<ProcessorMessage*> s_message_pool;
Atomic<u32> Processor::s_idle_cpu_mask { 0 };
// The compiler can't see the calls to these functions inside assembly.
// Declare them, to avoid dead code warnings.
extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap) __attribute__((used));
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread) __attribute__((used));
extern "C" FlatPtr do_init_context(Thread* thread, u32 flags) __attribute__((used));
extern "C" void syscall_entry();
bool Processor::is_smp_enabled()
template<typename T>
bool ProcessorBase<T>::is_smp_enabled()
{
return s_smp_enabled;
}
@ -62,11 +57,6 @@ UNMAP_AFTER_INIT static void sse_init()
write_cr4(read_cr4() | 0x600);
}
void exit_kernel_thread(void)
{
Thread::current()->exit();
}
UNMAP_AFTER_INIT void Processor::cpu_detect()
{
// NOTE: This is called during Processor::early_initialize, we cannot
@ -601,9 +591,11 @@ UNMAP_AFTER_INIT void Processor::cpu_setup()
m_features |= CPUFeature::OSPKE;
}
UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
template<typename T>
UNMAP_AFTER_INIT void ProcessorBase<T>::early_initialize(u32 cpu)
{
m_self = this;
m_self = static_cast<Processor*>(this);
auto self = static_cast<Processor*>(this);
m_cpu = cpu;
m_in_irq = 0;
@ -613,10 +605,10 @@ UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
m_scheduler_initialized = false;
m_in_scheduler = true;
m_message_queue = nullptr;
self->m_message_queue = nullptr;
m_idle_thread = nullptr;
m_current_thread = nullptr;
m_info = nullptr;
self->m_info = nullptr;
m_halt_requested = false;
if (cpu == 0) {
@ -628,26 +620,29 @@ UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
m_deferred_call_pool.init();
cpu_setup();
gdt_init();
self->cpu_setup();
self->gdt_init();
VERIFY(is_initialized()); // sanity check
VERIFY(is_initialized());
VERIFY(&current() == this); // sanity check
}
UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
template<typename T>
UNMAP_AFTER_INIT void ProcessorBase<T>::initialize(u32 cpu)
{
VERIFY(m_self == this);
VERIFY(&current() == this); // sanity check
m_info = new ProcessorInfo(*this);
auto self = static_cast<Processor*>(this);
dmesgln("CPU[{}]: Supported features: {}", current_id(), m_info->features_string());
self->m_info = new ProcessorInfo(*self);
dmesgln("CPU[{}]: Supported features: {}", current_id(), self->m_info->features_string());
if (!has_feature(CPUFeature::RDRAND))
dmesgln("CPU[{}]: No RDRAND support detected, randomness will be poor", current_id());
dmesgln("CPU[{}]: Physical address bit width: {}", current_id(), m_physical_address_bit_width);
dmesgln("CPU[{}]: Virtual address bit width: {}", current_id(), m_virtual_address_bit_width);
if (m_has_qemu_hvf_quirk)
if (self->m_has_qemu_hvf_quirk)
dmesgln("CPU[{}]: Applied correction for QEMU Hypervisor.framework quirk", current_id());
if (cpu == 0)
@ -672,13 +667,13 @@ UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
}
if (has_feature(CPUFeature::HYPERVISOR))
detect_hypervisor();
self->detect_hypervisor();
}
{
// We need to prevent races between APs starting up at the same time
VERIFY(cpu < s_processors.size());
s_processors[cpu] = this;
s_processors[cpu] = static_cast<Processor*>(this);
}
}
@ -765,7 +760,8 @@ DescriptorTablePointer const& Processor::get_gdtr()
return m_gdtr;
}
ErrorOr<Vector<FlatPtr, 32>> Processor::capture_stack_trace(Thread& thread, size_t max_frames)
template<typename T>
ErrorOr<Vector<FlatPtr, 32>> ProcessorBase<T>::capture_stack_trace(Thread& thread, size_t max_frames)
{
FlatPtr frame_ptr = 0, ip = 0;
Vector<FlatPtr, 32> stack_trace;
@ -836,7 +832,7 @@ ErrorOr<Vector<FlatPtr, 32>> Processor::capture_stack_trace(Thread& thread, size
// until it returns the data back to us
auto& proc = Processor::current();
ErrorOr<void> result;
smp_unicast(
Processor::smp_unicast(
thread.cpu(),
[&]() {
dbgln("CPU[{}] getting stack for cpu #{}", Processor::current_id(), proc.id());
@ -889,37 +885,19 @@ ProcessorContainer& Processor::processors()
return s_processors;
}
Processor& Processor::by_id(u32 id)
template<typename T>
Processor& ProcessorBase<T>::by_id(u32 id)
{
return *s_processors[id];
}
void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
template<typename T>
void ProcessorBase<T>::exit_trap(TrapFrame& trap)
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(&Processor::current() == this);
trap.prev_irq_level = m_in_irq;
if (raise_irq)
m_in_irq++;
auto* current_thread = Processor::current_thread();
if (current_thread) {
auto& current_trap = current_thread->current_trap();
trap.next_trap = current_trap;
current_trap = &trap;
auto new_previous_mode = trap.regs->previous_mode();
if (current_thread->set_previous_mode(new_previous_mode) && trap.prev_irq_level == 0) {
current_thread->update_time_scheduled(TimeManagement::scheduler_current_time(), new_previous_mode == ExecutionMode::Kernel, false);
}
} else {
trap.next_trap = nullptr;
}
}
void Processor::exit_trap(TrapFrame& trap)
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(&Processor::current() == this);
auto* self = static_cast<Processor*>(this);
// Temporarily enter a critical section. This is to prevent critical
// sections entered and left within e.g. smp_process_pending_messages
@ -932,7 +910,7 @@ void Processor::exit_trap(TrapFrame& trap)
m_in_irq = trap.prev_irq_level;
if (s_smp_enabled)
smp_process_pending_messages();
self->smp_process_pending_messages();
// Process the deferred call queue. Among other things, this ensures
// that any pending thread unblocks happen before we enter the scheduler.
@ -968,19 +946,8 @@ void Processor::exit_trap(TrapFrame& trap)
check_invoke_scheduler();
}
void Processor::check_invoke_scheduler()
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(!m_in_irq);
VERIFY(!m_in_critical);
VERIFY(&Processor::current() == this);
if (m_invoke_scheduler_async && m_scheduler_initialized) {
m_invoke_scheduler_async = false;
Scheduler::invoke_async();
}
}
void Processor::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
template<typename T>
void ProcessorBase<T>::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
{
auto ptr = vaddr.as_ptr();
while (page_count > 0) {
@ -995,10 +962,17 @@ void Processor::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
}
}
void Processor::flush_tlb(Memory::PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
template<typename T>
void ProcessorBase<T>::flush_entire_tlb_local()
{
write_cr3(read_cr3());
}
template<typename T>
void ProcessorBase<T>::flush_tlb(Memory::PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
{
if (s_smp_enabled && (!Memory::is_user_address(vaddr) || Process::current().thread_count() > 1))
smp_broadcast_flush_tlb(page_directory, vaddr, page_count);
Processor::smp_broadcast_flush_tlb(page_directory, vaddr, page_count);
else
flush_tlb_local(vaddr, page_count);
}
@ -1042,7 +1016,8 @@ ProcessorMessage& Processor::smp_get_from_pool()
return *msg;
}
u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
template<typename T>
u32 ProcessorBase<T>::smp_wake_n_idle_processors(u32 wake_count)
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(wake_count > 0);
@ -1061,7 +1036,7 @@ u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
auto& apic = APIC::the();
while (did_wake_count < wake_count) {
// Try to get a set of idle CPUs and flip them to busy
u32 idle_mask = s_idle_cpu_mask.load(AK::MemoryOrder::memory_order_relaxed) & ~(1u << current_id);
u32 idle_mask = Processor::s_idle_cpu_mask.load(AK::MemoryOrder::memory_order_relaxed) & ~(1u << current_id);
u32 idle_count = popcount(idle_mask);
if (idle_count == 0)
break; // No (more) idle processor available
@ -1073,7 +1048,7 @@ u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
found_mask |= 1u << cpu;
}
idle_mask = s_idle_cpu_mask.fetch_and(~found_mask, AK::MemoryOrder::memory_order_acq_rel) & found_mask;
idle_mask = Processor::s_idle_cpu_mask.fetch_and(~found_mask, AK::MemoryOrder::memory_order_acq_rel) & found_mask;
if (idle_mask == 0)
continue; // All of them were flipped to busy, try again
idle_count = popcount(idle_mask);
@ -1091,7 +1066,8 @@ u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
return did_wake_count;
}
UNMAP_AFTER_INIT void Processor::smp_enable()
template<typename T>
UNMAP_AFTER_INIT void ProcessorBase<T>::smp_enable()
{
size_t msg_pool_size = Processor::count() * 100u;
size_t msg_entries_cnt = Processor::count();
@ -1325,27 +1301,15 @@ void Processor::smp_broadcast_halt()
APIC::the().broadcast_ipi();
}
void Processor::Processor::halt()
template<typename T>
void ProcessorBase<T>::halt()
{
if (s_smp_enabled)
smp_broadcast_halt();
Processor::smp_broadcast_halt();
halt_this();
}
void Processor::deferred_call_queue(Function<void()> callback)
{
// NOTE: If we are called outside of a critical section and outside
// of an irq handler, the function will be executed before we return!
ScopedCritical critical;
auto& cur_proc = Processor::current();
auto* entry = cur_proc.m_deferred_call_pool.get_free();
entry->handler_value() = move(callback);
cur_proc.m_deferred_call_pool.queue_entry(entry);
}
UNMAP_AFTER_INIT void Processor::gdt_init()
{
m_gdt_length = 0;
@ -1381,28 +1345,9 @@ UNMAP_AFTER_INIT void Processor::gdt_init()
gs_base.set((u64)this);
}
extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
{
VERIFY(!are_interrupts_enabled());
VERIFY(is_kernel_mode());
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
VERIFY(to_thread == Thread::current());
Scheduler::enter_current(*from_thread);
auto in_critical = to_thread->saved_critical();
VERIFY(in_critical > 0);
Processor::restore_critical(in_critical);
// Since we got here and don't have Scheduler::context_switch in the
// call stack (because this is the first time we switched into this
// context), we need to notify the scheduler so that it can release
// the scheduler lock. We don't want to enable interrupts at this point
// as we're still in the middle of a context switch. Doing so could
// trigger a context switch within a context switch, leading to a crash.
Scheduler::leave_on_first_switch(InterruptsState::Disabled);
do_context_first_init(from_thread, to_thread);
}
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
@ -1471,7 +1416,9 @@ extern "C" FlatPtr do_init_context(Thread* thread, u32 flags)
return Processor::current().init_context(*thread, true);
}
void Processor::assume_context(Thread& thread, InterruptsState new_interrupts_state)
// FIXME: Share this code with other architectures.
template<typename T>
void ProcessorBase<T>::assume_context(Thread& thread, InterruptsState new_interrupts_state)
{
dbgln_if(CONTEXT_SWITCH_DEBUG, "Assume context for thread {} {}", VirtualAddress(&thread), thread);
@ -1487,34 +1434,8 @@ void Processor::assume_context(Thread& thread, InterruptsState new_interrupts_st
VERIFY_NOT_REACHED();
}
u64 Processor::time_spent_idle() const
{
return m_idle_thread->time_in_user() + m_idle_thread->time_in_kernel();
}
void Processor::leave_critical()
{
InterruptDisabler disabler;
current().do_leave_critical();
}
void Processor::do_leave_critical()
{
VERIFY(m_in_critical > 0);
if (m_in_critical == 1) {
if (m_in_irq == 0) {
m_deferred_call_pool.execute_pending();
VERIFY(m_in_critical == 1);
}
m_in_critical = 0;
if (m_in_irq == 0)
check_invoke_scheduler();
} else {
m_in_critical = m_in_critical - 1;
}
}
u32 Processor::clear_critical()
template<typename T>
u32 ProcessorBase<T>::clear_critical()
{
InterruptDisabler disabler;
auto prev_critical = in_critical();
@ -1563,13 +1484,14 @@ NAKED void do_assume_context(Thread*, u32)
// clang-format on
}
StringView Processor::platform_string()
template<typename T>
StringView ProcessorBase<T>::platform_string()
{
return "x86_64"sv;
}
// FIXME: For the most part this is a copy of the i386-specific function, get rid of the code duplication
FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
template<typename T>
FlatPtr ProcessorBase<T>::init_context(Thread& thread, bool leave_crit)
{
VERIFY(is_kernel_mode());
VERIFY(g_scheduler_lock.is_locked());
@ -1673,11 +1595,13 @@ FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
return stack_top;
}
void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
template<typename T>
void ProcessorBase<T>::switch_context(Thread*& from_thread, Thread*& to_thread)
{
VERIFY(!m_in_irq);
VERIFY(m_in_critical == 1);
VERIFY(is_kernel_mode());
auto* self = static_cast<Processor*>(this);
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
@ -1739,8 +1663,8 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
: [from_rsp] "=m" (from_thread->regs().rsp),
[from_rbp] "=m" (from_thread->regs().rbp),
[from_rip] "=m" (from_thread->regs().rip),
[tss_rsp0l] "=m" (m_tss.rsp0l),
[tss_rsp0h] "=m" (m_tss.rsp0h),
[tss_rsp0l] "=m" (self->m_tss.rsp0l),
[tss_rsp0h] "=m" (self->m_tss.rsp0h),
"=d" (from_thread), // needed so that from_thread retains the correct value
"=a" (to_thread) // needed so that to_thread retains the correct value
: [to_rsp] "g" (to_thread->regs().rsp),
@ -1755,14 +1679,16 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
}
UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_thread)
template<typename T>
UNMAP_AFTER_INIT void ProcessorBase<T>::initialize_context_switching(Thread& initial_thread)
{
VERIFY(initial_thread.process().is_kernel_process());
auto* self = static_cast<Processor*>(this);
auto& regs = initial_thread.regs();
m_tss.iomapbase = sizeof(m_tss);
m_tss.rsp0l = regs.rsp0 & 0xffffffff;
m_tss.rsp0h = regs.rsp0 >> 32;
self->m_tss.iomapbase = sizeof(self->m_tss);
self->m_tss.rsp0l = regs.rsp0 & 0xffffffff;
self->m_tss.rsp0h = regs.rsp0 >> 32;
m_scheduler_initialized = true;
@ -1791,10 +1717,31 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
VERIFY_NOT_REACHED();
}
void Processor::set_thread_specific_data(VirtualAddress thread_specific_data)
template<typename T>
void ProcessorBase<T>::set_thread_specific_data(VirtualAddress thread_specific_data)
{
MSR fs_base_msr(MSR_FS_BASE);
fs_base_msr.set(thread_specific_data.get());
}
template<typename T>
void ProcessorBase<T>::idle_begin() const
{
Processor::s_idle_cpu_mask.fetch_or(1u << m_cpu, AK::MemoryOrder::memory_order_relaxed);
}
template<typename T>
void ProcessorBase<T>::idle_end() const
{
Processor::s_idle_cpu_mask.fetch_and(~(1u << m_cpu), AK::MemoryOrder::memory_order_relaxed);
}
template<typename T>
void ProcessorBase<T>::wait_for_interrupt() const
{
asm("hlt");
}
}
#include <Kernel/Arch/ProcessorFunctions.include>
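idle_begin()/idle_end() above and smp_wake_n_idle_processors() earlier in this file coordinate through s_idle_cpu_mask, one bit per core: idle cores set their bit, and a waker claims cores by atomically clearing bits, so two concurrent wakers can never pick the same core. A reduced sketch of the claim step (illustrative wrapper around the same fetch_and idea):

#include <AK/Atomic.h>
#include <AK/Types.h>

static AK::Atomic<u32> s_idle_cpu_mask_sketch { 0 };

// Returns the subset of wanted_mask whose idle bits this caller actually
// won; bits lost to a concurrent waker are filtered out by the final AND.
static u32 claim_idle_cpus(u32 wanted_mask)
{
    return s_idle_cpu_mask_sketch.fetch_and(~wanted_mask, AK::MemoryOrder::memory_order_acq_rel) & wanted_mask;
}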

Kernel/Arch/x86_64/Processor.h

@ -40,37 +40,28 @@ struct ProcessorMessageEntry;
#define MSR_IA32_EFER 0xc0000080
#define MSR_IA32_PAT 0x277
// FIXME: Find a better place for these
extern "C" void thread_context_first_enter(void);
extern "C" void exit_kernel_thread(void);
extern "C" void do_assume_context(Thread* thread, u32 flags);
struct [[gnu::aligned(64), gnu::packed]] FPUState {
SIMD::LegacyRegion legacy_region;
SIMD::Header xsave_header;
// FIXME: This should be dynamically allocated! For now, we only save the `YMM` registers here,
// so this will do for now. The size of the area is queried via CPUID(EAX=0dh, ECX=2):EAX.
// https://www.intel.com/content/dam/develop/external/us/en/documents/36945
u8 ext_save_area[256];
};
enum class InterruptsState;
class Processor;
template<typename ProcessorT>
class ProcessorBase;
// Note: We only support 64 processors at most at the moment,
// so allocate 64 slots of inline capacity in the container.
constexpr size_t MAX_CPU_COUNT = 64;
using ProcessorContainer = Array<Processor*, MAX_CPU_COUNT>;
class Processor {
extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, [[maybe_unused]] TrapFrame* trap);
// If this fails to compile because ProcessorBase was not found, you are including this header directly.
// Include Arch/Processor.h instead.
class Processor final : public ProcessorBase<Processor> {
friend class ProcessorInfo;
// Allow some implementations to access the idle CPU mask and various x86 implementation details.
friend class ProcessorBase<Processor>;
AK_MAKE_NONCOPYABLE(Processor);
AK_MAKE_NONMOVABLE(Processor);
Processor* m_self;
private:
// Saved user stack for the syscall instruction.
void* m_user_stack;
@ -78,34 +69,15 @@ class Processor {
alignas(Descriptor) Descriptor m_gdt[256];
u32 m_gdt_length;
u32 m_cpu;
FlatPtr m_in_irq;
volatile u32 m_in_critical;
static Atomic<u32> s_idle_cpu_mask;
TSS m_tss;
static FPUState s_clean_fpu_state;
CPUFeature::Type m_features;
static Atomic<u32> g_total_processors;
u8 m_physical_address_bit_width;
u8 m_virtual_address_bit_width;
bool m_has_qemu_hvf_quirk;
ProcessorInfo* m_info;
Thread* m_current_thread;
Thread* m_idle_thread;
Atomic<ProcessorMessageEntry*> m_message_queue;
bool m_invoke_scheduler_async;
bool m_scheduler_initialized;
bool m_in_scheduler;
Atomic<bool> m_halt_requested;
DeferredCallPool m_deferred_call_pool {};
void* m_processor_specific_data[(size_t)ProcessorSpecificDataID::__Count];
void gdt_init();
void write_raw_gdt_entry(u16 selector, u32 low, u32 high);
void write_gdt_entry(u16 selector, Descriptor& descriptor);
@ -123,66 +95,10 @@ class Processor {
void cpu_detect();
void cpu_setup();
public:
Processor() = default;
void early_initialize(u32 cpu);
void initialize(u32 cpu);
void detect_hypervisor();
void detect_hypervisor_hyperv(CPUID const& hypervisor_leaf_range);
void idle_begin() const
{
s_idle_cpu_mask.fetch_or(1u << m_cpu, AK::MemoryOrder::memory_order_relaxed);
}
void idle_end() const
{
s_idle_cpu_mask.fetch_and(~(1u << m_cpu), AK::MemoryOrder::memory_order_relaxed);
}
void wait_for_interrupt() const
{
asm("hlt");
}
static Processor& by_id(u32);
static u32 count()
{
// NOTE: because this value never changes once all APs are booted,
// we can safely bypass loading it atomically.
return *g_total_processors.ptr();
}
ALWAYS_INLINE static u64 read_cpu_counter()
{
return read_tsc();
}
ALWAYS_INLINE static void pause()
{
asm volatile("pause");
}
ALWAYS_INLINE static void wait_check()
{
Processor::pause();
if (Processor::is_smp_enabled())
Processor::current().smp_process_pending_messages();
}
[[noreturn]] static void halt();
static void flush_entire_tlb_local()
{
write_cr3(read_cr3());
}
static void flush_tlb_local(VirtualAddress vaddr, size_t page_count);
static void flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
public:
Descriptor& get_gdt_entry(u16 selector);
void flush_gdt();
DescriptorTablePointer const& get_gdtr();
@@ -222,15 +138,8 @@ public:
return {};
}
ALWAYS_INLINE u8 physical_address_bit_width() const { return m_physical_address_bit_width; }
ALWAYS_INLINE u8 virtual_address_bit_width() const { return m_virtual_address_bit_width; }
ALWAYS_INLINE ProcessorInfo& info() { return *m_info; }
u64 time_spent_idle() const;
static bool is_smp_enabled();
static constexpr u64 user_stack_offset()
{
return __builtin_offsetof(Processor, m_user_stack);
@@ -240,180 +149,157 @@ public:
return __builtin_offsetof(Processor, m_tss) + __builtin_offsetof(TSS, rsp0l);
}
ALWAYS_INLINE static Processor& current()
{
return *(Processor*)read_gs_ptr(__builtin_offsetof(Processor, m_self));
}
ALWAYS_INLINE static bool is_initialized()
{
return read_gs_ptr(__builtin_offsetof(Processor, m_self)) != 0;
}
template<typename T>
T* get_specific()
{
return static_cast<T*>(m_processor_specific_data[static_cast<size_t>(T::processor_specific_data_id())]);
}
void set_specific(ProcessorSpecificDataID specific_id, void* ptr)
{
m_processor_specific_data[static_cast<size_t>(specific_id)] = ptr;
}
ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
{
m_idle_thread = &idle_thread;
}
ALWAYS_INLINE static Thread* current_thread()
{
// If we were to use Processor::current here, we'd have to
// disable interrupts to prevent a race where we may get pre-empted
// right after getting the Processor structure and then get moved
// to another processor, which would lead us to get the wrong thread.
// To avoid having to disable interrupts, we can just read the field
// directly in an atomic fashion, similar to Processor::current.
return (Thread*)read_gs_ptr(__builtin_offsetof(Processor, m_current_thread));
}
ALWAYS_INLINE static void set_current_thread(Thread& current_thread)
{
// See comment in Processor::current_thread
write_gs_ptr(__builtin_offsetof(Processor, m_current_thread), FlatPtr(&current_thread));
}
ALWAYS_INLINE static Thread* idle_thread()
{
// See comment in Processor::current_thread
return (Thread*)read_gs_ptr(__builtin_offsetof(Processor, m_idle_thread));
}
ALWAYS_INLINE u32 id() const
{
// NOTE: This variant should only be used when iterating over all
// Processor instances, or when it's guaranteed that the thread
// cannot move to another processor in between calling Processor::current
// and Processor::id, or if this fact is not important.
// All other cases should use Processor::current_id() instead!
return m_cpu;
}
ALWAYS_INLINE static u32 current_id()
{
// See comment in Processor::current_thread
return read_gs_ptr(__builtin_offsetof(Processor, m_cpu));
}
ALWAYS_INLINE static bool is_bootstrap_processor()
{
return Processor::current_id() == 0;
}
ALWAYS_INLINE static FlatPtr current_in_irq()
{
return read_gs_ptr(__builtin_offsetof(Processor, m_in_irq));
}
ALWAYS_INLINE static void enter_critical()
{
write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), in_critical() + 1);
}
ALWAYS_INLINE static bool current_in_scheduler()
{
return read_gs_value<decltype(m_in_scheduler)>(__builtin_offsetof(Processor, m_in_scheduler));
}
ALWAYS_INLINE static void set_current_in_scheduler(bool value)
{
write_gs_value<decltype(m_in_scheduler)>(__builtin_offsetof(Processor, m_in_scheduler), value);
}
private:
void do_leave_critical();
public:
static void leave_critical();
static u32 clear_critical();
ALWAYS_INLINE static void restore_critical(u32 prev_critical)
{
// NOTE: This doesn't have to be atomic, and it's also fine if we
// get preempted in between these steps. If we move to another
// processor, m_in_critical will move along with us. And if we
// are preempted, we would resume with the same flags.
write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), prev_critical);
}
ALWAYS_INLINE static u32 in_critical()
{
// See comment in Processor::current_thread
return read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
}
ALWAYS_INLINE static void verify_no_spinlocks_held()
{
VERIFY(!Processor::in_critical());
}
ALWAYS_INLINE static FPUState const& clean_fpu_state() { return s_clean_fpu_state; }
static void smp_enable();
bool smp_process_pending_messages();
static void smp_unicast(u32 cpu, Function<void()>, bool async);
static void smp_broadcast_flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
static u32 smp_wake_n_idle_processors(u32 wake_count);
static void deferred_call_queue(Function<void()> callback);
ALWAYS_INLINE bool has_nx() const
{
return has_feature(CPUFeature::NX);
}
ALWAYS_INLINE bool has_pat() const
{
return has_feature(CPUFeature::PAT);
}
ALWAYS_INLINE bool has_feature(CPUFeature::Type const& feature) const
{
return m_features.has_flag(feature);
}
ALWAYS_INLINE static bool are_interrupts_enabled()
{
return Kernel::are_interrupts_enabled();
}
ALWAYS_INLINE static void enable_interrupts()
{
sti();
}
ALWAYS_INLINE static void disable_interrupts()
{
cli();
}
void check_invoke_scheduler();
void invoke_scheduler_async() { m_invoke_scheduler_async = true; }
void enter_trap(TrapFrame& trap, bool raise_irq);
void exit_trap(TrapFrame& trap);
[[noreturn]] void initialize_context_switching(Thread& initial_thread);
NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
[[noreturn]] static void assume_context(Thread& thread, InterruptsState new_interrupts_state);
FlatPtr init_context(Thread& thread, bool leave_crit);
static ErrorOr<Vector<FlatPtr, 32>> capture_stack_trace(Thread& thread, size_t max_frames = 0);
static StringView platform_string();
static void set_thread_specific_data(VirtualAddress thread_specific_data);
};
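A usage note on the get_specific()/set_specific() slots kept in the class above: a per-CPU data type advertises its slot ID, is installed once per processor, and is then readable on the owning CPU without locks. The type and enumerator below are hypothetical, purely for illustration:

// Hypothetical per-CPU data; ProcessorSpecificDataID::MyCounters does not
// exist in the kernel and stands in for a real slot.
struct MyPerCpuCounters {
    static constexpr ProcessorSpecificDataID processor_specific_data_id()
    {
        return ProcessorSpecificDataID::MyCounters;
    }
    u64 interrupts_handled { 0 };
};

// Install during per-CPU initialization:
//   Processor::current().set_specific(MyPerCpuCounters::processor_specific_data_id(), new MyPerCpuCounters);
// Lock-free access from the owning CPU afterwards:
//   Processor::current().get_specific<MyPerCpuCounters>()->interrupts_handled++;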
template<typename T>
ALWAYS_INLINE Thread* ProcessorBase<T>::current_thread()
{
// If we were to use ProcessorBase::current here, we'd have to
// disable interrupts to prevent a race where we may get pre-empted
// right after getting the Processor structure and then get moved
// to another processor, which would lead us to get the wrong thread.
// To avoid having to disable interrupts, we can just read the field
// directly in an atomic fashion, similar to Processor::current.
return (Thread*)read_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_current_thread));
}
template<typename T>
ALWAYS_INLINE u32 ProcessorBase<T>::current_id()
{
// See comment in ProcessorBase::current_thread
return read_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_cpu));
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::restore_critical(u32 prev_critical)
{
// NOTE: This doesn't have to be atomic, and it's also fine if we
// get preempted in between these steps. If we move to another
// processor, m_in_critical will move along with us. And if we
// are preempted, we would resume with the same flags.
write_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_in_critical), prev_critical);
}
template<typename T>
ALWAYS_INLINE u32 ProcessorBase<T>::in_critical()
{
// See comment in ProcessorBase::current_thread
return read_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_in_critical));
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::set_current_thread(Thread& current_thread)
{
// See comment in ProcessorBase::current_thread
write_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_current_thread), FlatPtr(&current_thread));
}
template<typename T>
ALWAYS_INLINE Thread* ProcessorBase<T>::idle_thread()
{
// See comment in ProcessorBase::current_thread
return (Thread*)read_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_idle_thread));
}
template<typename T>
T& ProcessorBase<T>::current()
{
return *(T*)(read_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_self)));
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::is_initialized()
{
return read_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_self)) != 0;
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::enter_critical()
{
write_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_in_critical), in_critical() + 1);
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::are_interrupts_enabled()
{
return Kernel::are_interrupts_enabled();
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::is_kernel_mode()
{
u16 cs;
asm volatile(
"mov %%cs, %[cs] \n"
: [cs] "=g"(cs));
return (cs & 3) == 0;
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::current_in_scheduler()
{
return read_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_in_scheduler)) != 0;
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::set_current_in_scheduler(bool value)
{
write_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_in_scheduler), value);
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::enable_interrupts()
{
sti();
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::disable_interrupts()
{
cli();
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::has_nx() const
{
return has_feature(CPUFeature::NX);
}
template<typename T>
ALWAYS_INLINE bool ProcessorBase<T>::has_pat() const
{
return has_feature(CPUFeature::PAT);
}
template<typename T>
ALWAYS_INLINE u64 ProcessorBase<T>::read_cpu_counter()
{
return read_tsc();
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::pause()
{
asm volatile("pause");
}
template<typename T>
ALWAYS_INLINE void ProcessorBase<T>::wait_check()
{
Processor::pause();
if (Processor::is_smp_enabled())
Processor::current().smp_process_pending_messages();
}
template<typename T>
ALWAYS_INLINE FlatPtr ProcessorBase<T>::current_in_irq()
{
return read_gs_ptr(__builtin_offsetof(ProcessorBase<T>, m_in_irq));
}
}
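Everything above funnels through a single gs-relative load, which is why these accessors are safe without disabling interrupts: there is no window between locating this CPU's Processor and reading the field in which the thread could migrate. A sketch of what read_gs_ptr boils down to, assuming the kernel keeps the gs base pointed at the current CPU's Processor (simplified; the real helper lives in the x86_64 assembly wrappers):

inline FlatPtr read_gs_ptr(FlatPtr offset)
{
    FlatPtr value;
    // A single load relative to the gs segment base; preemption before or
    // after this instruction is harmless, since the load itself cannot be
    // split across processors.
    asm volatile("movq %%gs:(%1), %0"
                 : "=r"(value)
                 : "r"(offset));
    return value;
}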

View File

@@ -5,9 +5,9 @@
*/
#include <Kernel/API/Syscall.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/TrapFrame.h>
#include <Kernel/Arch/x86_64/DescriptorTable.h>
#include <Kernel/Arch/x86_64/Processor.h>
#include <Kernel/Library/Assertions.h>
#include <Kernel/Library/Panic.h>
#include <Kernel/Tasks/Process.h>

View File

@@ -31,8 +31,4 @@ struct TrapFrame {
static_assert(AssertSize<TrapFrame, TRAP_FRAME_SIZE>());
extern "C" void enter_trap_no_irq(TrapFrame* trap) __attribute__((used));
extern "C" void enter_trap(TrapFrame*) __attribute__((used));
extern "C" void exit_trap(TrapFrame*) __attribute__((used));
}

View File

@@ -19,8 +19,10 @@ set(KERNEL_HEAP_SOURCES
set(KERNEL_SOURCES
Arch/init.cpp
Arch/DeferredCallPool.cpp
Arch/PageFault.cpp
Arch/Processor.cpp
Arch/TrapFrame.cpp
Boot/CommandLine.cpp
Bus/PCI/Controller/HostController.cpp
Bus/PCI/Controller/MemoryBackedHostBridge.cpp
@@ -376,7 +378,6 @@ set(KERNEL_SOURCES
if ("${SERENITY_ARCH}" STREQUAL "x86_64")
set(KERNEL_SOURCES
${KERNEL_SOURCES}
Arch/Processor.cpp
Arch/x86_64/CMOS.cpp
Arch/x86_64/DebugOutput.cpp
@@ -443,7 +444,6 @@ if ("${SERENITY_ARCH}" STREQUAL "x86_64")
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86_64/Processor.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86_64/ProcessorInfo.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86_64/SafeMem.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86_64/TrapFrame.cpp
)
if("${SERENITY_ARCH}" STREQUAL "x86_64")
@@ -475,7 +475,6 @@ elseif("${SERENITY_ARCH}" STREQUAL "aarch64")
${KERNEL_SOURCES}
${RPI_SOURCES}
${SOURCES_RUNNING_WITHOUT_MMU}
Arch/Processor.cpp
Arch/aarch64/Firmware/ACPI/StaticParsing.cpp
@@ -494,7 +493,6 @@ elseif("${SERENITY_ARCH}" STREQUAL "aarch64")
Arch/aarch64/PowerState.cpp
Arch/aarch64/SafeMem.cpp
Arch/aarch64/SmapDisabler.cpp
Arch/aarch64/TrapFrame.cpp
Arch/aarch64/vector_table.S
)
@@ -593,6 +591,10 @@ add_compile_options(-fsigned-char)
add_compile_options(-Wno-unknown-warning-option -Wvla -Wnull-dereference)
add_compile_options(-fno-rtti -ffreestanding -fbuiltin)
# We use __builtin_offsetof() on Processor, which inherits from ProcessorBase and is therefore a non-standard-layout type.
# This is an issue on non-Itanium ABIs (and in irrelevant edge cases), so we can safely ignore it.
add_compile_options(-Wno-invalid-offsetof)
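To see why the warning fires at all (illustrative, not from this commit): a type with data members in more than one class of its hierarchy is not standard-layout, and offsetof on such a type is only conditionally-supported, so GCC and Clang warn even though the Itanium C++ ABI defines the result.

struct Base {
    int m_cpu;
};
struct Derived : Base {
    int m_extra; // data members in two classes => Derived is not standard-layout
};
// Triggers -Winvalid-offsetof, yet is well-defined on the Itanium ABI:
static_assert(__builtin_offsetof(Derived, m_cpu) == 0);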
if ("${SERENITY_ARCH}" STREQUAL "x86_64")
add_compile_options(-mno-80387 -mno-mmx -mno-sse -mno-sse2)
elseif("${SERENITY_ARCH}" STREQUAL "aarch64")

View File

@@ -23,7 +23,7 @@ public:
InterruptsState lock()
{
InterruptsState previous_interrupts_state = processor_interrupts_state();
InterruptsState previous_interrupts_state = Processor::interrupts_state();
Processor::enter_critical();
Processor::disable_interrupts();
while (m_lock.exchange(1, AK::memory_order_acquire) != 0)
@@ -39,7 +39,7 @@ public:
m_lock.store(0, AK::memory_order_release);
Processor::leave_critical();
restore_processor_interrupts_state(previous_interrupts_state);
Processor::restore_interrupts_state(previous_interrupts_state);
}
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
@@ -67,7 +67,7 @@ public:
InterruptsState lock()
{
InterruptsState previous_interrupts_state = processor_interrupts_state();
InterruptsState previous_interrupts_state = Processor::interrupts_state();
Processor::disable_interrupts();
Processor::enter_critical();
auto& proc = Processor::current();
@@ -96,7 +96,7 @@ public:
}
Processor::leave_critical();
restore_processor_interrupts_state(previous_interrupts_state);
Processor::restore_interrupts_state(previous_interrupts_state);
}
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
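The renamed statics presumably reduce to the same logic as the old free functions; a sketch of how the shared base can implement them (not verbatim from this commit):

template<typename T>
InterruptsState ProcessorBase<T>::interrupts_state()
{
    return Processor::are_interrupts_enabled() ? InterruptsState::Enabled
                                               : InterruptsState::Disabled;
}

template<typename T>
void ProcessorBase<T>::restore_interrupts_state(InterruptsState interrupts_state)
{
    if (interrupts_state == InterruptsState::Enabled)
        Processor::enable_interrupts();
    else
        Processor::disable_interrupts();
}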

View File

@@ -674,7 +674,7 @@ ErrorOr<void> Process::do_exec(NonnullRefPtr<OpenFileDescription> main_program_d
// and Processor::assume_context() or the next context switch.
// If we used an InterruptDisabler that calls enable_interrupts() on exit, we might get a timer tick too soon in exec().
Processor::enter_critical();
previous_interrupts_state = processor_interrupts_state();
previous_interrupts_state = Processor::interrupts_state();
Processor::disable_interrupts();
// NOTE: Be careful to not trigger any page faults below!
@@ -998,7 +998,7 @@ ErrorOr<FlatPtr> Process::sys$execve(Userspace<Syscall::SC_execve_params const*>
// NOTE: This code path is taken in the non-syscall case, i.e when the kernel spawns
// a userspace process directly (such as /bin/SystemServer on startup)
restore_processor_interrupts_state(previous_interrupts_state);
Processor::restore_interrupts_state(previous_interrupts_state);
Processor::leave_critical();
return 0;
}