From 056e406a12296260115a1680e477d4a52fe1d2d4 Mon Sep 17 00:00:00 2001
From: Gunnar Beutner
Date: Sun, 16 Oct 2022 22:43:43 +0200
Subject: [PATCH] Kernel: Add even more AARCH64 stubs

---
 Kernel/Arch/aarch64/ASM_wrapper.h   | 10 ++++++
 Kernel/Arch/aarch64/Processor.cpp   | 32 ++++++++++++++++++
 Kernel/Arch/aarch64/Processor.h     | 52 +++++++++++++++++++++++++++++
 Kernel/Arch/aarch64/RegisterState.h |  6 ++++
 Kernel/Arch/aarch64/TrapFrame.h     | 18 ++++++----
 Kernel/Arch/aarch64/init.cpp        | 14 ++++----
 Kernel/Arch/aarch64/vector_table.S  |  9 +++++
 Kernel/CMakeLists.txt               |  1 +
 Kernel/Scheduler.cpp                |  7 ++++
 9 files changed, 136 insertions(+), 13 deletions(-)

diff --git a/Kernel/Arch/aarch64/ASM_wrapper.h b/Kernel/Arch/aarch64/ASM_wrapper.h
index 6950a156f67..b4ac85f45da 100644
--- a/Kernel/Arch/aarch64/ASM_wrapper.h
+++ b/Kernel/Arch/aarch64/ASM_wrapper.h
@@ -9,6 +9,7 @@
 
 #pragma once
 
+#include
 #include
 
 namespace Kernel::Aarch64::Asm {
@@ -94,3 +95,12 @@ inline void enter_el1_from_el2()
 }
 
 }
+
+namespace Kernel {
+
+inline bool are_interrupts_enabled()
+{
+    return Processor::are_interrupts_enabled();
+}
+
+}
diff --git a/Kernel/Arch/aarch64/Processor.cpp b/Kernel/Arch/aarch64/Processor.cpp
index f84354d8ca8..4f9dcbcfa83 100644
--- a/Kernel/Arch/aarch64/Processor.cpp
+++ b/Kernel/Arch/aarch64/Processor.cpp
@@ -65,6 +65,33 @@ u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
     TODO_AARCH64();
 }
 
+void Processor::initialize_context_switching(Thread& initial_thread)
+{
+    (void)initial_thread;
+    TODO_AARCH64();
+}
+
+void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
+{
+    (void)from_thread;
+    (void)to_thread;
+    TODO_AARCH64();
+}
+
+void Processor::assume_context(Thread& thread, FlatPtr flags)
+{
+    (void)thread;
+    (void)flags;
+    TODO_AARCH64();
+}
+
+FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
+{
+    (void)thread;
+    (void)leave_crit;
+    TODO_AARCH64();
+}
+
 ErrorOr> Processor::capture_stack_trace(Thread& thread, size_t max_frames)
 {
     (void)thread;
@@ -73,4 +100,9 @@ ErrorOr> Processor::capture_stack_trace(Thread& thread, size
     return Vector {};
 }
 
+void Processor::check_invoke_scheduler()
+{
+    TODO_AARCH64();
+}
+
 }
diff --git a/Kernel/Arch/aarch64/Processor.h b/Kernel/Arch/aarch64/Processor.h
index 2f353314ae2..03bd67b2a26 100644
--- a/Kernel/Arch/aarch64/Processor.h
+++ b/Kernel/Arch/aarch64/Processor.h
@@ -51,6 +51,16 @@ public:
         m_processor_specific_data[static_cast(specific_id)] = ptr;
     }
 
+    void idle_begin() const
+    {
+        TODO_AARCH64();
+    }
+
+    void idle_end() const
+    {
+        TODO_AARCH64();
+    }
+
     ALWAYS_INLINE static void pause()
     {
         TODO_AARCH64();
@@ -80,12 +90,27 @@ public:
     static void flush_tlb_local(VirtualAddress vaddr, size_t page_count);
     static void flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
 
+    ALWAYS_INLINE u32 id() const
+    {
+        // NOTE: This variant should only be used when iterating over all
+        // Processor instances, or when it's guaranteed that the thread
+        // cannot move to another processor in between calling Processor::current
+        // and Processor::get_id, or if this fact is not important.
+        // All other cases should use Processor::id instead!
+        return 0;
+    }
+
     // FIXME: When aarch64 supports multiple cores, return the correct core id here.
     ALWAYS_INLINE static u32 current_id()
    {
         return 0;
     }
 
+    ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
+    {
+        m_idle_thread = &idle_thread;
+    }
+
     // FIXME: Actually return the current thread once aarch64 supports threading.
     ALWAYS_INLINE static Thread* current_thread()
     {
@@ -129,6 +154,20 @@ public:
         Aarch64::DAIF::set_I();
     }
 
+    void check_invoke_scheduler();
+    void invoke_scheduler_async() { m_invoke_scheduler_async = true; }
+
+    ALWAYS_INLINE static bool current_in_scheduler()
+    {
+        auto current_processor = current();
+        return current_processor.m_in_scheduler;
+    }
+
+    ALWAYS_INLINE static void set_current_in_scheduler(bool value)
+    {
+        current().m_in_scheduler = value;
+    }
+
     // FIXME: Share the critical functions with x86/Processor.h
     ALWAYS_INLINE static void enter_critical()
     {
@@ -162,6 +201,12 @@ public:
 
     ALWAYS_INLINE static FPUState const& clean_fpu_state() { TODO_AARCH64(); }
 
+    ALWAYS_INLINE static void set_current_thread(Thread& current_thread)
+    {
+        (void)current_thread;
+        TODO_AARCH64();
+    }
+
     // FIXME: Actually return the idle thread once aarch64 supports threading.
     ALWAYS_INLINE static Thread* idle_thread()
     {
@@ -188,14 +233,21 @@ public:
     [[noreturn]] static void halt();
 
+    [[noreturn]] void initialize_context_switching(Thread& initial_thread);
+    NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
+    [[noreturn]] static void assume_context(Thread& thread, FlatPtr flags);
+    FlatPtr init_context(Thread& thread, bool leave_crit);
+
     static ErrorOr> capture_stack_trace(Thread& thread, size_t max_frames = 0);
 
 private:
+    Thread* m_idle_thread;
     u32 m_in_critical { 0 };
     // FIXME: Once there is code in place to differentiate IRQs from synchronous exceptions (syscalls),
     // this member should be incremented. Also this member shouldn't be a FlatPtr.
     FlatPtr m_in_irq { 0 };
+    bool m_in_scheduler { false };
+    bool m_invoke_scheduler_async { false };
 };
 
 }
diff --git a/Kernel/Arch/aarch64/RegisterState.h b/Kernel/Arch/aarch64/RegisterState.h
index 7aab709d284..24e851c699d 100644
--- a/Kernel/Arch/aarch64/RegisterState.h
+++ b/Kernel/Arch/aarch64/RegisterState.h
@@ -14,6 +14,12 @@ VALIDATE_IS_AARCH64()
 namespace Kernel {
 
 struct RegisterState {
+    u64 x[31];     // Saved general purpose registers
+    u64 spsr_el1;  // Save Processor Status Register, EL1
+    u64 elr_el1;   // Exception Link Register, EL1
+    u64 tpidr_el1; // EL0 thread ID
+    u64 sp_el0;    // EL0 stack pointer
+
     FlatPtr userspace_sp() const { return 0; }
     void set_userspace_sp(FlatPtr value)
     {
diff --git a/Kernel/Arch/aarch64/TrapFrame.h b/Kernel/Arch/aarch64/TrapFrame.h
index c7a3c6bf16f..d3cf9c94a68 100644
--- a/Kernel/Arch/aarch64/TrapFrame.h
+++ b/Kernel/Arch/aarch64/TrapFrame.h
@@ -1,23 +1,27 @@
 /*
  * Copyright (c) 2018-2021, Andreas Kling
+ * Copyright (c) 2022, Gunnar Beutner
  *
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
 #pragma once
 
-#include
-
 #include
+#include
+#include
 
 namespace Kernel {
 
 struct TrapFrame {
-    u64 x[31];     // Saved general purpose registers
-    u64 spsr_el1;  // Save Processor Status Register, EL1
-    u64 elr_el1;   // Exception Link Register, EL1
-    u64 tpidr_el1; // EL0 thread ID
-    u64 sp_el0;    // EL0 stack pointer
+    TrapFrame* next_trap;
+    RegisterState* regs;
+
+    TrapFrame() = delete;
+    TrapFrame(TrapFrame const&) = delete;
+    TrapFrame(TrapFrame&&) = delete;
+    TrapFrame& operator=(TrapFrame const&) = delete;
+    TrapFrame& operator=(TrapFrame&&) = delete;
 };
 
 }
diff --git a/Kernel/Arch/aarch64/init.cpp b/Kernel/Arch/aarch64/init.cpp
index ef6caf9163f..d8e039f3d40 100644
--- a/Kernel/Arch/aarch64/init.cpp
+++ b/Kernel/Arch/aarch64/init.cpp
@@ -36,20 +36,22 @@ extern "C" void exception_common(Kernel::TrapFrame const* const trap_frame)
     if constexpr (print_stack_frame) {
         dbgln("Exception Generated by processor!");
 
+        auto* regs = trap_frame->regs;
+
         for (auto reg = 0; reg < 31; reg++) {
-            dbgln("x{}: {:x}", reg, trap_frame->x[reg]);
+            dbgln("x{}: {:x}", reg, regs->x[reg]);
         }
 
         // Special registers
-        dbgln("spsr_el1: {:x}", trap_frame->spsr_el1);
-        dbgln("elr_el1: {:x}", trap_frame->elr_el1);
-        dbgln("tpidr_el1: {:x}", trap_frame->tpidr_el1);
-        dbgln("sp_el0: {:x}", trap_frame->sp_el0);
+        dbgln("spsr_el1: {:x}", regs->spsr_el1);
+        dbgln("elr_el1: {:x}", regs->elr_el1);
+        dbgln("tpidr_el1: {:x}", regs->tpidr_el1);
+        dbgln("sp_el0: {:x}", regs->sp_el0);
 
         auto esr_el1 = Kernel::Aarch64::ESR_EL1::read();
         dbgln("esr_el1: EC({:#b}) IL({:#b}) ISS({:#b}) ISS2({:#b})", esr_el1.EC, esr_el1.IL, esr_el1.ISS, esr_el1.ISS2);
 
-        dump_backtrace_from_base_pointer(trap_frame->x[29]);
+        dump_backtrace_from_base_pointer(regs->x[29]);
     }
 
     Kernel::Processor::halt();
diff --git a/Kernel/Arch/aarch64/vector_table.S b/Kernel/Arch/aarch64/vector_table.S
index 13e6b7d4005..7e951918a97 100644
--- a/Kernel/Arch/aarch64/vector_table.S
+++ b/Kernel/Arch/aarch64/vector_table.S
@@ -63,12 +63,21 @@
     mrs x0, sp_el0
     str x0, [sp, #SP_EL0_SLOT]
 
+    // Set up TrapFrame struct on the stack
+    sub sp, sp, #16
+    mov x0, sp
+    str x0, [sp, #(1 * 8)]
+    str xzr, [sp, #(0 * 0)]
+
     // Move stack pointer into first argument register
     // and jump to the C++ exception handler
     mov x0, sp
 .endm
 
 .macro restore_previous_context
+    // Remove TrapFrame from the stack
+    add sp, sp, #16
+
     // Restore special registers first
     ldr x0, [sp, #SPSR_EL1_SLOT]
     msr spsr_el1, x0
diff --git a/Kernel/CMakeLists.txt b/Kernel/CMakeLists.txt
index ac8d6b3a3d3..8cbb1938afa 100644
--- a/Kernel/CMakeLists.txt
+++ b/Kernel/CMakeLists.txt
@@ -505,6 +505,7 @@ else()
        StdLib.cpp
        Time/TimeManagement.cpp
        TimerQueue.cpp
+       ThreadTracer.cpp
        UBSanitizer.cpp
        UserOrKernelBuffer.cpp
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index f86cbce816d..0ede9fe6812 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -508,9 +508,16 @@ void dump_thread_list(bool with_stack_traces)
     dbgln("Scheduler thread list for processor {}:", Processor::current_id());
 
     auto get_cs = [](Thread& thread) -> u16 {
+#if ARCH(I386) || ARCH(X86_64)
         if (!thread.current_trap())
             return thread.regs().cs;
         return thread.get_register_dump_from_stack().cs;
+#elif ARCH(AARCH64)
+        (void)thread;
+        return 0;
+#else
+#    error Unknown architecture
+#endif
     };
 
     auto get_eip = [](Thread& thread) -> u32 {
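As a rough sketch of what the vector_table.S hunk relies on (this fragment is illustrative and not part of the commit): the context-saving macro now reserves 16 bytes on the stack for the new two-member TrapFrame, writes zero into the slot at offset 0 (next_trap) and a pointer into the slot at offset 8 (regs), and the existing "mov x0, sp" then hands that TrapFrame to exception_common() as its first argument. The standalone C++ below only spells out the layout assumption behind those hard-coded offsets; the forward declaration of RegisterState is an assumption for the example, and the deleted constructors of the real struct are omitted here because they do not affect the layout.

    #include <cstddef>

    struct RegisterState; // only used through a pointer here; the kernel's definition lives in RegisterState.h

    struct TrapFrame {
        TrapFrame* next_trap;  // zeroed by:  str xzr, [sp, #(0 * 0)]
        RegisterState* regs;   // filled by:  str x0, [sp, #(1 * 8)]
    };

    // On AArch64 both members are 8-byte pointers, so the struct occupies exactly
    // the 16 bytes reserved by "sub sp, sp, #16" and released by "add sp, sp, #16".
    static_assert(offsetof(TrapFrame, next_trap) == 0 * 8);
    static_assert(offsetof(TrapFrame, regs) == 1 * 8);
    static_assert(sizeof(TrapFrame) == 16);

If the struct ever grows or its member order changes, the literal #16 and the two slot offsets in vector_table.S have to be updated to match.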