Kernel: Add even more AARCH64 stubs

This commit is contained in:
Gunnar Beutner 2022-10-16 22:43:43 +02:00 committed by Linus Groh
parent 63a91d6971
commit 056e406a12
Notes: sideshowbarker 2024-07-18 05:37:06 +09:00
9 changed files with 136 additions and 13 deletions

View File

@ -9,6 +9,7 @@
#pragma once
#include <Kernel/Arch/aarch64/Processor.h>
#include <Kernel/Arch/aarch64/Registers.h>
namespace Kernel::Aarch64::Asm {
@ -94,3 +95,12 @@ inline void enter_el1_from_el2()
}
}
namespace Kernel {

// Convenience wrapper: reports whether IRQs are currently enabled on this
// processor by delegating to the Processor abstraction.
inline bool are_interrupts_enabled()
{
    auto const interrupts_enabled = Processor::are_interrupts_enabled();
    return interrupts_enabled;
}

}

View File

@ -65,6 +65,33 @@ u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
TODO_AARCH64();
}
// Hands this processor over to the scheduler's first thread. Declared
// [[noreturn]] in Processor.h, so once implemented it must never return.
void Processor::initialize_context_switching(Thread& initial_thread)
{
// Parameter is only consumed once context switching is implemented.
(void)initial_thread;
TODO_AARCH64();
}
// Context-switch primitive: suspends from_thread and resumes to_thread.
// Stub until AArch64 threading exists; both parameters are unused for now.
void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
{
(void)from_thread;
(void)to_thread;
TODO_AARCH64();
}
// Resumes execution inside the given thread's saved context. Declared
// [[noreturn]] in Processor.h; currently a stub.
void Processor::assume_context(Thread& thread, FlatPtr flags)
{
(void)thread;
(void)flags;
TODO_AARCH64();
}
// Sets up the initial execution context for a new thread. Presumably returns
// the thread's prepared stack pointer as on x86 — TODO confirm once this is
// implemented; stub for now.
FlatPtr Processor::init_context(Thread& thread, bool leave_crit)
{
(void)thread;
(void)leave_crit;
TODO_AARCH64();
}
ErrorOr<Vector<FlatPtr, 32>> Processor::capture_stack_trace(Thread& thread, size_t max_frames)
{
(void)thread;
@ -73,4 +100,9 @@ ErrorOr<Vector<FlatPtr, 32>> Processor::capture_stack_trace(Thread& thread, size
return Vector<FlatPtr, 32> {};
}
// Should run the scheduler if invoke_scheduler_async() requested one
// (m_invoke_scheduler_async); still a stub on AArch64.
void Processor::check_invoke_scheduler()
{
TODO_AARCH64();
}
}

View File

@ -51,6 +51,16 @@ public:
m_processor_specific_data[static_cast<size_t>(specific_id)] = ptr;
}
// Presumably called when this processor enters its idle loop — confirm
// against the x86 implementation. Stub for now.
void idle_begin() const
{
TODO_AARCH64();
}
// Presumably called when this processor leaves its idle loop — confirm
// against the x86 implementation. Stub for now.
void idle_end() const
{
TODO_AARCH64();
}
ALWAYS_INLINE static void pause()
{
TODO_AARCH64();
@ -80,12 +90,27 @@ public:
static void flush_tlb_local(VirtualAddress vaddr, size_t page_count);
static void flush_tlb(Memory::PageDirectory const*, VirtualAddress, size_t);
// Returns this Processor instance's id; hardwired to 0 while the aarch64
// port is single-core (see current_id() below).
ALWAYS_INLINE u32 id() const
{
// NOTE: This variant should only be used when iterating over all
// Processor instances, or when it's guaranteed that the thread
// cannot move to another processor in between calling Processor::current
// and Processor::get_id, or if this fact is not important.
// All other cases should use Processor::id instead!
return 0;
}
// Returns the id of the core we are currently executing on.
// FIXME: When aarch64 supports multiple cores, return the correct core id here.
ALWAYS_INLINE static u32 current_id()
{
return 0;
}
// Records the thread this processor should run when nothing else is
// runnable (stored in m_idle_thread).
ALWAYS_INLINE void set_idle_thread(Thread& idle_thread)
{
m_idle_thread = &idle_thread;
}
// FIXME: Actually return the current thread once aarch64 supports threading.
ALWAYS_INLINE static Thread* current_thread()
{
@ -129,6 +154,20 @@ public:
Aarch64::DAIF::set_I();
}
// Runs the scheduler if one was requested asynchronously; currently a
// TODO_AARCH64() stub in Processor.cpp.
void check_invoke_scheduler();
// Requests a scheduler invocation at the next opportunity; the flag is
// consumed by check_invoke_scheduler().
void invoke_scheduler_async() { m_invoke_scheduler_async = true; }
// Returns whether the current processor is executing inside the scheduler.
// NOTE: current() yields a reference to the per-CPU Processor instance;
// bind it by reference — `auto` without `&` would deep-copy the whole
// Processor object just to read one bool (and would break outright if
// Processor is ever made non-copyable).
ALWAYS_INLINE static bool current_in_scheduler()
{
    auto& current_processor = current();
    return current_processor.m_in_scheduler;
}
// Marks the current processor as being (or no longer being) inside the
// scheduler; read back via current_in_scheduler().
ALWAYS_INLINE static void set_current_in_scheduler(bool value)
{
    auto& processor = current();
    processor.m_in_scheduler = value;
}
// FIXME: Share the critical functions with x86/Processor.h
ALWAYS_INLINE static void enter_critical()
{
@ -162,6 +201,12 @@ public:
// Should return the canonical "clean" FPU state used to initialize new
// threads; FPU state handling does not exist on aarch64 yet.
ALWAYS_INLINE static FPUState const& clean_fpu_state() { TODO_AARCH64(); }
// Installs current_thread as this processor's running thread; stub until
// aarch64 supports threading (see the current_thread() FIXME above).
ALWAYS_INLINE static void set_current_thread(Thread& current_thread)
{
(void)current_thread;
TODO_AARCH64();
}
// FIXME: Actually return the idle thread once aarch64 supports threading.
ALWAYS_INLINE static Thread* idle_thread()
{
@ -188,14 +233,21 @@ public:
[[noreturn]] static void halt();
// Context-switching entry points; all are TODO_AARCH64() stubs in
// Processor.cpp for now.
[[noreturn]] void initialize_context_switching(Thread& initial_thread);
NEVER_INLINE void switch_context(Thread*& from_thread, Thread*& to_thread);
[[noreturn]] static void assume_context(Thread& thread, FlatPtr flags);
FlatPtr init_context(Thread& thread, bool leave_crit);
static ErrorOr<Vector<FlatPtr, 32>> capture_stack_trace(Thread& thread, size_t max_frames = 0);
private:
// Idle thread for this processor; set via set_idle_thread().
Thread* m_idle_thread;
// Presumably the enter_critical()/leave_critical() nesting depth —
// confirm against the x86 Processor implementation.
u32 m_in_critical { 0 };
// FIXME: Once there is code in place to differentiate IRQs from synchronous exceptions (syscalls),
// this member should be incremented. Also this member shouldn't be a FlatPtr.
FlatPtr m_in_irq { 0 };
// Accessed through current_in_scheduler()/set_current_in_scheduler().
bool m_in_scheduler { false };
// Set by invoke_scheduler_async(); consumed by check_invoke_scheduler().
bool m_invoke_scheduler_async { false };
};
}

View File

@ -14,6 +14,12 @@ VALIDATE_IS_AARCH64()
namespace Kernel {
struct RegisterState {
u64 x[31]; // Saved general purpose registers
u64 spsr_el1; // Save Processor Status Register, EL1
u64 elr_el1; // Exception Link Register, EL1
u64 tpidr_el1; // EL0 thread ID
u64 sp_el0; // EL0 stack pointer
// FIXME: Return the saved userspace stack pointer (sp_el0) once userspace
// exists on aarch64; hardwired to 0 for now.
FlatPtr userspace_sp() const { return 0; }
void set_userspace_sp(FlatPtr value)
{

View File

@ -1,23 +1,27 @@
/*
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2022, Gunnar Beutner <gbeutner@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Types.h>
#include <AK/Platform.h>
#include <AK/Types.h>
#include <Kernel/Arch/RegisterState.h>
namespace Kernel {
// Per-exception bookkeeping placed on the kernel stack by the assembly
// exception entry code (see vector_table's "Set up TrapFrame struct").
struct TrapFrame {
u64 x[31]; // Saved general purpose registers
u64 spsr_el1; // Save Processor Status Register, EL1
u64 elr_el1; // Exception Link Register, EL1
u64 tpidr_el1; // EL0 thread ID
u64 sp_el0; // EL0 stack pointer
// NOTE(review): the register fields above duplicate RegisterState now
// that `regs` points at the saved registers — confirm whether they are
// meant to remain, or are leftovers of the old layout.
TrapFrame* next_trap; // Presumably links nested traps — TODO confirm.
RegisterState* regs; // Saved register state for this exception.
// Never constructed or copied from C++; instances are built in assembly.
TrapFrame() = delete;
TrapFrame(TrapFrame const&) = delete;
TrapFrame(TrapFrame&&) = delete;
TrapFrame& operator=(TrapFrame const&) = delete;
TrapFrame& operator=(TrapFrame&&) = delete;
};
}

View File

@ -36,20 +36,22 @@ extern "C" void exception_common(Kernel::TrapFrame const* const trap_frame)
if constexpr (print_stack_frame) {
dbgln("Exception Generated by processor!");
auto* regs = trap_frame->regs;
for (auto reg = 0; reg < 31; reg++) {
dbgln("x{}: {:x}", reg, trap_frame->x[reg]);
dbgln("x{}: {:x}", reg, regs->x[reg]);
}
// Special registers
dbgln("spsr_el1: {:x}", trap_frame->spsr_el1);
dbgln("elr_el1: {:x}", trap_frame->elr_el1);
dbgln("tpidr_el1: {:x}", trap_frame->tpidr_el1);
dbgln("sp_el0: {:x}", trap_frame->sp_el0);
dbgln("spsr_el1: {:x}", regs->spsr_el1);
dbgln("elr_el1: {:x}", regs->elr_el1);
dbgln("tpidr_el1: {:x}", regs->tpidr_el1);
dbgln("sp_el0: {:x}", regs->sp_el0);
auto esr_el1 = Kernel::Aarch64::ESR_EL1::read();
dbgln("esr_el1: EC({:#b}) IL({:#b}) ISS({:#b}) ISS2({:#b})", esr_el1.EC, esr_el1.IL, esr_el1.ISS, esr_el1.ISS2);
dump_backtrace_from_base_pointer(trap_frame->x[29]);
dump_backtrace_from_base_pointer(regs->x[29]);
}
Kernel::Processor::halt();

View File

@ -63,12 +63,21 @@
mrs x0, sp_el0
str x0, [sp, #SP_EL0_SLOT]
// Set up TrapFrame struct on the stack
sub sp, sp, #16
mov x0, sp
str x0, [sp, #(1 * 8)]
str xzr, [sp, #(0 * 0)]
// Move stack pointer into first argument register
// and jump to the C++ exception handler
mov x0, sp
.endm
.macro restore_previous_context
// Remove TrapFrame from the stack
add sp, sp, #16
// Restore special registers first
ldr x0, [sp, #SPSR_EL1_SLOT]
msr spsr_el1, x0

View File

@ -505,6 +505,7 @@ else()
StdLib.cpp
Time/TimeManagement.cpp
TimerQueue.cpp
ThreadTracer.cpp
UBSanitizer.cpp
UserOrKernelBuffer.cpp

View File

@ -508,9 +508,16 @@ void dump_thread_list(bool with_stack_traces)
dbgln("Scheduler thread list for processor {}:", Processor::current_id());
// Fetches the thread's CS segment register for the debug listing.
auto get_cs = [](Thread& thread) -> u16 {
#if ARCH(I386) || ARCH(X86_64)
// Outside a trap, read the thread's saved scheduling registers;
// inside one, read the register dump pushed on its kernel stack.
if (!thread.current_trap())
return thread.regs().cs;
return thread.get_register_dump_from_stack().cs;
#elif ARCH(AARCH64)
// aarch64 has no CS segment register; report 0 in the listing.
(void)thread;
return 0;
#else
# error Unknown architecture
#endif
};
auto get_eip = [](Thread& thread) -> u32 {