Kernel: Add stubs for missing x86_64 functionality

This adds just enough stubs to make the kernel compile on x86_64. Obviously
it won't do anything useful - in fact it won't even attempt to boot because
Multiboot doesn't support ELF64 binaries - but it gets those compiler errors
out of the way so that more progress can be made on getting all the missing
functionality in place.
Gunnar Beutner 2021-06-23 21:54:41 +02:00 committed by Andreas Kling
parent f2eb759901
commit 38fca26f54
21 changed files with 295 additions and 40 deletions
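For context on the boot caveat in the message above: a Multiboot 1 loader locates the kernel through a magic header and only knows how to parse ELF32 images, so an ELF64 kernel is rejected before any kernel code runs. A minimal sketch of that header (field names are illustrative, not taken from the kernel source):

struct MultibootHeader {
    u32 magic;    // must be 0x1BADB002 for Multiboot 1
    u32 flags;    // loader features the kernel requests
    u32 checksum; // chosen so that magic + flags + checksum == 0 (mod 2^32)
};

Getting an ELF64 kernel off the ground therefore needs either Multiboot 2 or a 32-bit pre-kernel shim that enters long mode itself.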


@@ -289,7 +289,7 @@ public:
ALWAYS_INLINE static Thread* idle_thread()
{
// See comment in Processor::current_thread
return (Thread*)read_fs_u32(__builtin_offsetof(Processor, m_idle_thread));
return (Thread*)read_fs_ptr(__builtin_offsetof(Processor, m_idle_thread));
}
ALWAYS_INLINE u32 get_id() const
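The change above swaps a hard-coded 32-bit FS-relative read for a pointer-width one: m_idle_thread is a pointer, which no longer fits in a u32 on x86_64. A minimal sketch of such an accessor, assuming the per-CPU Processor struct stays reachable through the fs segment base (the movl form mirrors the existing read_fs_u32; the movq form is an assumption about the eventual x86_64 code):

ALWAYS_INLINE static FlatPtr read_fs_ptr(u32 offset)
{
    FlatPtr val;
#if ARCH(I386)
    asm volatile("movl %%fs:%a[off], %[val]" : [val] "=r"(val) : [off] "ir"(offset));
#else
    asm volatile("movq %%fs:%a[off], %[val]" : [val] "=r"(val) : [off] "ir"(offset));
#endif
    return val;
}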


@@ -0,0 +1,42 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Assertions.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86/CPU.h>
#include <Kernel/Arch/x86/Processor.h>
#include <Kernel/Arch/x86/TrapFrame.h>
#include <Kernel/KSyms.h>
#include <Kernel/Process.h>
void __assertion_failed(const char* msg, const char* file, unsigned line, const char* func)
{
asm volatile("cli");
critical_dmesgln("ASSERTION FAILED: {}", msg);
critical_dmesgln("{}:{} in {}", file, line, func);
abort();
}
[[noreturn]] void abort()
{
// Switch back to the current process's page tables if there are any.
// Otherwise stack walking will be a disaster.
auto process = Process::current();
if (process)
MM.enter_process_paging_scope(*process);
Kernel::dump_backtrace();
Processor::halt();
abort();
}
[[noreturn]] void _abort()
{
asm volatile("ud2");
__builtin_unreachable();
}
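These definitions are what the kernel's assertion machinery bottoms out in: the cli keeps further interrupts from re-entering the panic path, Processor::halt() should never return, and the trailing abort() call merely reassures the compiler about [[noreturn]]. Roughly how a failing check reaches __assertion_failed (a sketch of the AK macro, not a verbatim copy):

#define VERIFY(expr)                                                            \
    do {                                                                        \
        if (!static_cast<bool>(expr)) [[unlikely]]                              \
            __assertion_failed(#expr, __FILE__, __LINE__, __PRETTY_FUNCTION__); \
    } while (0)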


@@ -485,7 +485,13 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
// to be ebp.
ProcessPagingScope paging_scope(thread.process());
auto& tss = thread.tss();
u32* stack_top = reinterpret_cast<u32*>(tss.esp);
u32* stack_top;
#if ARCH(I386)
stack_top = reinterpret_cast<u32*>(tss.esp);
#else
(void)tss;
TODO();
#endif
if (is_user_range(VirtualAddress(stack_top), sizeof(FlatPtr))) {
if (!copy_from_user(&frame_ptr, &((FlatPtr*)stack_top)[0]))
frame_ptr = 0;
@@ -494,7 +500,11 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
if (!safe_memcpy(&frame_ptr, &((FlatPtr*)stack_top)[0], sizeof(FlatPtr), fault_at))
frame_ptr = 0;
}
#if ARCH(I386)
eip = tss.eip;
#else
TODO();
#endif
// TODO: We need to leave the scheduler lock here, but we also
// need to prevent the target thread from being run while
// we walk the stack
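These guards exist because the long-mode TSS is not a wider TSS32: it holds no general-purpose register state at all, so there is no esp/eip (or rsp/rip) to read back from it. Its hardware layout, sketched from the architecture manuals rather than from kernel source:

struct [[gnu::packed]] TSS64 {
    u32 reserved0;
    u64 rsp0, rsp1, rsp2; // stack pointers loaded on ring transitions
    u64 reserved1;
    u64 ist1, ist2, ist3, ist4, ist5, ist6, ist7; // interrupt stack table
    u64 reserved2;
    u16 reserved3;
    u16 iomap_base; // offset to the I/O permission bitmap
};

A suspended x86_64 thread's instruction and stack pointers will therefore have to come from software-saved register state instead.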


@@ -101,32 +101,3 @@ extern "C" u32 do_init_context(Thread* thread, u32 flags)
}
}
void __assertion_failed(const char* msg, const char* file, unsigned line, const char* func)
{
asm volatile("cli");
critical_dmesgln("ASSERTION FAILED: {}", msg);
critical_dmesgln("{}:{} in {}", file, line, func);
abort();
}
[[noreturn]] void abort()
{
// Switch back to the current process's page tables if there are any.
// Otherwise stack walking will be a disaster.
auto process = Process::current();
if (process)
MM.enter_process_paging_scope(*process);
Kernel::dump_backtrace();
Processor::halt();
abort();
}
[[noreturn]] void _abort()
{
asm volatile("ud2");
__builtin_unreachable();
}


@@ -6,6 +6,7 @@
#include <Kernel/Arch/x86/DescriptorTable.h>
#include <Kernel/Arch/x86/TrapFrame.h>
// clang-format off
asm(
".globl interrupt_common_asm_entry\n"


@@ -37,7 +37,6 @@ asm(
);
// clang-format on
#if ARCH(I386)
// clang-format off
asm(
".global do_assume_context \n"
@@ -59,7 +58,6 @@ asm(
" jmp enter_thread_context \n"
);
// clang-format on
#endif
String Processor::platform_string() const
{


@@ -317,7 +317,8 @@ apic_ap_start32:
movl $0x80000001, %eax
cpuid
testl $0x100000, %edx
je (1f - apic_ap_start + 0x8000)
// TODO: Uncomment this
//je (1f - apic_ap_start + 0x8000)
/* turn on IA32_EFER.NXE */
movl $0xc0000080, %ecx
rdmsr
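The disabled branch tests CPUID leaf 0x80000001, where EDX bit 20 (0x100000) advertises NX support, and was meant to skip the EFER.NXE write on CPUs without it. The same probe in C++, as a sketch (the function name is made up for illustration):

bool cpu_supports_nx()
{
    u32 eax, ebx, ecx, edx;
    asm volatile("cpuid"
                 : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                 : "a"(0x80000001u), "c"(0u));
    return (edx & (1u << 20)) != 0; // CPUID.80000001h:EDX.NX
}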


@@ -0,0 +1,43 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Assertions.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86/CPU.h>
#include <Kernel/Arch/x86/Processor.h>
#include <Kernel/Arch/x86/TrapFrame.h>
#include <Kernel/KSyms.h>
#include <Kernel/Process.h>
#include <Kernel/Thread.h>
namespace Kernel {
// The compiler can't see the calls to these functions inside assembly.
// Declare them, to avoid dead code warnings.
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread) __attribute__((used));
extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, TrapFrame* trap) __attribute__((used));
extern "C" u32 do_init_context(Thread* thread, u32 flags) __attribute__((used));
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
{
(void)from_thread;
(void)to_thread;
TODO();
}
extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
{
TODO();
}
extern "C" u32 do_init_context(Thread* thread, u32 flags)
{
(void)thread;
(void)flags;
TODO();
}
}
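Every stub in this file leans on TODO() to trap loudly if it is ever reached at runtime; it funnels into the same assertion path shown earlier. Roughly what it expands to (a sketch of the AK definitions):

#define VERIFY_NOT_REACHED() VERIFY(false)
#define TODO() VERIFY_NOT_REACHED()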


@@ -0,0 +1,24 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2021, Gunnar Beutner <gbeutner@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/x86/DescriptorTable.h>
#include <Kernel/Arch/x86/TrapFrame.h>
// clang-format off
asm(
".globl interrupt_common_asm_entry\n"
"interrupt_common_asm_entry: \n"
" int3 \n" // FIXME
".globl common_trap_exit \n"
"common_trap_exit: \n"
// another thread may have handled this trap at this point, so don't
// make assumptions about the stack other than there's a TrapFrame
// and a pointer to it.
" call exit_trap \n"
" int3 \n" // FIXME
);
// clang-format on


@@ -88,6 +88,7 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
// TODO: handle NT?
VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
#if 0
auto& tss = thread.tss();
bool return_to_user = (tss.cs & 3) != 0;
@@ -116,7 +117,6 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
// However, the first step is to always start in kernel mode with thread_context_first_enter
RegisterState& iretframe = *reinterpret_cast<RegisterState*>(stack_top);
// FIXME: copy state to be recovered through TSS
TODO();
// make space for a trap frame
stack_top -= sizeof(TrapFrame);
@@ -161,6 +161,9 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
tss.gs = GDT_SELECTOR_DATA0;
tss.ss = GDT_SELECTOR_DATA0;
tss.fs = GDT_SELECTOR_PROC;
#else
TODO();
#endif
return stack_top;
}
@@ -202,11 +205,15 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
auto& tss = initial_thread.tss();
m_tss = tss;
#if 0
m_tss.esp0 = tss.esp0;
m_tss.ss0 = GDT_SELECTOR_DATA0;
// user mode needs to be able to switch to kernel mode:
m_tss.cs = m_tss.ds = m_tss.es = m_tss.gs = m_tss.ss = GDT_SELECTOR_CODE0 | 3;
m_tss.fs = GDT_SELECTOR_PROC | 3;
#else
TODO();
#endif
m_scheduler_initialized = true;


@@ -0,0 +1,89 @@
/*
* Copyright (c) 2021, Gunnar Beutner <gbeutner@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/x86/SafeMem.h>
#define CODE_SECTION(section_name) __attribute__((section(section_name)))
namespace Kernel {
CODE_SECTION(".text.safemem")
NEVER_INLINE bool safe_memcpy(void* dest_ptr, const void* src_ptr, size_t n, void*& fault_at)
{
(void)dest_ptr;
(void)src_ptr;
(void)n;
(void)fault_at;
TODO();
}
CODE_SECTION(".text.safemem")
NEVER_INLINE ssize_t safe_strnlen(const char* str, size_t max_n, void*& fault_at)
{
(void)str;
(void)max_n;
(void)fault_at;
TODO();
}
CODE_SECTION(".text.safemem")
NEVER_INLINE bool safe_memset(void* dest_ptr, int c, size_t n, void*& fault_at)
{
(void)dest_ptr;
(void)c;
(void)n;
(void)fault_at;
TODO();
}
CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE Optional<u32> safe_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
(void)var;
(void)val;
TODO();
}
CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE Optional<u32> safe_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
(void)var;
(void)val;
TODO();
}
CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE Optional<u32> safe_atomic_load_relaxed(volatile u32* var)
{
(void)var;
TODO();
}
CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE bool safe_atomic_store_relaxed(volatile u32* var, u32 val)
{
(void)var;
(void)val;
TODO();
}
CODE_SECTION(".text.safemem.atomic")
NEVER_INLINE Optional<bool> safe_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
(void)var;
(void)expected;
(void)val;
TODO();
}
bool handle_safe_access_fault(RegisterState& regs, u32 fault_address)
{
(void)regs;
(void)fault_address;
TODO();
}
}
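The CODE_SECTION annotations matter even while the bodies are stubs: the page-fault handler tells a recoverable userspace-access fault apart from a kernel bug by checking whether the faulting instruction pointer falls inside .text.safemem. A rough sketch of the classification step handle_safe_access_fault will eventually need, assuming linker-provided bounds symbols (all names here are illustrative, not kernel source):

extern "C" u8 start_of_safemem_text[];
extern "C" u8 end_of_safemem_text[];

bool handle_safe_access_fault(RegisterState& regs, u32 fault_address)
{
    FlatPtr ip = regs.eip; // instruction pointer field; per-arch in practice
    if (ip < (FlatPtr)start_of_safemem_text || ip >= (FlatPtr)end_of_safemem_text)
        return false; // not inside a safe_* helper; fall through to a panic
    // A full implementation would now report fault_address to the
    // interrupted helper and resume it at a fixup label so it can
    // return failure; this sketch only shows the classification.
    (void)fault_address;
    return true;
}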


@@ -275,15 +275,16 @@ set(KERNEL_SOURCES
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/${KERNEL_ARCH}/CPU.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/${KERNEL_ARCH}/InterruptEntry.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/${KERNEL_ARCH}/Processor.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/${KERNEL_ARCH}/ProcessorInfo.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/${KERNEL_ARCH}/SafeMem.cpp
)
set(KERNEL_SOURCES
${KERNEL_SOURCES}
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/ASM_wrapper.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/CPU.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/Interrupts.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/Processor.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/ProcessorInfo.cpp
${CMAKE_CURRENT_SOURCE_DIR}/Arch/x86/common/TrapFrame.cpp
)
@@ -395,7 +396,7 @@ if (ENABLE_KERNEL_LTO)
check_ipo_supported()
set_property(TARGET Kernel PROPERTY INTERPROCEDURAL_OPTIMIZATION TRUE)
endif()
target_link_libraries(Kernel kernel_heap gcc stdc++)
target_link_libraries(Kernel kernel_heap gcc supc++)
add_dependencies(Kernel kernel_heap)
install(TARGETS Kernel RUNTIME DESTINATION boot)


@@ -20,6 +20,7 @@
#include <Kernel/KBufferBuilder.h>
#include <Kernel/KSyms.h>
#include <Kernel/Module.h>
#include <Kernel/Panic.h>
#include <Kernel/PerformanceEventBuffer.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Process.h>
@@ -179,8 +180,14 @@ RefPtr<Process> Process::create_kernel_process(RefPtr<Thread>& first_thread, Str
auto process = Process::create(first_thread, move(name), (uid_t)0, (gid_t)0, ProcessID(0), true);
if (!first_thread || !process)
return {};
#if ARCH(I386)
first_thread->tss().eip = (FlatPtr)entry;
first_thread->tss().esp = FlatPtr(entry_data); // entry function argument is expected to be in tss.esp
#else
(void)entry;
(void)entry_data;
PANIC("Process::create_kernel_process() not implemented");
#endif
if (process->pid() != 0) {
process->ref();
@@ -640,9 +647,15 @@ RefPtr<Thread> Process::create_kernel_thread(void (*entry)(void*), void* entry_d
if (!joinable)
thread->detach();
#if ARCH(I386)
auto& tss = thread->tss();
tss.eip = (FlatPtr)entry;
tss.esp = FlatPtr(entry_data); // entry function argument is expected to be in tss.esp
#else
(void)entry;
(void)entry_data;
PANIC("Process::create_kernel_thread() not implemented");
#endif
ScopedSpinLock lock(g_scheduler_lock);
thread->set_state(Thread::State::Runnable);


@@ -228,10 +228,14 @@ bool Scheduler::pick_next()
auto& thread_to_schedule = pull_next_runnable_thread();
if constexpr (SCHEDULER_DEBUG) {
#if ARCH(I386)
dbgln("Scheduler[{}]: Switch to {} @ {:04x}:{:08x}",
Processor::id(),
thread_to_schedule,
thread_to_schedule.tss().cs, thread_to_schedule.tss().eip);
#else
PANIC("Scheduler::pick_next() not implemented");
#endif
}
// We need to leave our first critical section before switching context,
@@ -571,14 +575,22 @@ void dump_thread_list()
dbgln("Scheduler thread list for processor {}:", Processor::id());
auto get_cs = [](Thread& thread) -> u16 {
#if ARCH(I386)
if (!thread.current_trap())
return thread.tss().cs;
#else
PANIC("get_cs() not implemented");
#endif
return thread.get_register_dump_from_stack().cs;
};
auto get_eip = [](Thread& thread) -> u32 {
#if ARCH(I386)
if (!thread.current_trap())
return thread.tss().eip;
#else
PANIC("get_eip() not implemented");
#endif
return thread.get_register_dump_from_stack().eip;
};


@@ -11,6 +11,7 @@
#include <Kernel/Debug.h>
#include <Kernel/FileSystem/Custody.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/Panic.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Process.h>
#include <Kernel/Random.h>
@@ -635,6 +636,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
}
new_main_thread->reset_fpu_state();
#if ARCH(I386)
auto& tss = new_main_thread->m_tss;
tss.cs = GDT_SELECTOR_CODE3 | 3;
tss.ds = GDT_SELECTOR_DATA3 | 3;
@@ -646,6 +648,10 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
tss.esp = new_userspace_esp;
tss.cr3 = space().page_directory().cr3();
tss.ss2 = pid().value();
#else
(void)new_userspace_esp;
PANIC("Process::do_exec() not implemented");
#endif
{
TemporaryChange profiling_disabler(m_profiling, was_profiling);


@@ -7,6 +7,7 @@
#include <Kernel/Debug.h>
#include <Kernel/FileSystem/Custody.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/Panic.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Process.h>
#include <Kernel/VM/Region.h>
@@ -43,6 +44,7 @@ KResultOr<pid_t> Process::sys$fork(RegisterState& regs)
dbgln_if(FORK_DEBUG, "fork: child={}", child);
child->space().set_enforces_syscall_regions(space().enforces_syscall_regions());
#if ARCH(I386)
auto& child_tss = child_first_thread->m_tss;
child_tss.eax = 0; // fork() returns 0 in the child :^)
child_tss.ebx = regs.ebx;
@@ -62,6 +64,10 @@ KResultOr<pid_t> Process::sys$fork(RegisterState& regs)
child_tss.ss = regs.userspace_ss;
dbgln_if(FORK_DEBUG, "fork: child will begin executing at {:04x}:{:08x} with stack {:04x}:{:08x}, kstack {:04x}:{:08x}", child_tss.cs, child_tss.eip, child_tss.ss, child_tss.esp, child_tss.ss0, child_tss.esp0);
#else
(void)regs;
PANIC("Process::sys$fork() not implemented.");
#endif
{
ScopedSpinLock lock(space().get_lock());


@@ -8,6 +8,7 @@
#include <AK/String.h>
#include <AK/StringBuilder.h>
#include <AK/StringView.h>
#include <Kernel/Panic.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Process.h>
#include <Kernel/VM/MemoryManager.h>
@@ -60,11 +61,16 @@ KResultOr<int> Process::sys$create_thread(void* (*entry)(void*), Userspace<const
if (!is_thread_joinable)
thread->detach();
#if ARCH(I386)
auto& tss = thread->tss();
tss.eip = (FlatPtr)entry;
tss.eflags = 0x0202;
tss.cr3 = space().page_directory().cr3();
tss.esp = user_esp.value();
#else
(void)entry;
PANIC("Process::sys$create_thread() not implemented");
#endif
auto tsr_result = thread->make_thread_specific_region({});
if (tsr_result.is_error())


@@ -86,6 +86,7 @@ Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Region> kernel_stac
reset_fpu_state();
m_tss.iomapbase = sizeof(TSS32);
#if ARCH(I386)
// Only IF is set when a process boots.
m_tss.eflags = 0x0202;
@@ -106,10 +107,14 @@ Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Region> kernel_stac
}
m_tss.cr3 = m_process->space().page_directory().cr3();
#else
PANIC("Thread::Thread() not implemented");
#endif
m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;
#if ARCH(I386)
if (m_process->is_kernel_process()) {
m_tss.esp = m_tss.esp0 = m_kernel_stack_top;
} else {
@@ -118,6 +123,9 @@ Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Region> kernel_stac
m_tss.ss0 = GDT_SELECTOR_DATA0;
m_tss.esp0 = m_kernel_stack_top;
}
#else
PANIC("Thread::Thread() not implemented");
#endif
// We need to add another reference if we could successfully create
// all the resources needed for this thread. The reason for this is that
@@ -801,12 +809,12 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
FlatPtr old_esp = *stack;
FlatPtr ret_eip = state.eip;
FlatPtr ret_eflags = state.eflags;
dbgln_if(SIGNAL_DEBUG, "Setting up user stack to return to EIP {:p}, ESP {:p}", ret_eip, old_esp);
#elif ARCH(X86_64)
FlatPtr* stack = &state.userspace_esp;
#endif
dbgln_if(SIGNAL_DEBUG, "Setting up user stack to return to EIP {:p}, ESP {:p}", ret_eip, old_esp);
#if ARCH(I386)
// Align the stack to 16 bytes.
// Note that we push 56 bytes (4 * 14) on to the stack,
@@ -826,8 +834,9 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
push_value_on_user_stack(stack, state.esi);
push_value_on_user_stack(stack, state.edi);
#elif ARCH(X86_64)
#else
// FIXME
PANIC("Thread:dispatch_signal() not implemented");
#endif
// PUSH old_signal_mask
@@ -848,7 +857,12 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
setup_stack(regs);
regs.eip = process.signal_trampoline().get();
#if ARCH(I386)
dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:04x}:{:08x} to deliver {}", state_string(), m_tss.cs, m_tss.eip, signal);
#else
PANIC("Thread:dispatch_signal() not implemented");
#endif
return DispatchSignalResult::Continue;
}
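The i386 branches above spill the saved register file onto the user stack one word at a time through push_value_on_user_stack; a sketch of its likely shape (copy_to_user is the kernel's checked write into userspace):

void Thread::push_value_on_user_stack(FlatPtr* stack, FlatPtr data)
{
    *stack -= sizeof(FlatPtr); // stacks grow downwards
    Userspace<FlatPtr*> stack_ptr = *stack;
    [[maybe_unused]] auto result = copy_to_user(stack_ptr, &data);
}

The x86_64 side panics for now partly because the byte counts in the alignment comment (56 bytes, 4 * 14) assume a 4-byte FlatPtr.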


@@ -11,6 +11,7 @@
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/Multiboot.h>
#include <Kernel/Panic.h>
#include <Kernel/Process.h>
#include <Kernel/Sections.h>
#include <Kernel/StdLib.h>
@@ -732,8 +733,13 @@ void MemoryManager::enter_space(Space& space)
VERIFY(current_thread != nullptr);
ScopedSpinLock lock(s_mm_lock);
#if ARCH(I386)
current_thread->tss().cr3 = space.page_directory().cr3();
write_cr3(space.page_directory().cr3());
#else
(void)space;
PANIC("MemoryManager::enter_space not implemented");
#endif
}
void MemoryManager::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
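For reference, write_cr3 is a one-instruction wrapper around loading the page-directory base; a sketch of both widths (the movl form matches the style of the existing i386 ASM wrappers, the movq form is an assumption):

ALWAYS_INLINE void write_cr3(FlatPtr cr3)
{
#if ARCH(I386)
    asm volatile("movl %%eax, %%cr3" ::"a"(cr3) : "memory");
#else
    asm volatile("movq %%rax, %%cr3" ::"a"(cr3) : "memory");
#endif
}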


@@ -5,6 +5,7 @@
*/
#include <Kernel/Arch/x86/InterruptDisabler.h>
#include <Kernel/Panic.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/ProcessPagingScope.h>
@@ -20,7 +21,11 @@ ProcessPagingScope::ProcessPagingScope(Process& process)
ProcessPagingScope::~ProcessPagingScope()
{
InterruptDisabler disabler;
#if ARCH(I386)
Thread::current()->tss().cr3 = m_previous_cr3;
#else
PANIC("ProcessPagingScope::~ProcessPagingScope() not implemented");
#endif
write_cr3(m_previous_cr3);
}