/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
2019-06-07 21:02:01 +03:00
|
|
|
#include <Kernel/Arch/i386/CPU.h>
|
2019-07-19 14:08:26 +03:00
|
|
|
#include <Kernel/Process.h>
|
2020-01-03 14:48:28 +03:00
|
|
|
#include <Kernel/Random.h>
|
2020-07-04 18:22:23 +03:00
|
|
|
#include <Kernel/API/Syscall.h>
|
2020-03-28 11:47:16 +03:00
|
|
|
#include <Kernel/ThreadTracer.h>
|
2019-11-17 14:11:43 +03:00
|
|
|
#include <Kernel/VM/MemoryManager.h>
|
2018-10-16 12:01:38 +03:00
|
|
|
|
2020-02-16 03:27:42 +03:00
|
|
|
namespace Kernel {
|
|
|
|
|
2020-06-27 22:42:28 +03:00
|
|
|
extern "C" void syscall_handler(TrapFrame*);
|
2019-12-14 18:09:07 +03:00
|
|
|
extern "C" void syscall_asm_entry();
|
2018-10-16 12:01:38 +03:00
|
|
|
|
2018-11-09 14:20:44 +03:00
|
|
|
// Assembly entry stub for the syscall software interrupt.
// Saves the full userspace register state into a TrapFrame, switches to
// kernel data segments, and hands off to the C++ syscall_handler() via the
// common trap entry/exit machinery.
asm(
    ".globl syscall_asm_entry\n"
    "syscall_asm_entry:\n"
    // Push a dummy error code so the stack layout matches CPU exceptions
    // that push one.
    " pushl $0x0\n"
    // Save all general-purpose registers.
    " pusha\n"
    // Save the userspace segment registers.
    " pushl %ds\n"
    " pushl %es\n"
    " pushl %fs\n"
    " pushl %gs\n"
    " pushl %ss\n"
    // Load the kernel data segment into ds/es.
    " mov $" __STRINGIFY(GDT_SELECTOR_DATA0) ", %ax\n"
    " mov %ax, %ds\n"
    " mov %ax, %es\n"
    // fs gets the per-processor selector.
    " mov $" __STRINGIFY(GDT_SELECTOR_PROC) ", %ax\n"
    " mov %ax, %fs\n"
    // Clear the direction flag as required by the SysV ABI before calling C.
    " cld\n"
    // Zero esi/edi so stale values don't leak through the register dump.
    " xor %esi, %esi\n"
    " xor %edi, %edi\n"
    " pushl %esp \n" // set TrapFrame::regs
    // Reserve the rest of the TrapFrame on the stack (regs pointer above
    // accounts for 4 of its bytes).
    " subl $" __STRINGIFY(TRAP_FRAME_SIZE - 4) ", %esp \n"
    " movl %ebx, 0(%esp) \n" // NOTE(review): ebx caches the TrapFrame pointer across calls below.
    " movl %esp, %ebx \n"
    " pushl %ebx \n" // push pointer to TrapFrame
    " call enter_trap_no_irq \n"
    " movl %ebx, 0(%esp) \n" // push pointer to TrapFrame
    " call syscall_handler \n"
    " movl %ebx, 0(%esp) \n" // push pointer to TrapFrame
    " jmp common_trap_exit \n");
|
2018-10-16 12:01:38 +03:00
|
|
|
|
|
|
|
namespace Syscall {
|
|
|
|
|
2020-03-22 03:12:45 +03:00
|
|
|
static int handle(RegisterState&, u32 function, u32 arg1, u32 arg2, u32 arg3);
|
2019-11-10 00:18:16 +03:00
|
|
|
|
2020-03-22 03:12:45 +03:00
|
|
|
// Install the syscall entry point into the IDT.
void initialize()
{
    // The syscall vector must be callable from ring 3, hence the
    // "user_callable" registration (DPL 3 gate).
    register_user_callable_interrupt_handler(syscall_vector, syscall_asm_entry);
    klog() << "Syscall: int 0x82 handler installed";
}
|
2018-10-16 12:01:38 +03:00
|
|
|
|
2019-11-10 00:18:16 +03:00
|
|
|
#pragma GCC diagnostic ignored "-Wcast-function-type"
|
2020-03-22 03:12:45 +03:00
|
|
|
typedef int (Process::*Handler)(u32, u32, u32);
|
2019-11-10 00:18:16 +03:00
|
|
|
#define __ENUMERATE_SYSCALL(x) reinterpret_cast<Handler>(&Process::sys$##x),
|
2020-03-22 03:12:45 +03:00
|
|
|
static Handler s_syscall_table[] = {
|
2020-08-04 14:00:50 +03:00
|
|
|
ENUMERATE_SYSCALLS(__ENUMERATE_SYSCALL)
|
2020-03-22 03:12:45 +03:00
|
|
|
};
|
2019-11-10 00:18:16 +03:00
|
|
|
#undef __ENUMERATE_SYSCALL
|
2018-12-20 02:39:29 +03:00
|
|
|
|
2020-03-22 03:12:45 +03:00
|
|
|
// Central syscall dispatcher. Routes `function` either to one of the
// special-cased syscalls (exit/exit_thread/fork/sigreturn) or through
// s_syscall_table, passing up to three raw u32 arguments.
// Returns the syscall result (negative errno on failure).
int handle(RegisterState& regs, u32 function, u32 arg1, u32 arg2, u32 arg3)
{
    ASSERT_INTERRUPTS_ENABLED();
    auto current_thread = Thread::current();
    auto& process = current_thread->process();
    current_thread->did_syscall();

    if (function == SC_exit || function == SC_exit_thread) {
        // These syscalls need special handling since they never return to the caller.
        cli();
        if (function == SC_exit)
            process.sys$exit((int)arg1);
        else
            process.sys$exit_thread(arg1);
        ASSERT_NOT_REACHED();
        return 0;
    }

    // fork() and sigreturn() need the full register state, so they bypass
    // the generic three-argument table dispatch below.
    if (function == SC_fork)
        return process.sys$fork(regs);

    if (function == SC_sigreturn)
        return process.sys$sigreturn(regs);

    if (function >= Function::__Count) {
        // FIX: the old message contained a literal "%u" that was never
        // substituted and omitted the requested syscall number entirely;
        // stream the actual number instead.
        dbg() << process << ": Unknown syscall " << function << " requested (" << arg1 << ", " << arg2 << ", " << arg3 << ")";
        return -ENOSYS;
    }

    if (s_syscall_table[function] == nullptr) {
        dbg() << process << ": Null syscall " << function << " requested: \"" << to_string((Function)function) << "\", you probably need to rebuild this program.";
        return -ENOSYS;
    }
    return (process.*(s_syscall_table[function]))(arg1, arg2, arg3);
}
|
|
|
|
|
2018-10-16 12:01:38 +03:00
|
|
|
}
|
|
|
|
|
2020-08-13 22:05:08 +03:00
|
|
|
// Pool of pre-fetched random bytes used by syscall_handler() to jitter the
// kernel stack pointer. Fetched in bulk because asking the RNG for one byte
// per syscall would be too slow on this hot path.
constexpr int RandomByteBufferSize = 256;
u8 g_random_byte_buffer[RandomByteBufferSize];
// Offset of the next unused byte; starts at the end ("exhausted") so the
// very first syscall triggers a refill.
int g_random_byte_buffer_offset = RandomByteBufferSize;
|
|
|
|
|
2020-06-27 22:42:28 +03:00
|
|
|
// C++ side of syscall entry, called from syscall_asm_entry with the saved
// userspace register state. Validates the caller, dispatches via
// Syscall::handle(), and writes the result back into eax for return to
// userspace.
void syscall_handler(TrapFrame* trap)
{
    auto& regs = *trap->regs;
    auto current_thread = Thread::current();

    // If a tracer requested syscall tracing, clear the one-shot flag and
    // trap to the tracer before executing the syscall.
    if (current_thread->tracer() && current_thread->tracer()->is_tracing_syscalls()) {
        current_thread->tracer()->set_trace_syscalls(false);
        current_thread->tracer_trap(regs);
    }

    // Make sure SMAP protection is enabled on syscall entry.
    clac();

    // Apply a random offset in the range 0-255 to the stack pointer,
    // to make kernel stacks a bit less deterministic.
    // Since this is very hot code, request random data in chunks instead of
    // one byte at a time. This is a noticeable speedup.
    if (g_random_byte_buffer_offset == RandomByteBufferSize) {
        get_fast_random_bytes(g_random_byte_buffer, RandomByteBufferSize);
        g_random_byte_buffer_offset = 0;
    }
    auto* ptr = (char*)__builtin_alloca(g_random_byte_buffer[g_random_byte_buffer_offset++]);
    // Empty asm with a memory output ties the allocation to an observable
    // effect so the compiler can't optimize the alloca away.
    asm volatile(""
                 : "=m"(*ptr));

    auto& process = current_thread->process();
    // Reject syscalls made with a stack pointer outside the process's
    // valid user stack.
    if (!MM.validate_user_stack(process, VirtualAddress(regs.userspace_esp))) {
        dbg() << "Invalid stack pointer: " << String::format("%p", regs.userspace_esp);
        handle_crash(regs, "Bad stack on syscall entry", SIGSTKFLT);
        ASSERT_NOT_REACHED();
    }

    // The calling eip must land in a mapped, non-writable region: syscalls
    // from unmapped or writable (i.e. potentially injected) memory are fatal.
    auto* calling_region = MM.find_region_from_vaddr(process, VirtualAddress(regs.eip));
    if (!calling_region) {
        dbg() << "Syscall from " << String::format("%p", regs.eip) << " which has no region";
        handle_crash(regs, "Syscall from unknown region", SIGSEGV);
        ASSERT_NOT_REACHED();
    }

    if (calling_region->is_writable()) {
        dbg() << "Syscall from writable memory at " << String::format("%p", regs.eip);
        handle_crash(regs, "Syscall from writable memory", SIGSEGV);
        ASSERT_NOT_REACHED();
    }

    // Syscalls run under the process-wide "big lock".
    process.big_lock().lock();
    // Userspace ABI: syscall number in eax, arguments in edx/ecx/ebx.
    u32 function = regs.eax;
    u32 arg1 = regs.edx;
    u32 arg2 = regs.ecx;
    u32 arg3 = regs.ebx;
    // Result goes back to userspace via eax.
    regs.eax = (u32)Syscall::handle(regs, function, arg1, arg2, arg3);

    // Post-syscall tracer stop, mirroring the entry-side trap above.
    if (current_thread->tracer() && current_thread->tracer()->is_tracing_syscalls()) {
        current_thread->tracer()->set_trace_syscalls(false);
        current_thread->tracer_trap(regs);
    }

    process.big_lock().unlock();

    // Check if we're supposed to return to userspace or just die.
    current_thread->die_if_needed();

    // Deliver any pending unmasked signals before returning to userspace.
    if (current_thread->has_unmasked_pending_signals())
        (void)current_thread->block<Thread::SemiPermanentBlocker>(nullptr, Thread::SemiPermanentBlocker::Reason::Signal);
}
|
2020-02-16 03:27:42 +03:00
|
|
|
|
|
|
|
}
|