2020-01-18 11:38:21 +03:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions are met:
|
|
|
|
*
|
|
|
|
* 1. Redistributions of source code must retain the above copyright notice, this
|
|
|
|
* list of conditions and the following disclaimer.
|
|
|
|
*
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
|
|
|
* this list of conditions and the following disclaimer in the documentation
|
|
|
|
* and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
|
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
|
|
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
|
|
|
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
|
|
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
|
|
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2019-11-29 16:55:07 +03:00
|
|
|
#include <AK/Demangle.h>
|
2021-01-01 08:45:16 +03:00
|
|
|
#include <AK/ScopeGuard.h>
|
2019-07-25 22:02:19 +03:00
|
|
|
#include <AK/StringBuilder.h>
|
2020-11-15 21:58:19 +03:00
|
|
|
#include <AK/Time.h>
|
2020-01-05 20:00:15 +03:00
|
|
|
#include <Kernel/Arch/i386/CPU.h>
|
2021-01-25 18:07:10 +03:00
|
|
|
#include <Kernel/Debug.h>
|
2019-06-07 10:36:51 +03:00
|
|
|
#include <Kernel/FileSystem/FileDescription.h>
|
2020-02-16 03:27:42 +03:00
|
|
|
#include <Kernel/KSyms.h>
|
2021-01-11 11:52:18 +03:00
|
|
|
#include <Kernel/PerformanceEventBuffer.h>
|
2019-06-07 12:43:58 +03:00
|
|
|
#include <Kernel/Process.h>
|
|
|
|
#include <Kernel/Scheduler.h>
|
|
|
|
#include <Kernel/Thread.h>
|
2020-03-28 11:47:16 +03:00
|
|
|
#include <Kernel/ThreadTracer.h>
|
2020-04-26 12:32:37 +03:00
|
|
|
#include <Kernel/TimerQueue.h>
|
2019-04-03 16:13:07 +03:00
|
|
|
#include <Kernel/VM/MemoryManager.h>
|
2020-02-16 03:33:41 +03:00
|
|
|
#include <Kernel/VM/PageDirectory.h>
|
2020-03-01 17:38:09 +03:00
|
|
|
#include <Kernel/VM/ProcessPagingScope.h>
|
2019-03-24 00:03:17 +03:00
|
|
|
#include <LibC/signal_numbers.h>
|
|
|
|
|
2020-02-16 03:27:42 +03:00
|
|
|
namespace Kernel {
|
|
|
|
|
2020-08-02 05:04:56 +03:00
|
|
|
// Constructs a new thread belonging to `process`. On any mid-construction
// failure the ArmedScopeGuard undoes the thread-count bump; callers must
// check was_created() (i.e. that the kernel stack region was allocated)
// before using the thread.
Thread::Thread(NonnullRefPtr<Process> process)
    : m_process(move(process))
    , m_name(m_process->name())
{
    // fetch_add returns the previous count, so 0 means we are the
    // process's very first thread.
    bool is_first_thread = m_process->m_thread_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed) == 0;
    // If we bail out early (stack allocation failure), drop the count we
    // just added. Disarmed below once construction has fully succeeded.
    ArmedScopeGuard guard([&]() {
        drop_thread_count(is_first_thread);
    });
    if (is_first_thread) {
        // First thread gets TID == PID
        m_tid = m_process->pid().value();
    } else {
        m_tid = Process::allocate_pid().value();
    }
    if constexpr (THREAD_DEBUG)
        dbgln("Created new thread {}({}:{})", m_process->name(), m_process->pid().value(), m_tid.value());
    set_default_signal_dispositions();
    // FPU state must be 16-byte aligned for fxsave/fxrstor-style use.
    m_fpu_state = (FPUState*)kmalloc_aligned<16>(sizeof(FPUState));
    reset_fpu_state();
    memset(&m_tss, 0, sizeof(m_tss));
    // An iomapbase pointing past the TSS means "no I/O permission bitmap".
    m_tss.iomapbase = sizeof(TSS32);

    // Only IF is set when a process boots.
    m_tss.eflags = 0x0202;

    // Segment selectors: ring 0 for kernel threads, ring 3 (RPL 3) otherwise.
    if (m_process->is_kernel_process()) {
        m_tss.cs = GDT_SELECTOR_CODE0;
        m_tss.ds = GDT_SELECTOR_DATA0;
        m_tss.es = GDT_SELECTOR_DATA0;
        m_tss.fs = GDT_SELECTOR_PROC;
        m_tss.ss = GDT_SELECTOR_DATA0;
        m_tss.gs = 0;
    } else {
        m_tss.cs = GDT_SELECTOR_CODE3 | 3;
        m_tss.ds = GDT_SELECTOR_DATA3 | 3;
        m_tss.es = GDT_SELECTOR_DATA3 | 3;
        m_tss.fs = GDT_SELECTOR_DATA3 | 3;
        m_tss.ss = GDT_SELECTOR_DATA3 | 3;
        m_tss.gs = GDT_SELECTOR_TLS | 3;
    }

    m_tss.cr3 = m_process->page_directory().cr3();

    m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::formatted("Kernel Stack (Thread {})", m_tid.value()), Region::Access::Read | Region::Access::Write, false, AllocationStrategy::AllocateNow);
    if (!m_kernel_stack_region) {
        // Abort creating this thread, was_created() will return false
        return;
    }

    m_kernel_stack_region->set_stack(true);
    m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
    // Stack top is rounded down to 8-byte alignment.
    m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;

    if (m_process->is_kernel_process()) {
        m_tss.esp = m_tss.esp0 = m_kernel_stack_top;
    } else {
        // Ring 3 processes get a separate stack for ring 0.
        // The ring 3 stack will be assigned by exec().
        m_tss.ss0 = GDT_SELECTOR_DATA0;
        m_tss.esp0 = m_kernel_stack_top;
    }

    // We need to add another reference if we could successfully create
    // all the resources needed for this thread. The reason for this is that
    // we don't want to delete this thread after dropping the reference,
    // it may still be running or scheduled to be run.
    // The finalizer is responsible for dropping this reference once this
    // thread is ready to be cleaned up.
    ref();
    guard.disarm();

    if (m_process->pid() != 0)
        Scheduler::init_thread(*this);
}
|
|
|
|
|
|
|
|
Thread::~Thread()
{
    {
        // We need to explicitly remove ourselves from the thread list
        // here. We may get pre-empted in the middle of destructing this
        // thread, which causes problems if the thread list is iterated.
        // Specifically, if this is the last thread of a process, checking
        // block conditions would access m_process, which would be in
        // the middle of being destroyed.
        ScopedSpinLock lock(g_scheduler_lock);
        g_scheduler_data->thread_list_for_state(m_state).remove(*this);
    }
}
|
2020-11-12 02:05:00 +03:00
|
|
|
|
2020-11-30 02:05:27 +03:00
|
|
|
// Wakes this thread up if it is currently blocked on `blocker`.
// Safe to call from IRQ context: the actual unblock is then deferred,
// because taking g_scheduler_lock/m_block_lock in an IRQ handler is
// not allowed here.
void Thread::unblock_from_blocker(Blocker& blocker)
{
    auto do_unblock = [&]() {
        ScopedSpinLock scheduler_lock(g_scheduler_lock);
        ScopedSpinLock block_lock(m_block_lock);
        // The thread may have already been unblocked (or re-blocked on a
        // different blocker) by the time we run; only act if it's still ours.
        if (m_blocker != &blocker)
            return;
        // Stopped (or to-be-stopped) threads stay put; they'll be unblocked
        // when resumed.
        if (!should_be_stopped() && !is_stopped())
            unblock();
    };
    if (Processor::current().in_irq()) {
        // Capture a weak pointer so the deferred call doesn't touch a
        // thread that got destroyed before it runs.
        Processor::current().deferred_call_queue([do_unblock = move(do_unblock), self = make_weak_ptr()]() {
            if (auto this_thread = self.strong_ref())
                do_unblock();
        });
    } else {
        do_unblock();
    }
}
|
|
|
|
|
2020-11-30 02:05:27 +03:00
|
|
|
// Transitions this thread out of the Blocked state, optionally because of
// `signal` (0 means a non-signal wakeup). Callers must already hold BOTH
// g_scheduler_lock and m_block_lock, and must not be in an IRQ handler.
void Thread::unblock(u8 signal)
{
    ASSERT(!Processor::current().in_irq());
    ASSERT(g_scheduler_lock.own_lock());
    ASSERT(m_block_lock.own_lock());
    if (m_state != Thread::Blocked)
        return;
    ASSERT(m_blocker);
    if (signal != 0) {
        if (is_handling_page_fault()) {
            // Don't let signals unblock threads that are blocked inside a page fault handler.
            // This prevents threads from EINTR'ing the inode read in an inode page fault.
            // FIXME: There's probably a better way to solve this.
            return;
        }
        // Uninterruptible blockers only yield to signals when the thread
        // is being killed anyway.
        if (!m_blocker->can_be_interrupted() && !m_should_die)
            return;
        m_blocker->set_interrupted_by_signal(signal);
    }
    m_blocker = nullptr;
    if (Thread::current() == this) {
        // We're unblocking ourselves (e.g. from within block()); we are
        // already executing, so go straight back to Running.
        set_state(Thread::Running);
        return;
    }
    ASSERT(m_state != Thread::Runnable && m_state != Thread::Running);
    set_state(Thread::Runnable);
}
|
|
|
|
|
Kernel: Unwind kernel stacks before dying
While executing in the kernel, a thread can acquire various resources
that need cleanup, such as locks and references to RefCounted objects.
This cleanup normally happens on the exit path, such as in destructors
for various RAII guards. But we weren't calling those exit paths when
killing threads that have been executing in the kernel, such as threads
blocked on reading or sleeping, thus causing leaks.
This commit changes how killing threads works. Now, instead of killing
a thread directly, one is supposed to call thread->set_should_die(),
which will unblock it and make it unwind the stack if it is blocked
in the kernel. Then, just before returning to the userspace, the thread
will automatically die.
2019-11-14 18:46:01 +03:00
|
|
|
// Marks this thread for death. Instead of killing a thread directly, callers
// use this so a thread blocked in the kernel can unwind its stack (running
// destructors / releasing locks) and then die just before returning to
// userspace.
void Thread::set_should_die()
{
    if (m_should_die) {
        // Already marked; nothing more to do.
        dbgln("{} Should already die", *this);
        return;
    }
    ScopedCritical critical;

    // Remember that we should die instead of returning to
    // the userspace.
    ScopedSpinLock lock(g_scheduler_lock);
    m_should_die = true;

    // NOTE: Even the current thread can technically be in "Stopped"
    // state! This is the case when another thread sent a SIGSTOP to
    // it while it was running and it calls e.g. exit() before
    // the scheduler gets involved again.
    if (is_stopped()) {
        // If we were stopped, we need to briefly resume so that
        // the kernel stacks can clean up. We won't ever return back
        // to user mode, though
        ASSERT(!process().is_stopped());
        resume_from_stopped();
    }
    if (is_blocked()) {
        ScopedSpinLock block_lock(m_block_lock);
        if (m_blocker) {
            // We're blocked in the kernel.
            m_blocker->set_interrupted_by_death();
            unblock();
        }
    }
}
|
|
|
|
|
|
|
|
// Called by the current thread on its way back to userspace; if it has been
// marked for death, this yields into the scheduler and never returns.
void Thread::die_if_needed()
{
    ASSERT(Thread::current() == this);

    if (!m_should_die)
        return;

    // Release the process big lock if we hold it; we are never coming back
    // to restore it.
    u32 unlock_count;
    [[maybe_unused]] auto rc = unlock_process_if_locked(unlock_count);

    ScopedCritical critical;
    set_should_die();

    // Flag a context switch. Because we're in a critical section,
    // Scheduler::yield will actually only mark a pending context switch
    // Simply leaving the critical section would not necessarily trigger
    // a switch.
    Scheduler::yield();

    // Now leave the critical section so that we can also trigger the
    // actual context switch
    u32 prev_flags;
    Processor::current().clear_critical(prev_flags, false);
    dbgln("die_if_needed returned from clear_critical!!! in irq: {}", Processor::current().in_irq());
    // We should never get here, but the scoped scheduler lock
    // will be released by Scheduler::context_switch again
    ASSERT_NOT_REACHED();
}
|
|
|
|
|
2020-11-17 06:51:34 +03:00
|
|
|
// Terminates the current thread, publishing `exit_value` to any joiners.
// Does not return: die_if_needed() yields into the scheduler for good.
void Thread::exit(void* exit_value)
{
    ASSERT(Thread::current() == this);
    // Wake up joiners and hand them the exit value before we go away.
    m_join_condition.thread_did_exit(exit_value);
    set_should_die();
    u32 unlock_count;
    [[maybe_unused]] auto rc = unlock_process_if_locked(unlock_count);
    die_if_needed();
}
|
|
|
|
|
2020-12-08 07:29:41 +03:00
|
|
|
// Yields the CPU when the caller is known NOT to hold the process big lock.
// Temporarily leaves the critical section so the flagged context switch can
// actually happen, then restores the previous critical/interrupt state.
void Thread::yield_while_not_holding_big_lock()
{
    ASSERT(!g_scheduler_lock.own_lock());
    u32 prev_flags;
    u32 prev_crit = Processor::current().clear_critical(prev_flags, true);
    Scheduler::yield();
    // NOTE: We may be on a different CPU now!
    Processor::current().restore_critical(prev_crit, prev_flags);
}
|
|
|
|
|
2019-11-16 14:18:59 +03:00
|
|
|
// Yields the CPU, dropping the process big lock first (if held) and
// re-acquiring it with the same recursion count afterwards.
void Thread::yield_without_holding_big_lock()
{
    ASSERT(!g_scheduler_lock.own_lock());
    u32 lock_count_to_restore = 0;
    auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
    // NOTE: Even though we call Scheduler::yield here, unless we happen
    // to be outside of a critical section, the yield will be postponed
    // until leaving it in relock_process.
    Scheduler::yield();
    relock_process(previous_locked, lock_count_to_restore);
}
|
2019-12-01 13:57:20 +03:00
|
|
|
|
2020-12-15 02:36:22 +03:00
|
|
|
// Donates the remainder of this thread's time slice to `thread` (see
// Scheduler::donate_to), dropping the process big lock around the switch
// and restoring it afterwards. `reason` is a debugging label.
void Thread::donate_without_holding_big_lock(RefPtr<Thread>& thread, const char* reason)
{
    ASSERT(!g_scheduler_lock.own_lock());
    u32 lock_count_to_restore = 0;
    auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
    // NOTE: Even though we call Scheduler::yield here, unless we happen
    // to be outside of a critical section, the yield will be postponed
    // until leaving it in relock_process.
    Scheduler::donate_to(thread, reason);
    relock_process(previous_locked, lock_count_to_restore);
}
|
|
|
|
|
2020-12-15 02:36:22 +03:00
|
|
|
// Releases the process "big lock" entirely if this thread currently holds
// it. The number of recursive acquisitions that were released is written to
// `lock_count_to_restore`; the returned LockMode tells relock_process()
// whether (and how) to re-take the lock later.
LockMode Thread::unlock_process_if_locked(u32& lock_count_to_restore)
{
    auto& big_lock = process().big_lock();
    return big_lock.force_unlock_if_locked(lock_count_to_restore);
}
|
|
|
|
|
2020-12-15 02:36:22 +03:00
|
|
|
// Counterpart to unlock_process_if_locked(): lets the pending context switch
// happen, then restores the big lock to its previous mode/recursion count.
void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
{
    // Clearing the critical section may trigger the context switch
    // flagged by calling Scheduler::donate_to or Scheduler::yield
    // above. We have to do it this way because we intentionally
    // leave the critical section here to be able to switch contexts.
    u32 prev_flags;
    u32 prev_crit = Processor::current().clear_critical(prev_flags, true);

    // CONTEXT SWITCH HAPPENS HERE!

    // NOTE: We may be on a different CPU now!
    Processor::current().restore_critical(prev_crit, prev_flags);

    if (previous_locked != LockMode::Unlocked) {
        // We've unblocked, relock the process if needed and carry on.
        RESTORE_LOCK(process().big_lock(), previous_locked, lock_count_to_restore);
    }
}
|
2019-03-24 00:03:17 +03:00
|
|
|
|
2020-12-02 02:53:47 +03:00
|
|
|
// Blocks the current thread for a relative `duration` measured against
// `clock_id`. If interrupted early and `remaining_time` is non-null, the
// unslept time is reported through it.
auto Thread::sleep(clockid_t clock_id, const timespec& duration, timespec* remaining_time) -> BlockResult
{
    ASSERT(state() == Thread::Running);
    // `false` selects a relative (not absolute) timeout.
    return Thread::current()->block<Thread::SleepBlocker>({}, Thread::BlockTimeout(false, &duration, nullptr, clock_id), remaining_time);
}
|
|
|
|
|
2020-12-02 02:53:47 +03:00
|
|
|
// Blocks the current thread until the absolute time `deadline` on `clock_id`.
auto Thread::sleep_until(clockid_t clock_id, const timespec& deadline) -> BlockResult
{
    ASSERT(state() == Thread::Running);
    // `true` selects an absolute-deadline timeout.
    return Thread::current()->block<Thread::SleepBlocker>({}, Thread::BlockTimeout(true, &deadline, nullptr, clock_id));
}
|
|
|
|
|
2019-07-19 10:51:48 +03:00
|
|
|
// Returns a human-readable name for this thread's current state, used in
// /proc-style listings and debug output. For Blocked threads the blocker
// itself supplies the string (e.g. which resource it's waiting on).
const char* Thread::state_string() const
{
    switch (state()) {
    case Thread::Invalid:
        return "Invalid";
    case Thread::Runnable:
        return "Runnable";
    case Thread::Running:
        return "Running";
    case Thread::Dying:
        return "Dying";
    case Thread::Dead:
        return "Dead";
    case Thread::Stopped:
        return "Stopped";
    case Thread::Blocked: {
        // Hold m_block_lock so m_blocker can't be cleared underneath us.
        ScopedSpinLock block_lock(m_block_lock);
        ASSERT(m_blocker != nullptr);
        return m_blocker->state_string();
    }
    }
    // Unreachable for valid states; loudly report corruption.
    klog() << "Thread::state_string(): Invalid state: " << state();
    ASSERT_NOT_REACHED();
    return nullptr;
}
|
|
|
|
|
|
|
|
// Performs final cleanup of a dying thread. Only ever runs on the finalizer
// thread — a thread can never finalize itself, since its kernel stack is
// still in use.
void Thread::finalize()
{
    ASSERT(Thread::current() == g_finalizer);
    ASSERT(Thread::current() != this);

#if LOCK_DEBUG
    // A finalized thread must not hold any locks; leaking one here would
    // deadlock the rest of the system. Dump the offenders before asserting.
    ASSERT(!m_lock.own_lock());
    if (lock_count() > 0) {
        dbgln("Thread {} leaking {} Locks!", *this, lock_count());
        ScopedSpinLock list_lock(m_holding_locks_lock);
        for (auto& info : m_holding_locks_list)
            dbgln(" - {} @ {} locked at {}:{} count: {}", info.lock->name(), info.lock, info.file, info.line, info.count);
        ASSERT_NOT_REACHED();
    }
#endif

    {
        ScopedSpinLock lock(g_scheduler_lock);
        dbgln<THREAD_DEBUG>("Finalizing thread {}", *this);
        set_state(Thread::State::Dead);
        // Let joiners know the thread is fully gone.
        m_join_condition.thread_finalizing();
    }

    if (m_dump_backtrace_on_finalization)
        dbgln("{}", backtrace_impl());

    kfree_aligned(m_fpu_state);
    // Drop this thread from the process's live-thread count; if it was the
    // last one, the process gets finalized too.
    drop_thread_count(false);
}
|
2020-09-27 17:53:35 +03:00
|
|
|
|
2021-01-01 08:45:16 +03:00
|
|
|
// Decrements the owning process's thread count. When the last thread is
// gone the process itself is finalized — unless we're unwinding a failed
// first-thread construction (`initializing_first_thread`), in which case
// the process must not be finalized.
void Thread::drop_thread_count(bool initializing_first_thread)
{
    // fetch_sub returns the value before the decrement.
    auto thread_cnt_before = m_process->m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);

    ASSERT(thread_cnt_before != 0);
    if (!initializing_first_thread && thread_cnt_before == 1)
        process().finalize();
}
|
|
|
|
|
|
|
|
// Finalizer-thread main helper: collects all finalizable Dying threads
// under the scheduler lock, then finalizes them with the lock released
// (finalize() takes the lock itself and may do heavier work).
void Thread::finalize_dying_threads()
{
    ASSERT(Thread::current() == g_finalizer);
    Vector<Thread*, 32> dying_threads;
    {
        ScopedSpinLock lock(g_scheduler_lock);
        for_each_in_state(Thread::State::Dying, [&](Thread& thread) {
            if (thread.is_finalizable())
                dying_threads.append(&thread);
            return IterationDecision::Continue;
        });
    }
    for (auto* thread : dying_threads) {
        thread->finalize();

        // This thread will never execute again, drop the running reference
        // NOTE: This may not necessarily drop the last reference if anything
        // else is still holding onto this thread!
        thread->unref();
    }
}
|
|
|
|
|
2021-01-26 02:37:36 +03:00
|
|
|
bool Thread::tick()
|
2019-03-24 00:03:17 +03:00
|
|
|
{
|
2021-01-26 02:37:36 +03:00
|
|
|
if (previous_mode() == PreviousMode::KernelMode) {
|
2020-08-02 05:04:56 +03:00
|
|
|
++m_process->m_ticks_in_kernel;
|
2020-12-04 08:12:50 +03:00
|
|
|
++m_ticks_in_kernel;
|
|
|
|
} else {
|
|
|
|
++m_process->m_ticks_in_user;
|
|
|
|
++m_ticks_in_user;
|
|
|
|
}
|
2019-03-24 00:03:17 +03:00
|
|
|
return --m_ticks_left;
|
|
|
|
}
|
|
|
|
|
2020-12-08 07:29:41 +03:00
|
|
|
// If any deliverable signal is pending, dispatches one and then acts on the
// result (yield or process termination). Dispatch happens under the
// scheduler lock; the follow-up actions deliberately run after it's dropped,
// since both may reschedule.
void Thread::check_dispatch_pending_signal()
{
    auto result = DispatchSignalResult::Continue;
    {
        ScopedSpinLock scheduler_lock(g_scheduler_lock);
        if (pending_signals_for_state()) {
            ScopedSpinLock lock(m_lock);
            result = dispatch_one_pending_signal();
        }
    }

    switch (result) {
    case DispatchSignalResult::Yield:
        yield_while_not_holding_big_lock();
        break;
    case DispatchSignalResult::Terminate:
        process().die();
        break;
    default:
        break;
    }
}
|
|
|
|
|
2020-09-09 05:37:15 +03:00
|
|
|
// Returns whether `signal` is pending and deliverable in the thread's
// current state. Signals are tracked in a bitmask where bit (signal - 1)
// marks that signal as pending.
bool Thread::has_pending_signal(u8 signal) const
{
    ScopedSpinLock scheduler_lock(g_scheduler_lock);
    const u32 signal_bit = 1 << (signal - 1);
    return (pending_signals_for_state() & signal_bit) != 0;
}
|
|
|
|
|
|
|
|
// Returns the bitmask of currently deliverable pending signals.
// Taken under the scheduler lock because the deliverable set depends on
// the thread's current state (see pending_signals_for_state()).
u32 Thread::pending_signals() const
{
    ScopedSpinLock scheduler_lock(g_scheduler_lock);
    return pending_signals_for_state();
}
|
|
|
|
|
|
|
|
// Returns the pending-signal bitmask filtered by what the current thread
// state allows: a Stopped thread only sees SIGCONT/SIGKILL/SIGTRAP, and a
// thread inside a page fault handler sees nothing (see Thread::unblock).
// Caller must hold g_scheduler_lock.
u32 Thread::pending_signals_for_state() const
{
    ASSERT(g_scheduler_lock.own_lock());
    constexpr u32 stopped_signal_mask = (1 << (SIGCONT - 1)) | (1 << (SIGKILL - 1)) | (1 << (SIGTRAP - 1));
    if (is_handling_page_fault())
        return 0;
    return m_state != Stopped ? m_pending_signals : m_pending_signals & stopped_signal_mask;
}
|
|
|
|
|
2020-02-01 12:27:25 +03:00
|
|
|
// Marks `signal` pending on this thread and, if the thread's state allows
// delivery, resumes (if Stopped) or unblocks it so the signal can be
// handled. `sender` is only used for debug logging; null means the kernel.
void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
{
    ASSERT(signal < 32);
    ScopedSpinLock scheduler_lock(g_scheduler_lock);

    // FIXME: Figure out what to do for masked signals. Should we also ignore them here?
    if (should_ignore_signal(signal)) {
        dbgln<SIGNAL_DEBUG>("Signal {} was ignored by {}", signal, process());
        return;
    }

    if constexpr (SIGNAL_DEBUG) {
        if (sender)
            dbgln("Signal: {} sent {} to {}", *sender, signal, process());
        else
            dbgln("Signal: Kernel send {} to {}", signal, process());
    }

    // Record the signal (bit signal-1) and update the fast-path flag that
    // tells the scheduler whether any unmasked signal awaits delivery.
    m_pending_signals |= 1 << (signal - 1);
    m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);

    if (m_state == Stopped) {
        ScopedSpinLock lock(m_lock);
        // Only resume if the signal is actually deliverable in the Stopped
        // state (SIGCONT/SIGKILL/SIGTRAP — see pending_signals_for_state()).
        if (pending_signals_for_state()) {
            dbgln<SIGNAL_DEBUG>("Signal: Resuming stopped {} to deliver signal {}", *this, signal);
            resume_from_stopped();
        }
    } else {
        ScopedSpinLock block_lock(m_block_lock);
        dbgln<SIGNAL_DEBUG>("Signal: Unblocking {} to deliver signal {}", *this, signal);
        unblock(signal);
    }
}
|
|
|
|
|
2020-09-09 05:37:15 +03:00
|
|
|
// Replaces the thread's signal mask wholesale and returns the previous
// mask (as sigprocmask(SIG_SETMASK) semantics require).
u32 Thread::update_signal_mask(u32 signal_mask)
{
    ScopedSpinLock scheduler_lock(g_scheduler_lock);
    const auto old_mask = m_signal_mask;
    m_signal_mask = signal_mask;
    // Recompute whether any pending signal just became deliverable.
    m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
    return old_mask;
}
|
|
|
|
|
|
|
|
// Returns a consistent snapshot of the thread's signal mask.
u32 Thread::signal_mask() const
{
    ScopedSpinLock scheduler_lock(g_scheduler_lock);
    return m_signal_mask;
}
|
|
|
|
|
|
|
|
// Adds or removes `signal_set` bits from the signal mask and returns the
// previous mask.
// NOTE(review): when `block` is true this CLEARS bits from the mask
// (which makes those signals deliverable), which looks inverted relative
// to POSIX SIG_BLOCK semantics — confirm against the sigprocmask caller
// before relying on the parameter name.
u32 Thread::signal_mask_block(sigset_t signal_set, bool block)
{
    ScopedSpinLock lock(g_scheduler_lock);
    auto previous_signal_mask = m_signal_mask;
    if (block)
        m_signal_mask &= ~signal_set;
    else
        m_signal_mask |= signal_set;
    // Recompute the "any deliverable signal pending" fast-path flag.
    m_have_any_unmasked_pending_signals.store(pending_signals_for_state() & ~m_signal_mask, AK::memory_order_release);
    return previous_signal_mask;
}
|
|
|
|
|
|
|
|
void Thread::clear_signals()
|
|
|
|
{
|
|
|
|
ScopedSpinLock lock(g_scheduler_lock);
|
|
|
|
m_signal_mask = 0;
|
|
|
|
m_pending_signals = 0;
|
|
|
|
m_have_any_unmasked_pending_signals.store(false, AK::memory_order_release);
|
|
|
|
}
|
|
|
|
|
2019-10-07 12:22:50 +03:00
|
|
|
// Certain exceptions, such as SIGSEGV and SIGILL, put a
|
|
|
|
// thread into a state where the signal handler must be
|
|
|
|
// invoked immediately, otherwise it will continue to fault.
|
|
|
|
// This function should be used in an exception handler to
|
|
|
|
// ensure that when the thread resumes, it's executing in
|
|
|
|
// the appropriate signal handler.
|
|
|
|
void Thread::send_urgent_signal_to_self(u8 signal)
|
|
|
|
{
|
2020-07-03 14:19:50 +03:00
|
|
|
ASSERT(Thread::current() == this);
|
2020-11-30 02:05:27 +03:00
|
|
|
DispatchSignalResult result;
|
|
|
|
{
|
|
|
|
ScopedSpinLock lock(g_scheduler_lock);
|
|
|
|
result = dispatch_signal(signal);
|
|
|
|
}
|
|
|
|
if (result == DispatchSignalResult::Yield)
|
|
|
|
yield_without_holding_big_lock();
|
2019-10-07 12:22:50 +03:00
|
|
|
}
|
|
|
|
|
2020-11-30 02:05:27 +03:00
|
|
|
// Finds the lowest-numbered pending, unmasked signal for the current state
// and dispatches it. Returns Continue when nothing is deliverable.
// Caller must hold m_lock.
DispatchSignalResult Thread::dispatch_one_pending_signal()
{
    ASSERT(m_lock.own_lock());
    u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
    if (signal_candidates == 0)
        return DispatchSignalResult::Continue;

    // Scan upward for the first set candidate bit (bit N-1 <=> signal N).
    u8 signal = 1;
    while (signal < 32 && !(signal_candidates & (1 << (signal - 1))))
        ++signal;
    return dispatch_signal(signal);
}
|
|
|
|
|
2020-11-30 02:05:27 +03:00
|
|
|
// Dispatches the specific signal if (and only if) it is currently pending
// and not masked; otherwise returns Continue without side effects.
DispatchSignalResult Thread::try_dispatch_one_pending_signal(u8 signal)
{
    ASSERT(signal != 0);
    ScopedSpinLock scheduler_lock(g_scheduler_lock);
    ScopedSpinLock lock(m_lock);
    u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
    u32 signal_bit = 1 << (signal - 1);
    if ((signal_candidates & signal_bit) == 0)
        return DispatchSignalResult::Continue;
    return dispatch_signal(signal);
}
|
|
|
|
|
2019-06-07 18:13:23 +03:00
|
|
|
// What the kernel does with a signal when the process has not installed a
// handler for it (see default_signal_action() below and signal(7)).
enum class DefaultSignalAction {
    Terminate, // Kill the process.
    Ignore,    // Discard the signal.
    DumpCore,  // Kill the process and produce a coredump/backtrace.
    Stop,      // Stop (suspend) the process.
    Continue,  // Resume a stopped process.
};
|
|
|
|
|
Kernel: Mark compilation-unit-only functions as static
This enables a nice warning in case a function becomes dead code. Also, in case
of signal_trampoline_dummy, marking it external (non-static) prevents it from
being 'optimized away', which would lead to surprising and weird linker errors.
I found these places by using -Wmissing-declarations.
The Kernel still shows these issues, which I think are false-positives,
but don't want to touch:
- Kernel/Arch/i386/CPU.cpp:1081:17: void Kernel::enter_thread_context(Kernel::Thread*, Kernel::Thread*)
- Kernel/Arch/i386/CPU.cpp:1170:17: void Kernel::context_first_init(Kernel::Thread*, Kernel::Thread*, Kernel::TrapFrame*)
- Kernel/Arch/i386/CPU.cpp:1304:16: u32 Kernel::do_init_context(Kernel::Thread*, u32)
- Kernel/Arch/i386/CPU.cpp:1347:17: void Kernel::pre_init_finished()
- Kernel/Arch/i386/CPU.cpp:1360:17: void Kernel::post_init_finished()
No idea, not gonna touch it.
- Kernel/init.cpp:104:30: void Kernel::init()
- Kernel/init.cpp:167:30: void Kernel::init_ap(u32, Kernel::Processor*)
- Kernel/init.cpp:184:17: void Kernel::init_finished(u32)
Called by boot.S.
- Kernel/init.cpp:383:16: int Kernel::__cxa_atexit(void (*)(void*), void*, void*)
- Kernel/StdLib.cpp:285:19: void __cxa_pure_virtual()
- Kernel/StdLib.cpp:300:19: void __stack_chk_fail()
- Kernel/StdLib.cpp:305:19: void __stack_chk_fail_local()
Not sure how to tell the compiler that the compiler is already using them.
Also, maybe __cxa_atexit should go into StdLib.cpp?
- Kernel/Modules/TestModule.cpp:31:17: void module_init()
- Kernel/Modules/TestModule.cpp:40:17: void module_fini()
Could maybe go into a new header. This would also provide type-checking for new modules.
2020-08-10 22:12:13 +03:00
|
|
|
// Maps a signal number to its kernel-default disposition, mirroring the
// classification in signal(7). Every valid signal must hit one of the cases;
// falling out of the switch is a kernel bug.
static DefaultSignalAction default_signal_action(u8 signal)
{
    ASSERT(signal && signal < NSIG);

    switch (signal) {
    case SIGHUP:
    case SIGINT:
    case SIGKILL:
    case SIGPIPE:
    case SIGALRM:
    case SIGUSR1:
    case SIGUSR2:
    case SIGVTALRM:
    case SIGSTKFLT:
    case SIGIO:
    case SIGPROF:
    case SIGTERM:
        return DefaultSignalAction::Terminate;
    case SIGCHLD:
    case SIGURG:
    case SIGWINCH:
    case SIGINFO:
        return DefaultSignalAction::Ignore;
    case SIGQUIT:
    case SIGILL:
    case SIGTRAP:
    case SIGABRT:
    case SIGBUS:
    case SIGFPE:
    case SIGSEGV:
    case SIGXCPU:
    case SIGXFSZ:
    case SIGSYS:
        return DefaultSignalAction::DumpCore;
    case SIGCONT:
        return DefaultSignalAction::Continue;
    case SIGSTOP:
    case SIGTSTP:
    case SIGTTIN:
    case SIGTTOU:
        return DefaultSignalAction::Stop;
    }
    ASSERT_NOT_REACHED();
}
|
|
|
|
|
2019-07-08 19:59:48 +03:00
|
|
|
// Returns true if delivering this signal would be a no-op for this thread:
// either the handler is explicitly SIG_IGN, or there is no handler and the
// default action for the signal is Ignore.
bool Thread::should_ignore_signal(u8 signal) const
{
    ASSERT(signal < 32);
    auto& action = m_signal_action_data[signal];
    // No handler installed: fall back to the default disposition.
    if (action.handler_or_sigaction.is_null())
        return default_signal_action(signal) == DefaultSignalAction::Ignore;
    // Handler installed: ignored only if it is SIG_IGN.
    return action.handler_or_sigaction.as_ptr() == SIG_IGN;
}
|
|
|
|
|
2019-10-07 12:22:50 +03:00
|
|
|
// Returns true if userspace installed any handler (including SIG_IGN)
// for the given signal on this thread.
bool Thread::has_signal_handler(u8 signal) const
{
    ASSERT(signal < 32);
    return !m_signal_action_data[signal].handler_or_sigaction.is_null();
}
|
|
|
|
|
2020-09-12 06:11:07 +03:00
|
|
|
// Pushes a 32-bit value onto a userspace stack, decrementing the caller's
// stack-pointer shadow. Returns false if the user page was not writable.
static bool push_value_on_user_stack(u32* stack, u32 data)
{
    *stack -= sizeof(u32);
    return copy_to_user(reinterpret_cast<u32*>(*stack), &data);
}
|
|
|
|
|
2020-08-14 19:24:31 +03:00
|
|
|
// Transitions a Stopped thread back into the state it was in before it was
// stopped. Caller must hold g_scheduler_lock.
void Thread::resume_from_stopped()
{
    ASSERT(is_stopped());
    ASSERT(m_stop_state != State::Invalid);
    ASSERT(g_scheduler_lock.own_lock());
    if (m_stop_state == Blocked) {
        // The thread was blocked when it got stopped; check whether the
        // blocker is still active before deciding where to resume.
        ScopedSpinLock block_lock(m_block_lock);
        if (m_blocker) {
            // Hasn't been unblocked yet
            set_state(Blocked, 0);
        } else {
            // Was unblocked while stopped
            set_state(Runnable);
        }
    } else {
        // Restore whatever non-blocked state we saved when stopping.
        set_state(m_stop_state, 0);
    }
}
|
|
|
|
|
2020-11-30 02:05:27 +03:00
|
|
|
// Delivers one signal to this thread: applies the default action when no
// handler is installed, handles tracer/stop semantics, or rewrites the
// thread's saved register state so it returns to userspace inside the
// installed signal handler (via the signal trampoline).
// Caller must hold g_scheduler_lock with interrupts disabled.
DispatchSignalResult Thread::dispatch_signal(u8 signal)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_lock.own_lock());
    ASSERT(signal > 0 && signal <= 32);
    ASSERT(process().is_user_process());
    ASSERT(this == Thread::current());

#if SIGNAL_DEBUG
    klog() << "signal: dispatch signal " << signal << " to " << *this << " state: " << state_string();
#endif

    if (m_state == Invalid || !is_initialized()) {
        // Thread has barely been created, we need to wait until it is
        // at least in Runnable state and is_initialized() returns true,
        // which indicates that it is fully set up an we actually have
        // a register state on the stack that we can modify
        return DispatchSignalResult::Deferred;
    }

    ASSERT(previous_mode() == PreviousMode::UserMode);

    auto& action = m_signal_action_data[signal];
    // FIXME: Implement SA_SIGINFO signal handlers.
    ASSERT(!(action.flags & SA_SIGINFO));

    // Mark this signal as handled.
    m_pending_signals &= ~(1 << (signal - 1));
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);

    auto& process = this->process();
    auto tracer = process.tracer();
    if (signal == SIGSTOP || (tracer && default_signal_action(signal) == DefaultSignalAction::DumpCore)) {
        // NOTE: fixed log message typo ("sopping" -> "stopping").
        dbgln<SIGNAL_DEBUG>("signal: signal {} stopping thread {}", signal, *this);
        set_state(State::Stopped, signal);
        return DispatchSignalResult::Yield;
    }

    if (signal == SIGCONT) {
        dbgln("signal: SIGCONT resuming {}", *this);
    } else {
        if (tracer) {
            // when a thread is traced, it should be stopped whenever it receives a signal
            // the tracer is notified of this by using waitpid()
            // only "pending signals" from the tracer are sent to the tracee
            if (!tracer->has_pending_signal(signal)) {
                dbgln("signal: {} stopping {} for tracer", signal, *this);
                set_state(Stopped, signal);
                return DispatchSignalResult::Yield;
            }
            tracer->unset_signal(signal);
        }
    }

    auto handler_vaddr = action.handler_or_sigaction;
    if (handler_vaddr.is_null()) {
        // No handler installed: perform the default action.
        switch (default_signal_action(signal)) {
        case DefaultSignalAction::Stop:
            set_state(Stopped, signal);
            return DispatchSignalResult::Yield;
        case DefaultSignalAction::DumpCore:
            process.set_dump_core(true);
            process.for_each_thread([](auto& thread) {
                thread.set_dump_backtrace_on_finalization();
                return IterationDecision::Continue;
            });
            [[fallthrough]];
        case DefaultSignalAction::Terminate:
            m_process->terminate_due_to_signal(signal);
            return DispatchSignalResult::Terminate;
        case DefaultSignalAction::Ignore:
            // should_ignore_signal() filters these out before dispatch.
            ASSERT_NOT_REACHED();
        case DefaultSignalAction::Continue:
            return DispatchSignalResult::Continue;
        }
        ASSERT_NOT_REACHED();
    }

    if (handler_vaddr.as_ptr() == SIG_IGN) {
#if SIGNAL_DEBUG
        klog() << "signal: " << *this << " ignored signal " << signal;
#endif
        return DispatchSignalResult::Continue;
    }

    ASSERT(previous_mode() == PreviousMode::UserMode);
    ASSERT(current_trap());

    // Make sure we operate on this thread's address space when writing
    // to its userspace stack below.
    ProcessPagingScope paging_scope(m_process);

    // Compute the mask the handler runs under: the sigaction mask, plus the
    // signal itself unless SA_NODEFER asked for re-entrant delivery.
    u32 old_signal_mask = m_signal_mask;
    u32 new_signal_mask = action.mask;
    if (action.flags & SA_NODEFER)
        new_signal_mask &= ~(1 << (signal - 1));
    else
        new_signal_mask |= 1 << (signal - 1);

    m_signal_mask |= new_signal_mask;
    m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);

    // Builds the userspace stack frame the signal trampoline expects:
    // saved flags/eip/GPRs, the old signal mask, then the handler arguments.
    auto setup_stack = [&](RegisterState& state) {
        u32* stack = &state.userspace_esp;
        u32 old_esp = *stack;
        u32 ret_eip = state.eip;
        u32 ret_eflags = state.eflags;

#if SIGNAL_DEBUG
        klog() << "signal: setting up user stack to return to eip: " << String::format("%p", (void*)ret_eip) << " esp: " << String::format("%p", (void*)old_esp);
#endif

        // Align the stack to 16 bytes.
        // Note that we push 56 bytes (4 * 14) on to the stack,
        // so we need to account for this here.
        u32 stack_alignment = (*stack - 56) % 16;
        *stack -= stack_alignment;

        push_value_on_user_stack(stack, ret_eflags);

        push_value_on_user_stack(stack, ret_eip);
        push_value_on_user_stack(stack, state.eax);
        push_value_on_user_stack(stack, state.ecx);
        push_value_on_user_stack(stack, state.edx);
        push_value_on_user_stack(stack, state.ebx);
        push_value_on_user_stack(stack, old_esp);
        push_value_on_user_stack(stack, state.ebp);
        push_value_on_user_stack(stack, state.esi);
        push_value_on_user_stack(stack, state.edi);

        // PUSH old_signal_mask
        push_value_on_user_stack(stack, old_signal_mask);

        push_value_on_user_stack(stack, signal);
        push_value_on_user_stack(stack, handler_vaddr.get());
        push_value_on_user_stack(stack, 0); //push fake return address

        ASSERT((*stack % 16) == 0);
    };

    // We now place the thread state on the userspace stack.
    // Note that we use a RegisterState.
    // Conversely, when the thread isn't blocking the RegisterState may not be
    // valid (fork, exec etc) but the tss will, so we use that instead.
    auto& regs = get_register_dump_from_stack();
    setup_stack(regs);
    regs.eip = g_return_to_ring3_from_signal_trampoline.get();

#if SIGNAL_DEBUG
    dbgln("signal: Thread in state '{}' has been primed with signal handler {:04x}:{:08x} to deliver {}", state_string(), m_tss.cs, m_tss.eip, signal);
#endif
    return DispatchSignalResult::Continue;
}
|
|
|
|
|
|
|
|
void Thread::set_default_signal_dispositions()
|
|
|
|
{
|
|
|
|
// FIXME: Set up all the right default actions. See signal(7).
|
|
|
|
memset(&m_signal_action_data, 0, sizeof(m_signal_action_data));
|
2020-01-20 15:06:41 +03:00
|
|
|
m_signal_action_data[SIGCHLD].handler_or_sigaction = VirtualAddress(SIG_IGN);
|
|
|
|
m_signal_action_data[SIGWINCH].handler_or_sigaction = VirtualAddress(SIG_IGN);
|
2019-03-24 00:03:17 +03:00
|
|
|
}
|
|
|
|
|
2020-09-12 06:11:07 +03:00
|
|
|
// Pushes one machine word onto this thread's (TSS) stack.
// Returns false if the target page could not be written.
bool Thread::push_value_on_stack(FlatPtr value)
{
    m_tss.esp -= 4;
    auto* stack_ptr = reinterpret_cast<FlatPtr*>(m_tss.esp);
    return copy_to_user(stack_ptr, &value);
}
|
|
|
|
|
2020-02-16 02:15:37 +03:00
|
|
|
// Returns the register state captured by the innermost (most recent) trap
// on this thread's kernel stack, walking the trap chain to its end.
RegisterState& Thread::get_register_dump_from_stack()
{
    auto* trap = current_trap();

    // We should *always* have a trap. If we don't we're probably a kernel
    // thread that hasn't been pre-empted. If we want to support this, we
    // need to capture the registers probably into m_tss and return it
    ASSERT(trap);

    // Follow the chain of nested traps to the innermost one.
    while (trap->next_trap)
        trap = trap->next_trap;

    return *trap->regs;
}
|
|
|
|
|
2020-09-27 17:53:35 +03:00
|
|
|
// Creates a copy of this thread inside the given process (used by fork()),
// duplicating signal actions, signal mask, FPU state and TLS pointer.
// Returns a null RefPtr if thread construction failed.
RefPtr<Thread> Thread::clone(Process& process)
{
    auto child = adopt(*new Thread(process));
    if (!child->was_created()) {
        // We failed to clone this thread
        return {};
    }
    memcpy(child->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
    child->m_signal_mask = m_signal_mask;
    memcpy(child->m_fpu_state, m_fpu_state, sizeof(FPUState));
    child->m_thread_specific_data = m_thread_specific_data;
    return child;
}
|
|
|
|
|
2020-12-09 07:18:45 +03:00
|
|
|
// Transitions this thread to a new scheduler state, handling all the side
// effects: scheduler-list bookkeeping, stopping/resuming peer threads of the
// same process (job control), waiter notification, and finalizer wakeup.
// stop_signal is the signal that caused a Stopped transition (0 otherwise).
// Caller must hold g_scheduler_lock.
void Thread::set_state(State new_state, u8 stop_signal)
{
    State previous_state;
    ASSERT(g_scheduler_lock.own_lock());
    if (new_state == m_state)
        return;

    {
        // m_lock protects m_state itself; held only for the flip.
        ScopedSpinLock thread_lock(m_lock);
        previous_state = m_state;
        if (previous_state == Invalid) {
            // If we were *just* created, we may have already pending signals
            if (has_unmasked_pending_signals()) {
                dbgln<THREAD_DEBUG>("Dispatch pending signals to new thread {}", *this);
                dispatch_one_pending_signal();
            }
        }

        m_state = new_state;
        dbgln<THREAD_DEBUG>("Set thread {} state to {}", *this, state_string());
    }

    // pid 0 threads (e.g. the colonel) are not tracked in scheduler lists.
    if (m_process->pid() != 0) {
        update_state_for_thread(previous_state);
        ASSERT(g_scheduler_data->has_thread(*this));
    }

    if (previous_state == Stopped) {
        m_stop_state = State::Invalid;
        auto& process = this->process();
        // If the process as a whole just left the stopped state, resume all
        // other stopped peer threads and wake anyone wait()ing for SIGCONT.
        if (process.set_stopped(false) == true) {
            process.for_each_thread([&](auto& thread) {
                if (&thread == this || !thread.is_stopped())
                    return IterationDecision::Continue;
                dbgln<THREAD_DEBUG>("Resuming peer thread {}", thread);
                thread.resume_from_stopped();
                return IterationDecision::Continue;
            });
            process.unblock_waiters(Thread::WaitBlocker::UnblockFlags::Continued);
        }
    }

    if (m_state == Stopped) {
        // We don't want to restore to Running state, only Runnable!
        m_stop_state = previous_state != Running ? previous_state : Runnable;
        auto& process = this->process();
        // If the process just became stopped, stop all other peer threads
        // too and notify wait()ers about the stop.
        if (process.set_stopped(true) == false) {
            process.for_each_thread([&](auto& thread) {
                if (&thread == this || thread.is_stopped())
                    return IterationDecision::Continue;
                dbgln<THREAD_DEBUG>("Stopping peer thread {}", thread);
                thread.set_state(Stopped, stop_signal);
                return IterationDecision::Continue;
            });
            process.unblock_waiters(Thread::WaitBlocker::UnblockFlags::Stopped, stop_signal);
        }
    } else if (m_state == Dying) {
        ASSERT(previous_state != Blocked);
        if (this != Thread::current() && is_finalizable()) {
            // Some other thread set this thread to Dying, notify the
            // finalizer right away as it can be cleaned up now
            Scheduler::notify_finalizer();
        }
    }
}
|
2019-07-25 22:02:19 +03:00
|
|
|
|
2020-08-03 01:59:01 +03:00
|
|
|
// Moves this thread between the scheduler's per-state thread lists after a
// state change. Requires the scheduler lock and disabled interrupts.
void Thread::update_state_for_thread(Thread::State previous_state)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_data);
    ASSERT(g_scheduler_lock.own_lock());
    auto& old_list = g_scheduler_data->thread_list_for_state(previous_state);
    auto& new_list = g_scheduler_data->thread_list_for_state(state());

    // Detach from the old list unless both states share one list.
    if (&old_list != &new_list)
        old_list.remove(*this);

    // Only append if we're not already tracked in the target list.
    if (!new_list.contains(*this))
        new_list.append(*this);
}
|
|
|
|
|
2020-08-02 05:04:56 +03:00
|
|
|
// Public entry point for generating a symbolicated kernel backtrace of
// this thread; the real work happens in backtrace_impl().
String Thread::backtrace()
{
    return backtrace_impl();
}
|
|
|
|
|
2020-01-19 12:10:46 +03:00
|
|
|
// One backtrace frame: a raw return address plus the kernel symbol it
// resolved to (nullptr when no symbol matched, e.g. userspace addresses).
struct RecognizedSymbol {
    u32 address;
    const KernelSymbol* symbol { nullptr };
};
|
|
|
|
|
2020-12-25 02:59:15 +03:00
|
|
|
// Appends one human-readable backtrace line for the given frame.
// Kernel addresses are masked as 0xdeadc0de for non-superuser processes.
// Returns false for a null address, which terminates the backtrace.
static bool symbolicate(const RecognizedSymbol& symbol, const Process& process, StringBuilder& builder)
{
    if (!symbol.address)
        return false;

    bool mask_kernel_addresses = !process.is_superuser();
    if (!symbol.symbol) {
        // No kernel symbol: print raw user addresses, mask everything else.
        if (!is_user_address(VirtualAddress(symbol.address))) {
            builder.append("0xdeadc0de\n");
        } else {
            builder.appendff("{:p}\n", symbol.address);
        }
        return true;
    }
    unsigned offset = symbol.address - symbol.symbol->address;
    // An implausibly large offset past the last kernel symbol means the
    // address didn't really belong to that symbol; print it bare.
    if (symbol.symbol->address == g_highest_kernel_symbol_address && offset > 4096) {
        builder.appendf("%p\n", (void*)(mask_kernel_addresses ? 0xdeadc0de : symbol.address));
    } else {
        builder.appendf("%p %s +%u\n", (void*)(mask_kernel_addresses ? 0xdeadc0de : symbol.address), demangle(symbol.symbol->name).characters(), offset);
    }
    return true;
}
|
|
|
|
|
2020-07-03 21:12:34 +03:00
|
|
|
// Captures this thread's stack trace and renders it as a symbolicated,
// newline-separated string.
String Thread::backtrace_impl()
{
    Vector<RecognizedSymbol, 128> recognized_symbols;

    auto& process = const_cast<Process&>(this->process());
    auto stack_trace = Processor::capture_stack_trace(*this);
    ASSERT(!g_scheduler_lock.own_lock());
    ProcessPagingScope paging_scope(process);
    for (auto& frame : stack_trace) {
        if (is_user_range(VirtualAddress(frame), sizeof(FlatPtr) * 2)) {
            // Userspace frame: don't look it up in the kernel symbol table.
            // (Previously both branches symbolicated with kernel symbols,
            // attributing bogus kernel symbols to user addresses.)
            recognized_symbols.append({ frame, nullptr });
        } else {
            recognized_symbols.append({ frame, symbolicate_kernel_address(frame) });
        }
    }

    StringBuilder builder;
    for (auto& symbol : recognized_symbols) {
        // symbolicate() returns false on a null frame, ending the trace.
        if (!symbolicate(symbol, process, builder))
            break;
    }
    return builder.to_string();
}
|
2019-09-07 16:50:44 +03:00
|
|
|
|
2020-04-11 21:39:27 +03:00
|
|
|
// Walks the frame-pointer chain starting at (ebp, eip) and collects raw
// return addresses, stopping at the first unreadable frame or after
// PerformanceEvent::max_stack_frame_count entries. Used by the profiler.
Vector<FlatPtr> Thread::raw_backtrace(FlatPtr ebp, FlatPtr eip) const
{
    InterruptDisabler disabler;
    auto& process = const_cast<Process&>(this->process());
    // Switch into the target process's address space so the user stack
    // is mapped while we walk it.
    ProcessPagingScope paging_scope(process);
    Vector<FlatPtr, PerformanceEvent::max_stack_frame_count> backtrace;
    backtrace.append(eip);
    FlatPtr stack_ptr_copy;
    FlatPtr stack_ptr = (FlatPtr)ebp;
    while (stack_ptr) {
        void* fault_at;
        // Each frame is [saved ebp][return address]; read both with
        // fault-tolerant copies since the stack may be partially unmapped.
        if (!safe_memcpy(&stack_ptr_copy, (void*)stack_ptr, sizeof(FlatPtr), fault_at))
            break;
        FlatPtr retaddr;
        if (!safe_memcpy(&retaddr, (void*)(stack_ptr + sizeof(FlatPtr)), sizeof(FlatPtr), fault_at))
            break;
        backtrace.append(retaddr);
        if (backtrace.size() == PerformanceEvent::max_stack_frame_count)
            break;
        stack_ptr = stack_ptr_copy;
    }
    return backtrace;
}
|
|
|
|
|
2020-12-25 18:45:35 +03:00
|
|
|
// Alignment required for this thread's TLS region: the stricter of the
// master TLS alignment and what ThreadSpecificData itself needs.
size_t Thread::thread_specific_region_alignment() const
{
    auto master_tls_alignment = process().m_master_tls_alignment;
    return max(master_tls_alignment, alignof(ThreadSpecificData));
}
|
|
|
|
|
|
|
|
// Total size of the per-thread TLS region: the (aligned) master TLS image
// followed by the ThreadSpecificData header.
size_t Thread::thread_specific_region_size() const
{
    auto aligned_tls_size = align_up_to(process().m_master_tls_size, thread_specific_region_alignment());
    return aligned_tls_size + sizeof(ThreadSpecificData);
}
|
|
|
|
|
2020-09-16 20:47:47 +03:00
|
|
|
// Allocates and initializes this thread's TLS region: maps a fresh
// read/write region, places the ThreadSpecificData header after the TLS
// image, and copies the process's master TLS template into place.
// Returns KSuccess, or ENOMEM when no address range is available.
KResult Thread::make_thread_specific_region(Badge<Process>)
{
    // The process may not require a TLS region
    if (!process().m_master_tls_region)
        return KSuccess;

    auto range = process().allocate_range({}, thread_specific_region_size());
    if (!range.is_valid())
        return ENOMEM;

    auto region_or_error = process().allocate_region(range, "Thread-specific", PROT_READ | PROT_WRITE);
    if (region_or_error.is_error())
        return region_or_error.error();

    // The region belongs to userspace; disable SMAP while we write it.
    SmapDisabler disabler;
    // Layout: [TLS image (aligned)] [ThreadSpecificData]; self-pointer is
    // required by the i386 TLS ABI (%gs-relative access).
    auto* thread_specific_data = (ThreadSpecificData*)region_or_error.value()->vaddr().offset(align_up_to(process().m_master_tls_size, thread_specific_region_alignment())).as_ptr();
    auto* thread_local_storage = (u8*)((u8*)thread_specific_data) - align_up_to(process().m_master_tls_size, process().m_master_tls_alignment);
    m_thread_specific_data = VirtualAddress(thread_specific_data);
    thread_specific_data->self = thread_specific_data;
    if (process().m_master_tls_size)
        memcpy(thread_local_storage, process().m_master_tls_region.unsafe_ptr()->vaddr().as_ptr(), process().m_master_tls_size);
    return KSuccess;
}
|
2019-10-13 15:36:55 +03:00
|
|
|
|
|
|
|
// Legacy LogStream formatter: prints a thread as "name(pid:tid)".
// Kept in sync with AK::Formatter<Kernel::Thread> below.
const LogStream& operator<<(const LogStream& stream, const Thread& value)
{
    return stream << value.process().name() << "(" << value.pid().value() << ":" << value.tid().value() << ")";
}
|
2019-11-06 18:26:51 +03:00
|
|
|
|
2020-09-27 17:53:35 +03:00
|
|
|
// Looks up a live thread by its thread ID. Returns a null RefPtr when no
// thread with that tid exists.
RefPtr<Thread> Thread::from_tid(ThreadID tid)
{
    RefPtr<Thread> found_thread;
    ScopedSpinLock lock(g_scheduler_lock);
    Thread::for_each([&](auto& candidate) {
        if (candidate.tid() != tid)
            return IterationDecision::Continue;
        found_thread = &candidate;
        return IterationDecision::Break;
    });
    return found_thread;
}
|
2020-02-16 03:27:42 +03:00
|
|
|
|
2020-02-18 15:44:27 +03:00
|
|
|
// Resets this thread's FPU state to the processor's pristine ("clean")
// FPU snapshot, e.g. on exec.
void Thread::reset_fpu_state()
{
    memcpy(m_fpu_state, &Processor::current().clean_fpu_state(), sizeof(FPUState));
}
|
|
|
|
|
2020-12-09 07:18:45 +03:00
|
|
|
// A thread should be stopped whenever its whole process is stopped
// (job control is tracked at process granularity).
bool Thread::should_be_stopped() const
{
    return process().is_stopped();
}
|
|
|
|
|
2020-02-16 03:27:42 +03:00
|
|
|
}
|
2021-01-09 02:42:44 +03:00
|
|
|
|
|
|
|
// dbgln()/format() support: prints a thread as "name(pid:tid)",
// matching the legacy LogStream operator<< output.
void AK::Formatter<Kernel::Thread>::format(FormatBuilder& builder, const Kernel::Thread& value)
{
    return AK::Formatter<FormatString>::format(
        builder,
        "{}({}:{})", value.process().name(), value.pid().value(), value.tid().value());
}
|