2020-01-18 11:38:21 +03:00
/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
2019-11-29 16:55:07 +03:00
# include <AK/Demangle.h>
2019-07-25 22:02:19 +03:00
# include <AK/StringBuilder.h>
2020-01-05 20:00:15 +03:00
# include <Kernel/Arch/i386/CPU.h>
2019-06-07 10:36:51 +03:00
# include <Kernel/FileSystem/FileDescription.h>
2020-02-16 03:27:42 +03:00
# include <Kernel/KSyms.h>
2019-06-07 12:43:58 +03:00
# include <Kernel/Process.h>
2020-01-19 15:53:22 +03:00
# include <Kernel/Profiling.h>
2019-06-07 12:43:58 +03:00
# include <Kernel/Scheduler.h>
# include <Kernel/Thread.h>
2020-03-28 11:47:16 +03:00
# include <Kernel/ThreadTracer.h>
2020-04-26 12:32:37 +03:00
# include <Kernel/TimerQueue.h>
2019-04-03 16:13:07 +03:00
# include <Kernel/VM/MemoryManager.h>
2020-02-16 03:33:41 +03:00
# include <Kernel/VM/PageDirectory.h>
2020-03-01 17:38:09 +03:00
# include <Kernel/VM/ProcessPagingScope.h>
2019-03-24 00:03:17 +03:00
# include <LibC/signal_numbers.h>
2020-04-11 21:24:07 +03:00
# include <LibELF/Loader.h>
2019-03-24 00:03:17 +03:00
2019-05-22 14:23:41 +03:00
//#define SIGNAL_DEBUG
2020-02-01 12:27:25 +03:00
//#define THREAD_DEBUG
2019-05-22 14:23:41 +03:00
2020-02-16 03:27:42 +03:00
namespace Kernel {
2019-05-18 19:31:36 +03:00
// Global registry of every live Thread. Lazily allocated on first use
// and intentionally never freed. Callers must have interrupts disabled.
HashTable<Thread*>& thread_table()
{
    ASSERT_INTERRUPTS_DISABLED();
    static HashTable<Thread*>* s_table;
    if (s_table == nullptr)
        s_table = new HashTable<Thread*>;
    return *s_table;
}
2019-03-24 00:03:17 +03:00
// Constructs a new thread belonging to `process`.
// Allocates a TID, sets up default signal dispositions, FPU state, the TSS
// (segment selectors, eflags, cr3) and a dedicated kernel stack, then
// registers the thread with the global thread table and the scheduler.
Thread::Thread(Process& process)
    : m_process(process)
    , m_name(process.name())
{
    // Atomically bump the process thread count; a previous count of zero
    // means we are the very first thread of this process.
    if (m_process.m_thread_count.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel) == 0) {
        // First thread gets TID == PID
        m_tid = process.pid();
    } else {
        m_tid = Process::allocate_pid();
    }
#ifdef THREAD_DEBUG
    dbg() << "Created new thread " << process.name() << "(" << process.pid() << ":" << m_tid << ")";
#endif
    set_default_signal_dispositions();
    // 16-byte alignment for the FPU save area.
    // NOTE(review): presumably required by the x86 fxsave/fxrstor contract — confirm.
    m_fpu_state = (FPUState*)kmalloc_aligned(sizeof(FPUState), 16);
    reset_fpu_state();
    memset(&m_tss, 0, sizeof(m_tss));
    // Point the I/O map base past the TSS itself.
    // NOTE(review): presumably this disables the I/O permission bitmap — confirm.
    m_tss.iomapbase = sizeof(TSS32);

    // Only IF is set when a process boots.
    m_tss.eflags = 0x0202;
    if (m_process.is_ring0()) {
        // Kernel threads run with ring-0 selectors.
        m_tss.cs = GDT_SELECTOR_CODE0;
        m_tss.ds = GDT_SELECTOR_DATA0;
        m_tss.es = GDT_SELECTOR_DATA0;
        m_tss.fs = GDT_SELECTOR_PROC;
        m_tss.ss = GDT_SELECTOR_DATA0;
        m_tss.gs = 0;
    } else {
        // Userspace threads use ring-3 selectors (RPL == 3).
        m_tss.cs = GDT_SELECTOR_CODE3 | 3;
        m_tss.ds = GDT_SELECTOR_DATA3 | 3;
        m_tss.es = GDT_SELECTOR_DATA3 | 3;
        m_tss.fs = GDT_SELECTOR_DATA3 | 3;
        m_tss.ss = GDT_SELECTOR_DATA3 | 3;
        m_tss.gs = GDT_SELECTOR_TLS | 3;
    }
    m_tss.cr3 = m_process.page_directory().cr3();

    // Every thread gets its own kernel stack region; the usable top is
    // aligned down to an 8-byte boundary.
    m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), Region::Access::Read | Region::Access::Write, false, true);
    m_kernel_stack_region->set_stack(true);
    m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
    m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;

    if (m_process.is_ring0()) {
        m_tss.esp = m_tss.esp0 = m_kernel_stack_top;
    } else {
        // Ring 3 processes get a separate stack for ring 0.
        // The ring 3 stack will be assigned by exec().
        m_tss.ss0 = GDT_SELECTOR_DATA0;
        m_tss.esp0 = m_kernel_stack_top;
    }

    // Threads of PID 0 are not tracked or scheduled.
    // NOTE(review): presumably PID 0 is special early-boot setup — confirm.
    if (m_process.pid() != 0) {
        InterruptDisabler disabler;
        thread_table().set(this);
        Scheduler::init_thread(*this);
    }
}
// Tears down a thread: releases the FPU save area, deregisters the thread
// from the global table, and drops the process's thread count.
Thread::~Thread()
{
    kfree_aligned(m_fpu_state);

    {
        InterruptDisabler disabler;
        thread_table().remove(this);
    }

    // The count is the value *before* the decrement; it must never
    // already have been zero.
    auto previous_thread_count = m_process.m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
    ASSERT(previous_thread_count != 0);
}
// Transitions this thread out of its blocked state. If the thread has been
// asked to die, it becomes Dying instead of resuming. The current thread
// resumes as Running; any other thread becomes Runnable.
void Thread::unblock()
{
    m_blocker = nullptr;
    if (Thread::current() == this) {
        if (m_should_die)
            set_state(Thread::Dying);
        else
            set_state(Thread::Running);
        return;
    }
    // A thread that is already runnable/running should never be "unblocked".
    ASSERT(m_state != Thread::Runnable && m_state != Thread::Running);
    if (m_should_die)
        set_state(Thread::Dying);
    else
        set_state(Thread::Runnable);
}
// NOTE: Unwinding kernel stacks before dying.
// While executing in the kernel, a thread can acquire various resources
// that need cleanup, such as locks and references to RefCounted objects.
// This cleanup normally happens on the exit path, such as in destructors
// for various RAII guards. Those exit paths are not taken when a thread
// executing in the kernel (e.g. blocked on a read or a sleep) is killed
// directly, which would leak such resources.
// Therefore, instead of killing a thread directly, call
// thread->set_should_die(), which unblocks it and makes it unwind the
// stack if it is blocked in the kernel. Then, just before returning to
// userspace, the thread will automatically die.
// Requests that this thread die the next time it is about to return to
// userspace. If the thread is blocked in the kernel, its blocker is
// interrupted so it can unwind; if it is in userspace, it is marked Dying
// immediately. Idempotent.
void Thread::set_should_die()
{
    if (m_should_die) {
#ifdef THREAD_DEBUG
        dbg() << *this << " Should already die";
#endif
        return;
    }
    ScopedCritical critical;

    // Remember that we should die instead of returning to
    // the userspace.
    m_should_die = true;
    if (is_blocked()) {
        ASSERT(in_kernel());
        ASSERT(m_blocker != nullptr);
        // We're blocked in the kernel. Interrupt the blocker so the
        // thread unwinds its kernel stack and runs its cleanup paths.
        m_blocker->set_interrupted_by_death();
        unblock();
    } else if (!in_kernel()) {
        // We're executing in userspace (and we're clearly
        // not the current thread). No need to unwind, so
        // set the state to dying right away. This also
        // makes sure we won't be scheduled anymore.
        set_state(Thread::State::Dying);
    }
}
// Called by a thread on itself (typically on the return-to-userspace path)
// to actually die after set_should_die() was requested. Releases the
// process big lock, marks the thread Dying, and context-switches away.
// Does not return if the thread dies.
void Thread::die_if_needed()
{
    ASSERT(Thread::current() == this);

    if (!m_should_die)
        return;

    unlock_process_if_locked();

    ScopedCritical critical;
    set_state(Thread::State::Dying);

    // Flag a context switch. Because we're in a critical section,
    // Scheduler::yield will actually only mark a pending context switch.
    // Simply leaving the critical section would not necessarily trigger
    // a switch.
    Scheduler::yield();

    // Now leave the critical section so that we can also trigger the
    // actual context switch.
    u32 prev_flags;
    Processor::current().clear_critical(prev_flags, false);

    // We should never get here, but the scoped scheduler lock
    // will be released by Scheduler::context_switch again.
    ASSERT_NOT_REACHED();
}
2019-11-16 14:18:59 +03:00
void Thread : : yield_without_holding_big_lock ( )
2019-03-24 00:03:17 +03:00
{
2020-07-05 23:32:07 +03:00
bool did_unlock = unlock_process_if_locked ( ) ;
2019-03-24 00:03:17 +03:00
Scheduler : : yield ( ) ;
2020-07-05 23:32:07 +03:00
relock_process ( did_unlock ) ;
2019-03-24 00:03:17 +03:00
}
2019-12-01 13:57:20 +03:00
2020-07-05 23:32:07 +03:00
bool Thread : : unlock_process_if_locked ( )
2019-12-01 13:57:20 +03:00
{
2020-01-13 00:53:20 +03:00
return process ( ) . big_lock ( ) . force_unlock_if_locked ( ) ;
2019-12-01 17:54:47 +03:00
}
2020-07-05 23:32:07 +03:00
void Thread : : relock_process ( bool did_unlock )
2019-12-01 17:54:47 +03:00
{
2020-07-03 14:19:50 +03:00
if ( did_unlock )
process ( ) . big_lock ( ) . lock ( ) ;
2019-12-01 13:57:20 +03:00
}
2019-03-24 00:03:17 +03:00
2019-07-18 18:26:11 +03:00
// Puts the current thread to sleep for `ticks` scheduler ticks.
// Returns the absolute uptime tick at which the sleep was due to end.
// The caller may wake earlier than that if interrupted (e.g. by a signal).
u64 Thread::sleep(u32 ticks)
{
    ASSERT(state() == Thread::Running);
    // Delegate to sleep_until(): the block-and-verify logic was duplicated
    // verbatim between the two functions.
    return sleep_until(g_uptime + ticks);
}
2019-11-02 21:34:06 +03:00
// Blocks the current thread until the given absolute uptime tick.
// Returns `wakeup_time`; an early return implies the block was
// interrupted rather than a normal timed wakeup.
u64 Thread::sleep_until(u64 wakeup_time)
{
    ASSERT(state() == Thread::Running);
    auto block_result = Thread::current()->block<Thread::SleepBlocker>(wakeup_time);
    // Still before the deadline? Then we cannot have woken normally.
    if (wakeup_time > g_uptime) {
        ASSERT(block_result != Thread::BlockResult::WokeNormally);
    }
    return wakeup_time;
}
2019-07-19 10:51:48 +03:00
const char * Thread : : state_string ( ) const
2019-03-24 00:03:17 +03:00
{
2019-07-19 10:51:48 +03:00
switch ( state ( ) ) {
2019-06-07 12:43:58 +03:00
case Thread : : Invalid :
return " Invalid " ;
case Thread : : Runnable :
return " Runnable " ;
case Thread : : Running :
return " Running " ;
case Thread : : Dying :
return " Dying " ;
case Thread : : Dead :
return " Dead " ;
case Thread : : Stopped :
return " Stopped " ;
case Thread : : Skip1SchedulerPass :
return " Skip1 " ;
case Thread : : Skip0SchedulerPasses :
return " Skip0 " ;
2019-12-01 17:54:47 +03:00
case Thread : : Queued :
return " Queued " ;
2019-07-19 10:37:34 +03:00
case Thread : : Blocked :
2019-09-09 06:58:42 +03:00
ASSERT ( m_blocker ! = nullptr ) ;
return m_blocker - > state_string ( ) ;
2019-03-24 00:03:17 +03:00
}
2020-03-01 22:45:39 +03:00
klog ( ) < < " Thread::state_string(): Invalid state: " < < state ( ) ;
2019-03-24 00:03:17 +03:00
ASSERT_NOT_REACHED ( ) ;
return nullptr ;
}
// Runs on the finalizer thread once this thread has died: transitions it
// to Dead, hands the exit value to any joining thread and wakes it, and
// optionally dumps a backtrace.
void Thread::finalize()
{
    ASSERT(Thread::current() == g_finalizer);
    ASSERT(Thread::current() != this);

#ifdef THREAD_DEBUG
    dbg() << "Finalizing thread " << *this;
#endif
    set_state(Thread::State::Dead);

    if (m_joiner) {
        ASSERT(m_joiner->m_joinee == this);
        // Publish our exit value to the joiner's JoinBlocker, then wake it;
        // from the joiner's perspective the join was interrupted by death.
        static_cast<JoinBlocker*>(m_joiner->m_blocker)->set_joinee_exit_value(m_exit_value);
        static_cast<JoinBlocker*>(m_joiner->m_blocker)->set_interrupted_by_death();
        m_joiner->m_joinee = nullptr;
        // NOTE: We clear the joiner pointer here as well, to be tidy.
        m_joiner = nullptr;
    }

    if (m_dump_backtrace_on_finalization)
        dbg() << backtrace_impl();
}
// Finalizer-thread main helper: collects all Dying threads that are ready
// to be finalized (under the scheduler lock), then finalizes and deletes
// them outside the lock. If a process loses its last thread here, the
// process itself is finalized too.
void Thread::finalize_dying_threads()
{
    ASSERT(Thread::current() == g_finalizer);
    Vector<Thread*, 32> dying_threads;
    {
        // Only snapshot the candidates while holding g_scheduler_lock;
        // the actual finalization happens after it is released.
        ScopedSpinLock lock(g_scheduler_lock);
        for_each_in_state(Thread::State::Dying, [&](Thread& thread) {
            if (thread.is_finalizable())
                dying_threads.append(&thread);
            return IterationDecision::Continue;
        });
    }
    for (auto* thread : dying_threads) {
        auto& process = thread->process();
        thread->finalize();
        delete thread;
        // Last thread gone? Then the whole process can be finalized.
        if (process.m_thread_count.load(AK::MemoryOrder::memory_order_consume) == 0)
            process.finalize();
    }
}
bool Thread : : tick ( )
{
+ + m_ticks ;
if ( tss ( ) . cs & 3 )
+ + m_process . m_ticks_in_user ;
else
+ + m_process . m_ticks_in_kernel ;
return - - m_ticks_left ;
}
2020-02-01 12:27:25 +03:00
// Records `signal` as pending for this thread, unless the thread's
// disposition says to ignore it outright. `sender` is used only for
// debug logging.
void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
{
    ASSERT(signal < 32);
    InterruptDisabler disabler;

    // FIXME: Figure out what to do for masked signals. Should we also ignore them here?
    if (should_ignore_signal(signal)) {
#ifdef SIGNAL_DEBUG
        dbg() << "Signal " << signal << " was ignored by " << process();
#endif
        return;
    }

#ifdef SIGNAL_DEBUG
    if (sender)
        dbg() << "Signal: " << *sender << " sent " << signal << " to " << process();
    else
        dbg() << "Signal: Kernel sent " << signal << " to " << process();
#endif

    ScopedSpinLock lock(g_scheduler_lock);
    // Pending signals are a bitmask: bit (signal - 1) set == pending.
    m_pending_signals |= 1 << (signal - 1);
}
2019-10-07 12:22:50 +03:00
// Certain exceptions, such as SIGSEGV and SIGILL, put a
// thread into a state where the signal handler must be
// invoked immediately, otherwise it will continue to fault.
// This function should be used in an exception handler to
// ensure that when the thread resumes, it's executing in
// the appropriate signal handler.
void Thread : : send_urgent_signal_to_self ( u8 signal )
{
2020-07-03 14:19:50 +03:00
ASSERT ( Thread : : current ( ) = = this ) ;
ScopedSpinLock lock ( g_scheduler_lock ) ;
if ( dispatch_signal ( signal ) = = ShouldUnblockThread : : No )
Scheduler : : yield ( ) ;
2019-10-07 12:22:50 +03:00
}
2019-03-24 00:03:17 +03:00
// Dispatches the lowest-numbered signal that is pending and not masked.
// At least one such signal must exist; interrupts must be disabled.
ShouldUnblockThread Thread::dispatch_one_pending_signal()
{
    ASSERT_INTERRUPTS_DISABLED();
    u32 signal_candidates = m_pending_signals & ~m_signal_mask;
    ASSERT(signal_candidates);

    // Scan for the first set candidate bit (bit N == signal N + 1).
    u8 signal = 1;
    while (signal < 32 && !(signal_candidates & (1 << (signal - 1))))
        ++signal;

    return dispatch_signal(signal);
}
2019-06-07 18:13:23 +03:00
// Default disposition of a signal that has no user-installed handler.
enum class DefaultSignalAction {
    Terminate,
    Ignore,
    DumpCore,
    Stop,
    Continue,
};

// Maps a signal number to its default action (cf. signal(7)).
DefaultSignalAction default_signal_action(u8 signal)
{
    ASSERT(signal && signal < NSIG);
    switch (signal) {
    case SIGHUP:
    case SIGINT:
    case SIGKILL:
    case SIGPIPE:
    case SIGALRM:
    case SIGUSR1:
    case SIGUSR2:
    case SIGVTALRM:
    case SIGSTKFLT:
    case SIGIO:
    case SIGPROF:
    case SIGTERM:
    case SIGPWR:
        return DefaultSignalAction::Terminate;
    case SIGCHLD:
    case SIGURG:
    case SIGWINCH:
        return DefaultSignalAction::Ignore;
    case SIGQUIT:
    case SIGILL:
    case SIGTRAP:
    case SIGABRT:
    case SIGBUS:
    case SIGFPE:
    case SIGSEGV:
    case SIGXCPU:
    case SIGXFSZ:
    case SIGSYS:
        return DefaultSignalAction::DumpCore;
    case SIGCONT:
        return DefaultSignalAction::Continue;
    case SIGSTOP:
    case SIGTSTP:
    case SIGTTIN:
    case SIGTTOU:
        return DefaultSignalAction::Stop;
    }
    // Every defined signal must be covered above.
    ASSERT_NOT_REACHED();
}
2019-07-08 19:59:48 +03:00
bool Thread : : should_ignore_signal ( u8 signal ) const
{
ASSERT ( signal < 32 ) ;
auto & action = m_signal_action_data [ signal ] ;
if ( action . handler_or_sigaction . is_null ( ) )
return default_signal_action ( signal ) = = DefaultSignalAction : : Ignore ;
if ( action . handler_or_sigaction . as_ptr ( ) = = SIG_IGN )
return true ;
return false ;
}
2019-10-07 12:22:50 +03:00
bool Thread : : has_signal_handler ( u8 signal ) const
{
ASSERT ( signal < 32 ) ;
auto & action = m_signal_action_data [ signal ] ;
return ! action . handler_or_sigaction . is_null ( ) ;
}
2019-11-04 11:29:47 +03:00
// Pushes one 32-bit value onto a userspace stack: the stack grows
// downward, so decrement the stack pointer first, then copy the value out.
static void push_value_on_user_stack(u32* stack, u32 data)
{
    *stack -= sizeof(u32);
    copy_to_user((u32*)*stack, &data);
}
2019-07-03 22:17:35 +03:00
// Delivers `signal` to this (ring-3) thread. Either performs the default
// action (stop / terminate / dump core / ignore / continue), or rewrites
// the thread's userspace stack and instruction pointer so that execution
// resumes in the userspace handler via the signal trampoline. Returns
// whether a blocked thread should be unblocked as a result.
// Caller must hold g_scheduler_lock with interrupts disabled.
ShouldUnblockThread Thread::dispatch_signal(u8 signal)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(g_scheduler_lock.is_locked());
    ASSERT(signal > 0 && signal <= 32);
    ASSERT(!process().is_ring0());

#ifdef SIGNAL_DEBUG
    klog() << "dispatch_signal <- " << signal;
#endif

    auto& action = m_signal_action_data[signal];
    // FIXME: Implement SA_SIGINFO signal handlers.
    ASSERT(!(action.flags & SA_SIGINFO));

    // Mark this signal as handled.
    m_pending_signals &= ~(1 << (signal - 1));

    if (signal == SIGSTOP) {
        if (!is_stopped()) {
            m_stop_signal = SIGSTOP;
            set_state(State::Stopped);
        }
        return ShouldUnblockThread::No;
    }

    if (signal == SIGCONT && is_stopped()) {
        // Resume into whatever state we were in before being stopped.
        ASSERT(m_stop_state != State::Invalid);
        set_state(m_stop_state);
        m_stop_state = State::Invalid;
        // make sure SemiPermanentBlocker is unblocked
        if (m_state != Thread::Runnable && m_state != Thread::Running
            && m_blocker && m_blocker->is_reason_signal())
            unblock();
    } else {
        auto* thread_tracer = tracer();
        if (thread_tracer != nullptr) {
            // when a thread is traced, it should be stopped whenever it receives a signal
            // the tracer is notified of this by using waitpid()
            // only "pending signals" from the tracer are sent to the tracee
            if (!thread_tracer->has_pending_signal(signal)) {
                m_stop_signal = signal;
                // make sure SemiPermanentBlocker is unblocked
                if (m_blocker && m_blocker->is_reason_signal())
                    unblock();
                set_state(Stopped);
                return ShouldUnblockThread::No;
            }
            thread_tracer->unset_signal(signal);
        }
    }

    auto handler_vaddr = action.handler_or_sigaction;
    if (handler_vaddr.is_null()) {
        // No userspace handler installed; perform the default action.
        switch (default_signal_action(signal)) {
        case DefaultSignalAction::Stop:
            m_stop_signal = signal;
            set_state(Stopped);
            return ShouldUnblockThread::No;
        case DefaultSignalAction::DumpCore:
            // Arrange for every thread to dump a backtrace on finalization,
            // then fall through to termination.
            process().for_each_thread([](auto& thread) {
                thread.set_dump_backtrace_on_finalization();
                return IterationDecision::Continue;
            });
            [[fallthrough]];
        case DefaultSignalAction::Terminate:
            m_process.terminate_due_to_signal(signal);
            return ShouldUnblockThread::No;
        case DefaultSignalAction::Ignore:
            // Ignorable signals are filtered out earlier (send_signal).
            ASSERT_NOT_REACHED();
        case DefaultSignalAction::Continue:
            return ShouldUnblockThread::Yes;
        }
        ASSERT_NOT_REACHED();
    }

    if (handler_vaddr.as_ptr() == SIG_IGN) {
#ifdef SIGNAL_DEBUG
        klog() << "ignored signal " << signal;
#endif
        return ShouldUnblockThread::Yes;
    }

    ProcessPagingScope paging_scope(m_process);

    // Compute the handler's signal mask: with SA_NODEFER the signal may
    // re-enter its own handler; otherwise it is blocked while handling.
    u32 old_signal_mask = m_signal_mask;
    u32 new_signal_mask = action.mask;
    if (action.flags & SA_NODEFER)
        new_signal_mask &= ~(1 << (signal - 1));
    else
        new_signal_mask |= 1 << (signal - 1);
    m_signal_mask |= new_signal_mask;

    // Builds the frame the signal trampoline expects on the user stack:
    // saved eflags/eip/GPRs, the old signal mask, the signal number, the
    // handler address, and a fake return address (in push order).
    auto setup_stack = [&]<typename ThreadState>(ThreadState state, u32* stack) {
        u32 old_esp = *stack;
        u32 ret_eip = state.eip;
        u32 ret_eflags = state.eflags;
#ifdef SIGNAL_DEBUG
        klog() << "signal: setting up user stack to return to eip: " << String::format("%p", ret_eip) << " esp: " << String::format("%p", old_esp);
#endif
        // Align the stack to 16 bytes.
        // Note that we push 56 bytes (4 * 14) on to the stack,
        // so we need to account for this here.
        u32 stack_alignment = (*stack - 56) % 16;
        *stack -= stack_alignment;

        push_value_on_user_stack(stack, ret_eflags);
        push_value_on_user_stack(stack, ret_eip);
        push_value_on_user_stack(stack, state.eax);
        push_value_on_user_stack(stack, state.ecx);
        push_value_on_user_stack(stack, state.edx);
        push_value_on_user_stack(stack, state.ebx);
        push_value_on_user_stack(stack, old_esp);
        push_value_on_user_stack(stack, state.ebp);
        push_value_on_user_stack(stack, state.esi);
        push_value_on_user_stack(stack, state.edi);

        // PUSH old_signal_mask
        push_value_on_user_stack(stack, old_signal_mask);

        push_value_on_user_stack(stack, signal);
        push_value_on_user_stack(stack, handler_vaddr.get());
        push_value_on_user_stack(stack, 0); //push fake return address

        ASSERT((*stack % 16) == 0);
    };

    // We now place the thread state on the userspace stack.
    // Note that when we are in the kernel (ie. blocking) we cannot use the
    // tss, as that will contain kernel state; instead, we use a RegisterState.
    // Conversely, when the thread isn't blocking the RegisterState may not be
    // valid (fork, exec etc) but the tss will, so we use that instead.
    if (!in_kernel()) {
        u32* stack = &m_tss.esp;
        setup_stack(m_tss, stack);
        m_tss.cs = GDT_SELECTOR_CODE3 | 3;
        m_tss.ds = GDT_SELECTOR_DATA3 | 3;
        m_tss.es = GDT_SELECTOR_DATA3 | 3;
        m_tss.fs = GDT_SELECTOR_DATA3 | 3;
        m_tss.gs = GDT_SELECTOR_TLS | 3;
        m_tss.eip = g_return_to_ring3_from_signal_trampoline.get();
        // FIXME: This state is such a hack. It avoids trouble if 'current' is the process receiving a signal.
        set_state(Skip1SchedulerPass);
    } else {
        auto& regs = get_register_dump_from_stack();
        u32* stack = &regs.userspace_esp;
        setup_stack(regs, stack);
        regs.eip = g_return_to_ring3_from_signal_trampoline.get();
    }

#ifdef SIGNAL_DEBUG
    klog() << "signal: Okay, {" << state_string() << "} has been primed with signal handler " << String::format("%w", m_tss.cs) << ":" << String::format("%x", m_tss.eip);
#endif
    return ShouldUnblockThread::Yes;
}
void Thread : : set_default_signal_dispositions ( )
{
// FIXME: Set up all the right default actions. See signal(7).
memset ( & m_signal_action_data , 0 , sizeof ( m_signal_action_data ) ) ;
2020-01-20 15:06:41 +03:00
m_signal_action_data [ SIGCHLD ] . handler_or_sigaction = VirtualAddress ( SIG_IGN ) ;
m_signal_action_data [ SIGWINCH ] . handler_or_sigaction = VirtualAddress ( SIG_IGN ) ;
2019-03-24 00:03:17 +03:00
}
2020-03-08 12:36:51 +03:00
void Thread : : push_value_on_stack ( FlatPtr value )
2019-03-24 00:03:17 +03:00
{
m_tss . esp - = 4 ;
2020-03-08 12:36:51 +03:00
FlatPtr * stack_ptr = ( FlatPtr * ) m_tss . esp ;
2020-01-20 15:38:31 +03:00
copy_to_user ( stack_ptr , & value ) ;
2019-03-24 00:03:17 +03:00
}
2020-02-16 02:15:37 +03:00
RegisterState& Thread::get_register_dump_from_stack()
{
    // The userspace registers should be stored at the top of the kernel
    // stack, so the dump lives exactly sizeof(RegisterState) below the top.
    auto dump_address = kernel_stack_top() - sizeof(RegisterState);
    return *reinterpret_cast<RegisterState*>(dump_address);
}
2019-12-19 01:03:23 +03:00
u32 Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment)
{
    // Builds the initial userspace stack for the main thread: allocates a
    // fresh stack region, writes the argv/envp pointer tables and string
    // data into it, then pushes argc/argv/envp (and a fake return address)
    // so program startup can pick them up. Returns the new ESP.
    auto* region = m_process.allocate_region(VirtualAddress(), default_userspace_stack_size, "Stack (Main thread)", PROT_READ | PROT_WRITE, false);
    ASSERT(region);
    region->set_stack(true);

    u32 new_esp = region->vaddr().offset(default_userspace_stack_size).get();

    // FIXME: This is weird, we put the argument contents at the base of the stack,
    //        and the argument pointers at the top? Why?
    char* stack_base = (char*)region->vaddr().get();
    int argc = arguments.size();
    char** argv = (char**)stack_base;
    // envp table follows argv's (arguments.size() + 1) slots, including its
    // null terminator slot.
    char** env = argv + arguments.size() + 1;
    // String data is packed immediately after both null-terminated tables.
    char* bufptr = stack_base + (sizeof(char*) * (arguments.size() + 1)) + (sizeof(char*) * (environment.size() + 1));

    // The region is userspace memory, so SMAP must be disabled while the
    // kernel writes into it.
    SmapDisabler disabler;

    for (size_t i = 0; i < arguments.size(); ++i) {
        argv[i] = bufptr;
        memcpy(bufptr, arguments[i].characters(), arguments[i].length());
        bufptr += arguments[i].length();
        *(bufptr++) = '\0';
    }
    argv[arguments.size()] = nullptr;

    for (size_t i = 0; i < environment.size(); ++i) {
        env[i] = bufptr;
        memcpy(bufptr, environment[i].characters(), environment[i].length());
        bufptr += environment[i].length();
        *(bufptr++) = '\0';
    }
    env[environment.size()] = nullptr;

    // Pushes directly (no copy_to_user) since SMAP is already disabled above.
    auto push_on_new_stack = [&new_esp](u32 value) {
        new_esp -= 4;
        u32* stack_ptr = (u32*)new_esp;
        *stack_ptr = value;
    };

    // NOTE: The stack needs to be 16-byte aligned.
    // NOTE(review): nothing here enforces that alignment — confirm callers
    // rely on default_userspace_stack_size keeping these pushes aligned.
    push_on_new_stack((FlatPtr)env);
    push_on_new_stack((FlatPtr)argv);
    push_on_new_stack((FlatPtr)argc);
    push_on_new_stack(0); // fake return address
    return new_esp;
}
Thread* Thread::clone(Process& process)
{
    // Creates a new thread in `process`, carrying over this thread's signal
    // configuration, FPU state and thread-specific (TLS) data pointer.
    auto* clone = new Thread(process);
    memcpy(clone->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
    clone->m_signal_mask = m_signal_mask;
    memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
    clone->m_thread_specific_data = m_thread_specific_data;
    clone->m_thread_specific_region_size = m_thread_specific_region_size;
    return clone;
}
2019-03-24 01:50:34 +03:00
Vector<Thread*> Thread::all_threads()
{
    // Snapshot the global thread table into a vector. Interrupts stay
    // disabled while we read the table so it cannot change under us.
    Vector<Thread*> result;
    InterruptDisabler disabler;
    result.ensure_capacity(thread_table().size());
    for (auto* entry : thread_table())
        result.unchecked_append(entry);
    return result;
}
2019-04-17 13:41:51 +03:00
bool Thread : : is_thread ( void * ptr )
{
ASSERT_INTERRUPTS_DISABLED ( ) ;
2019-05-18 19:31:36 +03:00
return thread_table ( ) . contains ( ( Thread * ) ptr ) ;
}
2019-05-18 21:07:00 +03:00
void Thread::set_state(State new_state)
{
    // State transitions are serialized by the scheduler lock; other cores
    // may be inspecting or switching this thread concurrently.
    ScopedSpinLock lock(g_scheduler_lock);
    if (new_state == m_state)
        return;

    if (new_state == Blocked) {
        // we should always have a Blocker while blocked
        ASSERT(m_blocker != nullptr);
    }

    if (new_state == Stopped) {
        // Remember the state we were in, presumably so it can be restored
        // when the thread is continued — confirm against m_stop_state users.
        m_stop_state = m_state;
    }

    m_state = new_state;
#ifdef THREAD_DEBUG
    dbg() << "Set Thread " << *this << " state to " << state_string();
#endif

    // Threads belonging to process 0 are not tracked in the scheduler's
    // per-state lists.
    if (m_process.pid() != 0) {
        Scheduler::update_state_for_thread(*this);
    }

    if (m_state == Dying && this != Thread::current() && is_finalizable()) {
        // Some other thread set this thread to Dying, notify the
        // finalizer right away as it can be cleaned up now
        Scheduler::notify_finalizer();
    }
}
2019-07-25 22:02:19 +03:00
2020-07-03 21:12:34 +03:00
String Thread::backtrace(ProcessInspectionHandle&)
{
    // NOTE(review): the handle parameter is unused here — presumably it
    // exists so callers prove they hold the process for inspection; confirm
    // at call sites. All work happens in backtrace_impl().
    return backtrace_impl();
}
2020-01-19 12:10:46 +03:00
// One raw address collected during a stack walk, paired with the kernel
// symbol it resolved to (nullptr when no kernel symbol matched).
struct RecognizedSymbol {
    u32 address;
    const KernelSymbol* symbol { nullptr };
};
2020-03-02 12:40:40 +03:00
// Appends one human-readable line describing `symbol` to `builder`.
// Returns false only for a null address, which ends the caller's loop.
static bool symbolicate(const RecognizedSymbol& symbol, const Process& process, StringBuilder& builder, Process::ELFBundle* elf_bundle)
{
    if (!symbol.address)
        return false;
    // Non-superusers must not see raw kernel addresses.
    bool mask_kernel_addresses = !process.is_superuser();
    if (!symbol.symbol) {
        // No kernel symbol matched this address.
        if (!is_user_address(VirtualAddress(symbol.address))) {
            builder.append("0xdeadc0de\n");
        } else {
            // Userspace address: try to resolve it via the process's ELF symbols.
            if (elf_bundle && elf_bundle->elf_loader->has_symbols())
                builder.appendf("%p  %s\n", symbol.address, elf_bundle->elf_loader->symbolicate(symbol.address).characters());
            else
                builder.appendf("%p\n", symbol.address);
        }
        return true;
    }
    unsigned offset = symbol.address - symbol.symbol->address;
    if (symbol.symbol->address == g_highest_kernel_symbol_address && offset > 4096) {
        // Too far beyond the last known kernel symbol to be a plausible
        // match — print the bare (possibly masked) address.
        builder.appendf("%p\n", mask_kernel_addresses ? 0xdeadc0de : symbol.address);
    } else {
        builder.appendf("%p  %s +%u\n", mask_kernel_addresses ? 0xdeadc0de : symbol.address, demangle(symbol.symbol->name).characters(), offset);
    }
    return true;
}
2020-07-03 21:12:34 +03:00
String Thread::backtrace_impl()
{
    Vector<RecognizedSymbol, 128> recognized_symbols;
    auto& process = const_cast<Process&>(this->process());
    // Grab the ELF bundle up front so userspace frames can be symbolicated later.
    auto elf_bundle = process.elf_bundle();
    // Switch to this thread's address space so its stack is mapped.
    ProcessPagingScope paging_scope(process);

    // To prevent a context switch involving this thread, which may happen
    // on another processor, we need to acquire the scheduler lock while
    // walking the stack
    {
        ScopedSpinLock lock(g_scheduler_lock);
        FlatPtr stack_ptr, eip;
        if (Processor::get_context_frame_ptr(*this, stack_ptr, eip)) {
            recognized_symbols.append({ eip, symbolicate_kernel_address(eip) });
            // Walk the saved frame-pointer chain; each frame holds
            // [saved frame pointer, return address].
            for (;;) {
                if (!process.validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(void*) * 2))
                    break;
                FlatPtr retaddr;
                if (is_user_range(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2)) {
                    // Userspace frames must be read via copy_from_user.
                    copy_from_user(&retaddr, &((FlatPtr*)stack_ptr)[1]);
                    recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
                    copy_from_user(&stack_ptr, (FlatPtr*)stack_ptr);
                } else {
                    // Kernel frames can be read directly.
                    memcpy(&retaddr, &((FlatPtr*)stack_ptr)[1], sizeof(FlatPtr));
                    recognized_symbols.append({ retaddr, symbolicate_kernel_address(retaddr) });
                    memcpy(&stack_ptr, (FlatPtr*)stack_ptr, sizeof(FlatPtr));
                }
            }
        }
    }

    StringBuilder builder;
    for (auto& symbol : recognized_symbols) {
        // symbolicate() only returns false for a null address.
        if (!symbolicate(symbol, process, builder, elf_bundle.ptr()))
            break;
    }
    return builder.to_string();
}
2019-09-07 16:50:44 +03:00
2020-04-11 21:39:27 +03:00
Vector<FlatPtr> Thread::raw_backtrace(FlatPtr ebp, FlatPtr eip) const
{
    // Collects up to Profiling::max_stack_frame_count raw return addresses
    // starting at `eip`, by walking the EBP frame chain. No symbolication.
    InterruptDisabler disabler;
    auto& process = const_cast<Process&>(this->process());
    // Switch to this thread's address space so its stack is mapped.
    ProcessPagingScope paging_scope(process);
    Vector<FlatPtr, Profiling::max_stack_frame_count> backtrace;
    backtrace.append(eip);
    // Each frame is [saved ebp, return address]; stop as soon as a frame
    // cannot be read without faulting.
    for (FlatPtr* stack_ptr = (FlatPtr*)ebp; process.validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2) && MM.can_read_without_faulting(process, VirtualAddress(stack_ptr), sizeof(FlatPtr) * 2); stack_ptr = (FlatPtr*)*stack_ptr) {
        FlatPtr retaddr = stack_ptr[1];
        backtrace.append(retaddr);
        if (backtrace.size() == Profiling::max_stack_frame_count)
            break;
    }
    return backtrace;
}
2019-09-07 16:50:44 +03:00
void Thread::make_thread_specific_region(Badge<Process>)
{
    // Lays out this thread's TLS block: the master TLS image (aligned up)
    // followed by the ThreadSpecificData control structure.
    size_t thread_specific_region_alignment = max(process().m_master_tls_alignment, alignof(ThreadSpecificData));
    m_thread_specific_region_size = align_up_to(process().m_master_tls_size, thread_specific_region_alignment) + sizeof(ThreadSpecificData);
    auto* region = process().allocate_region({}, m_thread_specific_region_size, "Thread-specific", PROT_READ | PROT_WRITE, true);
    // The region is userspace memory, so writes require SMAP to be disabled.
    SmapDisabler disabler;
    // ThreadSpecificData sits just above the aligned TLS image.
    auto* thread_specific_data = (ThreadSpecificData*)region->vaddr().offset(align_up_to(process().m_master_tls_size, thread_specific_region_alignment)).as_ptr();
    auto* thread_local_storage = (u8*)((u8*)thread_specific_data) - align_up_to(process().m_master_tls_size, process().m_master_tls_alignment);
    m_thread_specific_data = VirtualAddress(thread_specific_data);
    // Self-pointer, presumably read by userspace through its TLS segment
    // register — confirm against the GDT_SELECTOR_TLS setup.
    thread_specific_data->self = thread_specific_data;
    if (process().m_master_tls_size)
        memcpy(thread_local_storage, process().m_master_tls_region->vaddr().as_ptr(), process().m_master_tls_size);
}
2019-10-13 15:36:55 +03:00
// Formats a thread as "name(pid:tid)" for kernel log output.
const LogStream& operator<<(const LogStream& stream, const Thread& value)
{
    return stream << value.process().name() << "(" << value.pid() << ":" << value.tid() << ")";
}
2019-11-06 18:26:51 +03:00
2020-07-05 00:55:20 +03:00
Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, timeval* timeout, Atomic<bool>* lock, Thread* beneficiary)
{
    // Blocks the current thread on `queue` until woken (or until `timeout`
    // fires). `lock` is cleared once we're safely enqueued; `beneficiary`
    // optionally receives our remaining timeslice. Returns whether we woke
    // normally, timed out, or never blocked at all.
    TimerId timer_id {};
    bool did_unlock;
    {
        ScopedCritical critical;
        // We need to be in a critical section *and* then also acquire the
        // scheduler lock. The only way acquiring the scheduler lock could
        // block us is if another core were to be holding it, in which case
        // we need to wait until the scheduler lock is released again
        {
            ScopedSpinLock sched_lock(g_scheduler_lock);
            if (!queue.enqueue(*Thread::current())) {
                // The WaitQueue was already requested to wake someone when
                // nobody was waiting. So return right away as we shouldn't
                // be waiting
                return BlockResult::NotBlocked;
            }
            did_unlock = unlock_process_if_locked();
            if (lock)
                *lock = false;
            set_state(State::Queued);
            m_wait_reason = reason;

            if (timeout) {
                // Arrange to be woken even if nobody ever wakes the queue.
                timer_id = TimerQueue::the().add_timer(*timeout, [&]() {
                    ScopedSpinLock sched_lock(g_scheduler_lock);
                    wake_from_queue();
                });
            }
            // Yield and wait for the queue to wake us up again.
            if (beneficiary)
                Scheduler::donate_to(beneficiary, reason);
            else
                Scheduler::yield();
        }

        // Clearing the critical section may trigger the context switch
        // flagged by calling Scheduler::donate_to or Scheduler::yield
        // above. We have to do it this way because we intentionally
        // leave the critical section here to be able to switch contexts.
        u32 prev_flags;
        u32 prev_crit = Processor::current().clear_critical(prev_flags, true);

        // We've unblocked, relock the process if needed and carry on.
        relock_process(did_unlock);

        // NOTE: We may be on a different CPU now!
        Processor::current().restore_critical(prev_crit, prev_flags);

        // This looks counter productive, but we may not actually leave
        // the critical section we just restored. It depends on whether
        // we were in one while being called.
    }

    BlockResult result;
    {
        // To be able to look at m_wait_queue_node we once again need the
        // scheduler lock, which is held when we insert into the queue
        ScopedSpinLock sched_lock(g_scheduler_lock);
        // NOTE(review): still being on the queue's list presumably means a
        // normal wake never dequeued us, i.e. the timeout fired — confirm
        // against the WaitQueue wake paths.
        result = m_wait_queue_node.is_in_list() ? BlockResult::InterruptedByTimeout : BlockResult::WokeNormally;
        // Make sure we cancel the timer if woke normally.
        if (timeout && result == BlockResult::WokeNormally)
            TimerQueue::the().cancel_timer(timer_id);
    }

    // The API contract guarantees we return with interrupts enabled,
    // regardless of how we got called
    sti();

    return result;
}
void Thread : : wake_from_queue ( )
{
2020-07-03 14:19:50 +03:00
ScopedSpinLock lock ( g_scheduler_lock ) ;
2019-12-22 14:23:44 +03:00
ASSERT ( state ( ) = = State : : Queued ) ;
2020-07-05 00:55:20 +03:00
m_wait_reason = nullptr ;
2020-06-29 00:34:31 +03:00
if ( this ! = Thread : : current ( ) )
2020-06-27 22:42:28 +03:00
set_state ( State : : Runnable ) ;
else
set_state ( State : : Running ) ;
2019-12-22 14:23:44 +03:00
}
2019-12-30 21:23:13 +03:00
Thread* Thread::from_tid(int tid)
{
    // Linear search over all live threads; interrupts stay disabled so the
    // set of threads cannot change while we iterate.
    InterruptDisabler disabler;
    Thread* match = nullptr;
    Thread::for_each([&](auto& candidate) {
        if (candidate.tid() != tid)
            return IterationDecision::Continue;
        match = &candidate;
        return IterationDecision::Break;
    });
    return match;
}
2020-02-16 03:27:42 +03:00
2020-02-18 15:44:27 +03:00
void Thread : : reset_fpu_state ( )
{
2020-06-27 22:42:28 +03:00
memcpy ( m_fpu_state , & Processor : : current ( ) . clean_fpu_state ( ) , sizeof ( FPUState ) ) ;
2020-02-18 15:44:27 +03:00
}
2020-03-28 11:47:16 +03:00
void Thread::start_tracing_from(pid_t tracer)
{
    // Attach a tracer (identified by its pid) to this thread.
    m_tracer = ThreadTracer::create(tracer);
}
void Thread::stop_tracing()
{
    // Detach any tracer; dropping the reference releases the ThreadTracer.
    m_tracer = nullptr;
}
void Thread::tracer_trap(const RegisterState& regs)
{
    // Record the register state for the tracer to inspect, then deliver
    // SIGTRAP to this thread. Must only be called while being traced.
    ASSERT(m_tracer.ptr());
    m_tracer->set_regs(regs);
    send_urgent_signal_to_self(SIGTRAP);
}
2020-04-07 18:23:37 +03:00
const Thread::Blocker& Thread::blocker() const
{
    // Only valid while a Blocker is installed (i.e. the thread is blocked).
    ASSERT(m_blocker);
    return *m_blocker;
}
2020-02-16 03:27:42 +03:00
}