2019-11-29 16:55:07 +03:00
# include <AK/Demangle.h>
2019-07-25 22:02:19 +03:00
# include <AK/StringBuilder.h>
2019-06-07 10:36:51 +03:00
# include <Kernel/FileSystem/FileDescription.h>
2019-06-07 12:43:58 +03:00
# include <Kernel/Process.h>
# include <Kernel/Scheduler.h>
# include <Kernel/Thread.h>
2019-04-03 16:13:07 +03:00
# include <Kernel/VM/MemoryManager.h>
2019-03-24 00:03:17 +03:00
# include <LibC/signal_numbers.h>
2019-11-06 15:42:38 +03:00
# include <LibELF/ELFLoader.h>
2019-03-24 00:03:17 +03:00
2019-05-22 14:23:41 +03:00
//#define SIGNAL_DEBUG
2019-09-07 16:50:44 +03:00
// Lazily allocates (once) the GDT selector used for thread-local storage (the
// userspace GS segment). The descriptor is configured as a ring-3, 32-bit,
// byte-granular data segment; its base is pointed at each thread's
// thread-specific data elsewhere.
u16 thread_specific_selector()
{
    static u16 selector;
    if (!selector) {
        selector = gdt_alloc_entry();
        auto& descriptor = get_gdt_entry(selector);
        descriptor.dpl = 3;              // accessible from ring 3
        descriptor.segment_present = 1;
        descriptor.granularity = 0;      // byte granularity
        descriptor.zero = 0;
        descriptor.operation_size = 1;   // 32-bit segment
        descriptor.descriptor_type = 1;  // code/data (not system) descriptor
        descriptor.type = 2;             // data, read/write
    }
    return selector;
}
// Convenience accessor for the GDT descriptor behind thread_specific_selector().
// Callers mutate this descriptor's base to point at the current thread's TLS area.
Descriptor& thread_specific_descriptor()
{
    return get_gdt_entry(thread_specific_selector());
}
2019-05-18 19:31:36 +03:00
// Global registry of all live Thread objects. Lazily constructed on first use
// and intentionally never destroyed (kernel-lifetime singleton). Must only be
// touched with interrupts disabled, since threads are added/removed from
// interrupt-sensitive paths.
HashTable<Thread*>& thread_table()
{
    ASSERT_INTERRUPTS_DISABLED();
    static HashTable<Thread*>* s_table = nullptr;
    if (s_table == nullptr)
        s_table = new HashTable<Thread*>;
    return *s_table;
}
2019-03-24 00:03:17 +03:00
// Constructs a new thread belonging to `process`, setting up its TID, FPU
// state, TSS segment registers, and kernel stack, and registering it with the
// scheduler (except for the colonel/idle process, pid 0).
//
// NOTE: The kernel-stack allocation was previously duplicated verbatim in both
// the ring0 and ring3 branches; it is hoisted out here since every thread gets
// one regardless of ring.
Thread::Thread(Process& process)
    : m_process(process)
    , m_name(process.name())
{
    if (m_process.m_thread_count == 0) {
        // First thread gets TID == PID
        m_tid = process.pid();
    } else {
        m_tid = Process::allocate_pid();
    }
    process.m_thread_count++;
    dbgprintf("Thread{%p}: New thread TID=%u in %s(%u)\n", this, m_tid, process.name().characters(), process.pid());
    set_default_signal_dispositions();

    // FPU state must be 16-byte aligned for fxsave/fxrstor.
    m_fpu_state = (FPUState*)kmalloc_aligned(sizeof(FPUState), 16);
    memset(m_fpu_state, 0, sizeof(FPUState));

    memset(&m_tss, 0, sizeof(m_tss));
    // Only IF is set when a process boots.
    m_tss.eflags = 0x0202;

    // Segment selectors depend on which ring the process runs in.
    u16 cs, ds, ss, gs;
    if (m_process.is_ring0()) {
        cs = 0x08;
        ds = 0x10;
        ss = 0x10;
        gs = 0;
    } else {
        cs = 0x1b;
        ds = 0x23;
        ss = 0x23;
        gs = thread_specific_selector() | 3;
    }

    m_tss.ds = ds;
    m_tss.es = ds;
    m_tss.fs = ds;
    m_tss.gs = gs;
    m_tss.ss = ss;
    m_tss.cs = cs;

    m_tss.cr3 = m_process.page_directory().cr3();

    // Every thread gets its own kernel stack. For ring3 threads it is the
    // stack used after a ring transition into the kernel. The top is aligned
    // down to 8 bytes.
    m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), Region::Access::Read | Region::Access::Write, false, true);
    m_kernel_stack_region->set_stack(true);
    m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
    m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;

    if (m_process.is_ring0()) {
        m_tss.esp = m_kernel_stack_top;
    } else {
        // Ring3 processes need a separate stack for Ring0.
        m_tss.ss0 = 0x10;
        m_tss.esp0 = m_kernel_stack_top;
    }

    // HACK: Ring2 SS in the TSS is the current PID.
    m_tss.ss2 = m_process.pid();
    // Sentinel value; overwritten before the thread is ever switched to.
    m_far_ptr.offset = 0x98765432;

    if (m_process.pid() != 0) {
        InterruptDisabler disabler;
        thread_table().set(this);
        Scheduler::init_thread(*this);
    }
}
// Tears down a thread: frees its FPU save area, unregisters it from the global
// thread table and the GDT, releases its userspace stack region, and decrements
// the owning process's thread count.
Thread::~Thread()
{
    dbgprintf("~Thread{%p}\n", this);
    kfree_aligned(m_fpu_state);
    {
        // Thread table mutation requires interrupts off.
        InterruptDisabler disabler;
        thread_table().remove(this);
    }
    // Drop the lazy-FPU cache if it pointed at us.
    if (g_last_fpu_thread == this)
        g_last_fpu_thread = nullptr;
    if (selector())
        gdt_free_entry(selector());
    if (m_userspace_stack_region)
        m_process.deallocate_region(*m_userspace_stack_region);
    ASSERT(m_process.m_thread_count);
    m_process.m_thread_count--;
}
// Transitions this thread out of a blocked state: to Running if it is the
// currently executing thread, otherwise to Runnable so the scheduler may pick
// it up.
void Thread::unblock()
{
    if (current == this) {
        set_state(Thread::Running);
        return;
    }
    // A thread being unblocked from the outside must not already be schedulable.
    ASSERT(m_state != Thread::Runnable && m_state != Thread::Running);
    set_state(Thread::Runnable);
}
// Requests that this thread die.
//
// While executing in the kernel, a thread can acquire resources that need
// cleanup (locks, RefCounted references) which are released on the normal exit
// path (RAII destructors). Killing a thread outright while it is blocked in
// the kernel would leak those. So instead of killing directly, callers use
// set_should_die(): it unblocks the thread so it can unwind its kernel stack,
// and the thread then dies on its own just before returning to userspace
// (see die_if_needed()).
void Thread::set_should_die()
{
    if (m_should_die) {
        // Already marked; nothing further to do.
        dbgprintf("Should already die (%u)\n", m_tid);
        return;
    }
    InterruptDisabler disabler;
    // Remember that we should die instead of returning to
    // the userspace.
    m_should_die = true;
    if (is_blocked()) {
        ASSERT(in_kernel());
        ASSERT(m_blocker != nullptr);
        // We're blocked in the kernel. Pretend to have
        // been interrupted by a signal (perhaps that is
        // what has actually killed us).
        m_blocker->set_interrupted_by_signal();
        unblock();
    } else if (!in_kernel()) {
        // We're executing in userspace (and we're clearly
        // not the current thread). No need to unwind, so
        // set the state to dying right away. This also
        // makes sure we won't be scheduled anymore.
        set_state(Thread::State::Dying);
    }
}
// Called by the current thread on its way back to userspace: if set_should_die()
// was requested, release the process big lock, mark ourselves Dying, and yield
// so the finalizer can reap us.
void Thread::die_if_needed()
{
    ASSERT(current == this);
    if (!m_should_die)
        return;
    // Drop the big lock if we hold it; we are never coming back.
    m_process.big_lock().unlock_if_locked();
    InterruptDisabler disabler;
    set_state(Thread::State::Dying);
    // Yield unless the scheduler is already mid-switch.
    if (!Scheduler::is_active())
        Scheduler::pick_next_and_switch_now();
}
2019-11-16 14:18:59 +03:00
void Thread : : yield_without_holding_big_lock ( )
2019-03-24 00:03:17 +03:00
{
2019-04-01 21:02:05 +03:00
bool did_unlock = process ( ) . big_lock ( ) . unlock_if_locked ( ) ;
2019-03-24 00:03:17 +03:00
Scheduler : : yield ( ) ;
2019-04-01 21:02:05 +03:00
if ( did_unlock )
process ( ) . big_lock ( ) . lock ( ) ;
2019-03-24 00:03:17 +03:00
}
2019-12-01 13:57:20 +03:00
2019-12-01 17:54:47 +03:00
// Releases the process big lock if this thread holds it.
// Returns true if it was actually unlocked (so the caller knows to relock).
bool Thread::unlock_process_if_locked()
{
    return process().big_lock().unlock_if_locked();
}
// Re-acquires the process big lock; pairs with unlock_process_if_locked().
void Thread::relock_process()
{
    process().big_lock().lock();
}
2019-03-24 00:03:17 +03:00
2019-07-18 18:26:11 +03:00
// Blocks the current thread for `ticks` timer ticks via a SleepBlocker.
// Returns the absolute uptime tick at which the sleep was due to end.
u64 Thread::sleep(u32 ticks)
{
    ASSERT(state() == Thread::Running);
    u64 wakeup_time = g_uptime + ticks;
    auto ret = current->block<Thread::SleepBlocker>(wakeup_time);
    if (wakeup_time > g_uptime) {
        // Woke up early: the only legitimate cause is a signal.
        ASSERT(ret == Thread::BlockResult::InterruptedBySignal);
    }
    return wakeup_time;
}
2019-11-02 21:34:06 +03:00
// Blocks the current thread until the given absolute uptime tick.
// Returns `wakeup_time` unchanged; an early wake must be signal-induced.
u64 Thread::sleep_until(u64 wakeup_time)
{
    ASSERT(state() == Thread::Running);
    auto ret = current->block<Thread::SleepBlocker>(wakeup_time);
    if (wakeup_time > g_uptime)
        ASSERT(ret == Thread::BlockResult::InterruptedBySignal);
    return wakeup_time;
}
2019-07-19 10:51:48 +03:00
// Returns a human-readable name for this thread's current state; for Blocked
// threads the string is supplied by the active Blocker.
const char* Thread::state_string() const
{
    switch (state()) {
    case Thread::Invalid:
        return "Invalid";
    case Thread::Runnable:
        return "Runnable";
    case Thread::Running:
        return "Running";
    case Thread::Dying:
        return "Dying";
    case Thread::Dead:
        return "Dead";
    case Thread::Stopped:
        return "Stopped";
    case Thread::Skip1SchedulerPass:
        return "Skip1";
    case Thread::Skip0SchedulerPasses:
        return "Skip0";
    case Thread::Queued:
        return "Queued";
    case Thread::Blocked:
        ASSERT(m_blocker != nullptr);
        return m_blocker->state_string();
    }
    // Unknown enumerator: report and panic.
    kprintf("Thread::state_string(): Invalid state: %u\n", state());
    ASSERT_NOT_REACHED();
    return nullptr;
}
// Performs final cleanup of a Dying thread. Only ever runs on the finalizer
// thread. Marks the thread Dead, hands its exit value to any joiner, and
// optionally dumps a backtrace (e.g. after a core-dump signal).
void Thread::finalize()
{
    ASSERT(current == g_finalizer);

    dbgprintf("Finalizing Thread %u in %s(%u)\n", tid(), m_process.name().characters(), pid());
    set_state(Thread::State::Dead);

    if (m_joiner) {
        ASSERT(m_joiner->m_joinee == this);
        // Deliver our exit value to the thread blocked in join().
        static_cast<JoinBlocker*>(m_joiner->m_blocker)->set_joinee_exit_value(m_exit_value);
        m_joiner->m_joinee = nullptr;
        // NOTE: We clear the joiner pointer here as well, to be tidy.
        m_joiner = nullptr;
    }

    if (m_dump_backtrace_on_finalization)
        dbg() << backtrace_impl();
}
// Finalizer entry point: snapshots all Dying threads (with interrupts off),
// then finalizes and deletes each one. When the last thread of a process is
// reaped, the process itself is finalized too.
void Thread::finalize_dying_threads()
{
    ASSERT(current == g_finalizer);
    Vector<Thread*, 32> dying_threads;
    {
        // Collect under InterruptDisabler so the set can't change while iterating.
        InterruptDisabler disabler;
        for_each_in_state(Thread::State::Dying, [&](Thread& thread) {
            dying_threads.append(&thread);
            return IterationDecision::Continue;
        });
    }
    dbgprintf("Finalizing %u dying threads\n", dying_threads.size());
    for (auto* thread : dying_threads) {
        // Grab the process reference before deleting the thread.
        auto& process = thread->process();
        thread->finalize();
        delete thread;
        if (process.m_thread_count == 0)
            process.finalize();
    }
    dbgprintf("Done\n");
}
// Timer-tick accounting: attributes the tick to user or kernel time based on
// the saved CS privilege bits, and returns whether the thread still has
// timeslice left (non-zero after decrement).
bool Thread::tick()
{
    ++m_ticks;
    if (tss().cs & 3)
        ++m_process.m_ticks_in_user;
    else
        ++m_process.m_ticks_in_kernel;
    return --m_ticks_left;
}
2019-07-03 22:17:35 +03:00
// Marks `signal` pending on this thread (unless its disposition is to ignore
// it). `sender` may be null for kernel-originated signals; it is used only for
// logging here.
void Thread::send_signal(u8 signal, Process* sender)
{
    ASSERT(signal < 32);
    InterruptDisabler disabler;

    // FIXME: Figure out what to do for masked signals. Should we also ignore them here?
    if (should_ignore_signal(signal)) {
        dbg() << "signal " << signal << " was ignored by " << process();
        return;
    }

    if (sender)
        dbgprintf("signal: %s(%u) sent %d to %s(%u)\n", sender->name().characters(), sender->pid(), signal, process().name().characters(), pid());
    else
        dbgprintf("signal: kernel sent %d to %s(%u)\n", signal, process().name().characters(), pid());

    // Signals are 1-based; bit (signal - 1) represents it in the pending mask.
    m_pending_signals |= 1 << (signal - 1);
}
2019-10-07 12:22:50 +03:00
// Certain exceptions, such as SIGSEGV and SIGILL, put a
// thread into a state where the signal handler must be
// invoked immediately, otherwise it will continue to fault.
// This function should be used in an exception handler to
// ensure that when the thread resumes, it's executing in
// the appropriate signal handler.
void Thread::send_urgent_signal_to_self(u8 signal)
{
    // FIXME: because of a bug in dispatch_signal we can't
    // setup a signal while we are the current thread. Because of
    // this we use a work-around where we send the signal and then
    // block, allowing the scheduler to properly dispatch the signal
    // before the thread is next run.
    send_signal(signal, &process());
    (void)block<SemiPermanentBlocker>(SemiPermanentBlocker::Reason::Signal);
}
2019-03-24 00:03:17 +03:00
bool Thread : : has_unmasked_pending_signals ( ) const
{
return m_pending_signals & ~ m_signal_mask ;
}
// Finds the lowest-numbered pending, unmasked signal and dispatches it.
// Must be called with at least one such signal pending.
ShouldUnblockThread Thread::dispatch_one_pending_signal()
{
    ASSERT_INTERRUPTS_DISABLED();
    u32 signal_candidates = m_pending_signals & ~m_signal_mask;
    ASSERT(signal_candidates);

    // Scan from signal 1 upward; bit (signal - 1) marks a pending signal.
    u8 signal = 1;
    for (; signal < 32; ++signal) {
        if (signal_candidates & (1 << (signal - 1))) {
            break;
        }
    }
    return dispatch_signal(signal);
}
2019-06-07 18:13:23 +03:00
// The default disposition of a signal when no user handler is installed,
// per classic POSIX semantics (see signal(7)).
enum class DefaultSignalAction {
    Terminate,
    Ignore,
    DumpCore,
    Stop,
    Continue,
};
2019-07-03 22:17:35 +03:00
// Maps a signal number to its default action (per the classic POSIX
// dispositions). Panics on unknown signal numbers.
DefaultSignalAction default_signal_action(u8 signal)
{
    ASSERT(signal && signal < NSIG);

    switch (signal) {
    case SIGHUP:
    case SIGINT:
    case SIGKILL:
    case SIGPIPE:
    case SIGALRM:
    case SIGUSR1:
    case SIGUSR2:
    case SIGVTALRM:
    case SIGSTKFLT:
    case SIGIO:
    case SIGPROF:
    case SIGTERM:
    case SIGPWR:
        return DefaultSignalAction::Terminate;
    case SIGCHLD:
    case SIGURG:
    case SIGWINCH:
        return DefaultSignalAction::Ignore;
    case SIGQUIT:
    case SIGILL:
    case SIGTRAP:
    case SIGABRT:
    case SIGBUS:
    case SIGFPE:
    case SIGSEGV:
    case SIGXCPU:
    case SIGXFSZ:
    case SIGSYS:
        return DefaultSignalAction::DumpCore;
    case SIGCONT:
        return DefaultSignalAction::Continue;
    case SIGSTOP:
    case SIGTSTP:
    case SIGTTIN:
    case SIGTTOU:
        return DefaultSignalAction::Stop;
    }
    ASSERT_NOT_REACHED();
}
2019-07-08 19:59:48 +03:00
bool Thread : : should_ignore_signal ( u8 signal ) const
{
ASSERT ( signal < 32 ) ;
auto & action = m_signal_action_data [ signal ] ;
if ( action . handler_or_sigaction . is_null ( ) )
return default_signal_action ( signal ) = = DefaultSignalAction : : Ignore ;
if ( action . handler_or_sigaction . as_ptr ( ) = = SIG_IGN )
return true ;
return false ;
}
2019-10-07 12:22:50 +03:00
bool Thread : : has_signal_handler ( u8 signal ) const
{
ASSERT ( signal < 32 ) ;
auto & action = m_signal_action_data [ signal ] ;
return ! action . handler_or_sigaction . is_null ( ) ;
}
2019-11-04 11:29:47 +03:00
// Pushes a 32-bit value onto the userspace stack tracked by *stack,
// decrementing the stack pointer first (x86 full-descending convention).
static void push_value_on_user_stack(u32* stack, u32 data)
{
    *stack -= sizeof(u32);
    auto* slot = (u32*)*stack;
    *slot = data;
}
2019-07-03 22:17:35 +03:00
// Delivers `signal` to this thread: either applies the default action
// (terminate/stop/continue/core-dump) or rigs the thread to run its userspace
// handler on next resume by pushing saved register state onto the user stack
// and pointing execution at the signal trampoline.
// Returns whether a blocked thread should be unblocked as a result.
ShouldUnblockThread Thread::dispatch_signal(u8 signal)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(signal > 0 && signal <= 32);
    ASSERT(!process().is_ring0());

#ifdef SIGNAL_DEBUG
    kprintf("dispatch_signal %s(%u) <- %u\n", process().name().characters(), pid(), signal);
#endif

    auto& action = m_signal_action_data[signal];
    // FIXME: Implement SA_SIGINFO signal handlers.
    ASSERT(!(action.flags & SA_SIGINFO));

    // Mark this signal as handled.
    m_pending_signals &= ~(1 << (signal - 1));

    // SIGSTOP/SIGCONT have fixed semantics regardless of any handler.
    if (signal == SIGSTOP) {
        set_state(Stopped);
        return ShouldUnblockThread::No;
    }

    if (signal == SIGCONT && state() == Stopped)
        set_state(Runnable);

    auto handler_vaddr = action.handler_or_sigaction;
    if (handler_vaddr.is_null()) {
        // No handler installed: apply the default action.
        switch (default_signal_action(signal)) {
        case DefaultSignalAction::Stop:
            set_state(Stopped);
            return ShouldUnblockThread::No;
        case DefaultSignalAction::DumpCore:
            // Ask every thread of the process to dump a backtrace when finalized.
            process().for_each_thread([](auto& thread) {
                thread.set_dump_backtrace_on_finalization();
                return IterationDecision::Continue;
            });
            [[fallthrough]];
        case DefaultSignalAction::Terminate:
            m_process.terminate_due_to_signal(signal);
            return ShouldUnblockThread::No;
        case DefaultSignalAction::Ignore:
            // Ignored signals were filtered out earlier (should_ignore_signal).
            ASSERT_NOT_REACHED();
        case DefaultSignalAction::Continue:
            return ShouldUnblockThread::Yes;
        }
        ASSERT_NOT_REACHED();
    }

    if (handler_vaddr.as_ptr() == SIG_IGN) {
#ifdef SIGNAL_DEBUG
        kprintf("%s(%u) ignored signal %u\n", process().name().characters(), pid(), signal);
#endif
        return ShouldUnblockThread::Yes;
    }

    // We need the target process's address space mapped to write its user stack.
    ProcessPagingScope paging_scope(m_process);

    // Compute the handler-time signal mask per sigaction(2) semantics.
    u32 old_signal_mask = m_signal_mask;
    u32 new_signal_mask = action.mask;
    if (action.flags & SA_NODEFER)
        new_signal_mask &= ~(1 << (signal - 1));
    else
        new_signal_mask |= 1 << (signal - 1);

    m_signal_mask |= new_signal_mask;

    // Pushes the interrupted context (flags, eip, GPRs, old mask, signal
    // number, handler address, fake return address) onto the user stack in the
    // layout the signal trampoline expects, keeping 16-byte alignment.
    auto setup_stack = [&]<typename ThreadState>(ThreadState state, u32* stack)
    {
        u32 old_esp = *stack;
        u32 ret_eip = state.eip;
        u32 ret_eflags = state.eflags;

        // Align the stack to 16 bytes.
        // Note that we push 56 bytes (4 * 14) on to the stack,
        // so we need to account for this here.
        u32 stack_alignment = (*stack - 56) % 16;
        *stack -= stack_alignment;

        push_value_on_user_stack(stack, ret_eflags);
        push_value_on_user_stack(stack, ret_eip);
        push_value_on_user_stack(stack, state.eax);
        push_value_on_user_stack(stack, state.ecx);
        push_value_on_user_stack(stack, state.edx);
        push_value_on_user_stack(stack, state.ebx);
        push_value_on_user_stack(stack, old_esp);
        push_value_on_user_stack(stack, state.ebp);
        push_value_on_user_stack(stack, state.esi);
        push_value_on_user_stack(stack, state.edi);

        // PUSH old_signal_mask
        push_value_on_user_stack(stack, old_signal_mask);

        push_value_on_user_stack(stack, signal);
        push_value_on_user_stack(stack, handler_vaddr.get());
        push_value_on_user_stack(stack, 0); //push fake return address

        ASSERT((*stack % 16) == 0);
    };

    // We now place the thread state on the userspace stack.
    // Note that when we are in the kernel (ie. blocking) we cannot use the
    // tss, as that will contain kernel state; instead, we use a RegisterDump.
    // Conversely, when the thread isn't blocking the RegisterDump may not be
    // valid (fork, exec etc) but the tss will, so we use that instead.
    if (!in_kernel()) {
        u32* stack = &m_tss.esp;
        setup_stack(m_tss, stack);
        Scheduler::prepare_to_modify_tss(*this);
        m_tss.cs = 0x1b;
        m_tss.ds = 0x23;
        m_tss.es = 0x23;
        m_tss.fs = 0x23;
        m_tss.gs = thread_specific_selector() | 3;
        m_tss.eip = g_return_to_ring3_from_signal_trampoline.get();
        // FIXME: This state is such a hack. It avoids trouble if 'current' is the process receiving a signal.
        set_state(Skip1SchedulerPass);
    } else {
        auto& regs = get_register_dump_from_stack();
        u32* stack = &regs.esp_if_crossRing;
        setup_stack(regs, stack);
        regs.eip = g_return_to_ring3_from_signal_trampoline.get();
    }

#ifdef SIGNAL_DEBUG
    kprintf("signal: Okay, %s(%u) {%s} has been primed with signal handler %w:%x\n", process().name().characters(), pid(), state_string(), m_tss.cs, m_tss.eip);
#endif
    return ShouldUnblockThread::Yes;
}
// Resets all signal actions to their defaults. SIGCHLD and SIGWINCH get an
// explicit SIG_IGN handler so they are dropped rather than defaulted.
void Thread::set_default_signal_dispositions()
{
    // FIXME: Set up all the right default actions. See signal(7).
    memset(&m_signal_action_data, 0, sizeof(m_signal_action_data));
    m_signal_action_data[SIGCHLD].handler_or_sigaction = VirtualAddress((u32)SIG_IGN);
    m_signal_action_data[SIGWINCH].handler_or_sigaction = VirtualAddress((u32)SIG_IGN);
}
2019-07-03 22:17:35 +03:00
void Thread : : push_value_on_stack ( u32 value )
2019-03-24 00:03:17 +03:00
{
m_tss . esp - = 4 ;
2019-07-03 22:17:35 +03:00
u32 * stack_ptr = ( u32 * ) m_tss . esp ;
2019-03-24 00:03:17 +03:00
* stack_ptr = value ;
}
2019-12-15 19:58:53 +03:00
// Returns the RegisterDump holding the userspace register state, located at
// the top of this thread's kernel stack (the CPU pushed it there on entry to
// the kernel).
// NOTE(review): the old comment mentioned "subtract 2" for processor-pushed
// args, but the code subtracts sizeof(RegisterDump) — presumably RegisterDump
// already accounts for the full frame; confirm against its definition.
RegisterDump& Thread::get_register_dump_from_stack()
{
    return *(RegisterDump*)(kernel_stack_top() - sizeof(RegisterDump));
}
2019-12-19 01:03:23 +03:00
// Builds the main thread's userspace stack: allocates the stack region, copies
// argv/envp string contents and pointer arrays to the region's base, then
// pushes envp, argv, argc and a fake return address at the top.
// Returns the initial esp for the new program.
u32 Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment)
{
    auto* region = m_process.allocate_region(VirtualAddress(), default_userspace_stack_size, "Stack (Main thread)", PROT_READ | PROT_WRITE, false);
    ASSERT(region);
    region->set_stack(true);

    u32 new_esp = region->vaddr().offset(default_userspace_stack_size).get();

    // FIXME: This is weird, we put the argument contents at the base of the stack,
    // and the argument pointers at the top? Why?
    char* stack_base = (char*)region->vaddr().get();
    int argc = arguments.size();
    // Layout at the base: argv[] array, then envp[] array, then the packed strings.
    char** argv = (char**)stack_base;
    char** env = argv + arguments.size() + 1;
    char* bufptr = stack_base + (sizeof(char*) * (arguments.size() + 1)) + (sizeof(char*) * (environment.size() + 1));

    for (int i = 0; i < arguments.size(); ++i) {
        argv[i] = bufptr;
        memcpy(bufptr, arguments[i].characters(), arguments[i].length());
        bufptr += arguments[i].length();
        *(bufptr++) = '\0';
    }
    argv[arguments.size()] = nullptr;  // argv is null-terminated

    for (int i = 0; i < environment.size(); ++i) {
        env[i] = bufptr;
        memcpy(bufptr, environment[i].characters(), environment[i].length());
        bufptr += environment[i].length();
        *(bufptr++) = '\0';
    }
    env[environment.size()] = nullptr;  // envp is null-terminated

    auto push_on_new_stack = [&new_esp](u32 value)
    {
        new_esp -= 4;
        u32* stack_ptr = (u32*)new_esp;
        *stack_ptr = value;
    };

    // NOTE: The stack needs to be 16-byte aligned.
    push_on_new_stack((u32)env);
    push_on_new_stack((u32)argv);
    push_on_new_stack((u32)argc);
    push_on_new_stack(0);  // fake return address

    return new_esp;
}
// Creates a copy of this thread inside `process` (fork support): duplicates
// signal actions/mask, FPU state, and the TLS pointer. The new thread's TSS
// and stacks come from the Thread constructor.
Thread* Thread::clone(Process& process)
{
    auto* clone = new Thread(process);
    memcpy(clone->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
    clone->m_signal_mask = m_signal_mask;
    memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
    clone->m_has_used_fpu = m_has_used_fpu;
    clone->m_thread_specific_data = m_thread_specific_data;
    return clone;
}
// One-time subsystem initialization; currently just boots the scheduler.
void Thread::initialize()
{
    Scheduler::initialize();
}
2019-03-24 01:50:34 +03:00
// Returns a snapshot of every live thread. Taken under InterruptDisabler so
// the table cannot change mid-copy; the returned pointers may still go stale
// once interrupts are re-enabled.
Vector<Thread*> Thread::all_threads()
{
    Vector<Thread*> threads;
    InterruptDisabler disabler;
    threads.ensure_capacity(thread_table().size());
    for (auto* thread : thread_table())
        threads.unchecked_append(thread);
    return threads;
}
2019-04-17 13:41:51 +03:00
bool Thread : : is_thread ( void * ptr )
{
ASSERT_INTERRUPTS_DISABLED ( ) ;
2019-05-18 19:31:36 +03:00
return thread_table ( ) . contains ( ( Thread * ) ptr ) ;
}
2019-05-18 21:07:00 +03:00
// Atomically (w.r.t. interrupts) transitions the thread to `new_state`,
// keeping the scheduler's per-state lists in sync and waking the finalizer
// when a thread becomes Dying.
void Thread::set_state(State new_state)
{
    InterruptDisabler disabler;
    if (new_state == m_state)
        return;

    if (new_state == Blocked) {
        // we should always have a Blocker while blocked
        ASSERT(m_blocker != nullptr);
    }

    m_state = new_state;

    // The colonel process (pid 0) is not managed by the scheduler lists.
    if (m_process.pid() != 0) {
        Scheduler::update_state_for_thread(*this);
    }

    if (new_state == Dying)
        g_finalizer_wait_queue->wake_all();
}
2019-07-25 22:02:19 +03:00
// Public backtrace entry point; the ProcessInspectionHandle parameter exists
// only to prove the caller holds an inspection handle on the process.
String Thread::backtrace(ProcessInspectionHandle&) const
{
    return backtrace_impl();
}
// Walks the saved frame-pointer chain and symbolicates each return address:
// kernel addresses via ksymbolicate, userspace addresses via the process's
// ELF loader (when available and safe to use).
String Thread::backtrace_impl() const
{
    auto& process = const_cast<Process&>(this->process());
    // Walk within the target process's address space.
    ProcessPagingScope paging_scope(process);
    struct RecognizedSymbol {
        u32 address;
        const KSym* ksym;
    };
    StringBuilder builder;
    Vector<RecognizedSymbol, 64> recognized_symbols;
    recognized_symbols.append({ tss().eip, ksymbolicate(tss().eip) });
    // Each frame: [ebp] = caller's ebp, [ebp+4] = return address.
    for (u32* stack_ptr = (u32*)frame_ptr(); process.validate_read_from_kernel(VirtualAddress((u32)stack_ptr), sizeof(void*) * 2); stack_ptr = (u32*)*stack_ptr) {
        u32 retaddr = stack_ptr[1];
        recognized_symbols.append({ retaddr, ksymbolicate(retaddr) });
    }

    for (auto& symbol : recognized_symbols) {
        if (!symbol.address)
            break;
        if (!symbol.ksym) {
            // Not a kernel symbol: try the process's ELF symbols (not while scheduling).
            if (!Scheduler::is_active() && process.elf_loader() && process.elf_loader()->has_symbols())
                builder.appendf("%p  %s\n", symbol.address, process.elf_loader()->symbolicate(symbol.address).characters());
            else
                builder.appendf("%p\n", symbol.address);
            continue;
        }
        unsigned offset = symbol.address - symbol.ksym->address;
        // Addresses far past the highest known symbol are printed raw.
        if (symbol.ksym->address == ksym_highest_address && offset > 4096)
            builder.appendf("%p\n", symbol.address);
        else
            builder.appendf("%p  %s +%u\n", symbol.address, demangle(symbol.ksym->name).characters(), offset);
    }
    return builder.to_string();
}
2019-09-07 16:50:44 +03:00
2019-12-11 22:36:56 +03:00
// Like backtrace_impl() but returns raw return addresses (no symbolication),
// starting the frame-pointer walk at the given ebp.
Vector<u32> Thread::raw_backtrace(u32 ebp) const
{
    auto& process = const_cast<Process&>(this->process());
    ProcessPagingScope paging_scope(process);
    Vector<u32> backtrace;
    backtrace.append(ebp);
    // Each frame: [ebp] = caller's ebp, [ebp+4] = return address.
    for (u32* stack_ptr = (u32*)ebp; process.validate_read_from_kernel(VirtualAddress((u32)stack_ptr), sizeof(void*) * 2); stack_ptr = (u32*)*stack_ptr) {
        u32 retaddr = stack_ptr[1];
        backtrace.append(retaddr);
    }
    return backtrace;
}
2019-09-07 16:50:44 +03:00
// Allocates and initializes this thread's TLS region: a copy of the process's
// master TLS image followed by the ThreadSpecificData block (whose `self`
// pointer is what the GS-relative TLS ABI expects).
void Thread::make_thread_specific_region(Badge<Process>)
{
    size_t thread_specific_region_alignment = max(process().m_master_tls_alignment, alignof(ThreadSpecificData));
    size_t thread_specific_region_size = align_up_to(process().m_master_tls_size, thread_specific_region_alignment) + sizeof(ThreadSpecificData);
    auto* region = process().allocate_region({}, thread_specific_region_size, "Thread-specific", PROT_READ | PROT_WRITE, true);
    // ThreadSpecificData sits after the (aligned) TLS image.
    auto* thread_specific_data = (ThreadSpecificData*)region->vaddr().offset(align_up_to(process().m_master_tls_size, thread_specific_region_alignment)).as_ptr();
    auto* thread_local_storage = (u8*)((u8*)thread_specific_data) - align_up_to(process().m_master_tls_size, process().m_master_tls_alignment);
    m_thread_specific_data = VirtualAddress((u32)thread_specific_data);
    // Self-pointer required by the TLS ABI (GS:0 points at itself).
    thread_specific_data->self = thread_specific_data;
    if (process().m_master_tls_size)
        memcpy(thread_local_storage, process().m_master_tls_region->vaddr().as_ptr(), process().m_master_tls_size);
}
2019-10-13 15:36:55 +03:00
// Debug-log formatting for a thread: "name(pid:tid)".
const LogStream& operator<<(const LogStream& stream, const Thread& value)
{
    return stream << value.process().name() << "(" << value.pid() << ":" << value.tid() << ")";
}
2019-11-06 18:26:51 +03:00
2019-12-22 14:23:44 +03:00
// Parks the current thread on `queue` until wake_from_queue() is called.
// Optionally donates the remaining timeslice to `beneficiary`. The process
// big lock is released while queued and re-acquired afterwards.
void Thread::wait_on(WaitQueue& queue, Thread* beneficiary, const char* reason)
{
    bool did_unlock = unlock_process_if_locked();
    // Interrupts must be off between state change and the yield so we can't
    // be woken before we're actually queued.
    cli();
    set_state(State::Queued);
    queue.enqueue(*current);
    // Yield and wait for the queue to wake us up again.
    if (beneficiary)
        Scheduler::donate_to(beneficiary, reason);
    else
        Scheduler::yield();
    // We've unblocked, relock the process if needed and carry on.
    if (did_unlock)
        relock_process();
}
// Counterpart to wait_on(): makes a Queued thread schedulable again.
void Thread::wake_from_queue()
{
    ASSERT(state() == State::Queued);
    set_state(State::Runnable);
}
2019-12-30 21:23:13 +03:00
// Looks up a live thread by TID; returns nullptr when no thread matches.
// Interrupts must be disabled so the thread set cannot change mid-scan.
Thread* Thread::from_tid(int tid)
{
    ASSERT_INTERRUPTS_DISABLED();
    Thread* result = nullptr;
    Thread::for_each([&](auto& candidate) {
        if (candidate.tid() == tid)
            result = &candidate;
        return IterationDecision::Continue;
    });
    return result;
}