/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <AK/Demangle.h>
#include <AK/StringBuilder.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/KSyms.h>
#include <Kernel/Process.h>
#include <Kernel/Profiling.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Thread.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/ProcessPagingScope.h>
#include <LibC/signal_numbers.h>
#include <LibELF/ELFLoader.h>

//#define SIGNAL_DEBUG
//#define THREAD_DEBUG

namespace Kernel {

Thread* Thread::current;

static FPUState s_clean_fpu_state;
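
// thread_specific_selector() lazily allocates a GDT entry used as the ring 3
// GS segment for thread-local storage: a present, 32-bit, DPL 3 data segment.
// The descriptor's base is expected to be pointed at the current thread's
// ThreadSpecificData elsewhere (e.g. on context switch) so that GS-relative
// loads reach this thread's TLS block.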
u16 thread_specific_selector()
{
    static u16 selector;
    if (!selector) {
        selector = gdt_alloc_entry();
        auto& descriptor = get_gdt_entry(selector);
        descriptor.dpl = 3;
        descriptor.segment_present = 1;
        descriptor.granularity = 0;
        descriptor.zero = 0;
        descriptor.operation_size = 1;
        descriptor.descriptor_type = 1;
        descriptor.type = 2;
    }
    return selector;
}

Descriptor& thread_specific_descriptor()
{
    return get_gdt_entry(thread_specific_selector());
}

HashTable<Thread*>& thread_table()
{
    ASSERT_INTERRUPTS_DISABLED();
    static HashTable<Thread*>* table;
    if (!table)
        table = new HashTable<Thread*>;
    return *table;
}

Thread::Thread(Process& process)
    : m_process(process)
    , m_name(process.name())
{
    if (m_process.m_thread_count == 0) {
        // First thread gets TID == PID
        m_tid = process.pid();
    } else {
        m_tid = Process::allocate_pid();
    }
    process.m_thread_count++;
#ifdef THREAD_DEBUG
    dbg() << "Created new thread " << process.name() << "(" << process.pid() << ":" << m_tid << ")";
#endif
    set_default_signal_dispositions();
    m_fpu_state = (FPUState*)kmalloc_aligned(sizeof(FPUState), 16);
    reset_fpu_state();
    memset(&m_tss, 0, sizeof(m_tss));
    m_tss.iomapbase = sizeof(TSS32);

    // Only IF is set when a process boots.
    m_tss.eflags = 0x0202;
    u16 cs, ds, ss, gs;

    if (m_process.is_ring0()) {
        cs = 0x08;
        ds = 0x10;
        ss = 0x10;
        gs = 0;
    } else {
        cs = 0x1b;
        ds = 0x23;
        ss = 0x23;
        gs = thread_specific_selector() | 3;
    }

    m_tss.ds = ds;
    m_tss.es = ds;
    m_tss.fs = ds;
    m_tss.gs = gs;
    m_tss.ss = ss;
    m_tss.cs = cs;

    m_tss.cr3 = m_process.page_directory().cr3();

    m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), Region::Access::Read | Region::Access::Write, false, true);
    m_kernel_stack_region->set_stack(true);
    m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
    m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;

    if (m_process.is_ring0()) {
        m_tss.esp = m_kernel_stack_top;
    } else {
        // Ring 3 processes get a separate stack for ring 0.
        // The ring 3 stack will be assigned by exec().
        m_tss.ss0 = 0x10;
        m_tss.esp0 = m_kernel_stack_top;
    }

    if (m_process.pid() != 0) {
        InterruptDisabler disabler;
        thread_table().set(this);
        Scheduler::init_thread(*this);
    }
}

Thread::~Thread()
{
    kfree_aligned(m_fpu_state);

    {
        InterruptDisabler disabler;
        thread_table().remove(this);
    }

    if (selector())
        gdt_free_entry(selector());

    ASSERT(m_process.m_thread_count);
    m_process.m_thread_count--;
}

void Thread::unblock()
{
    if (current == this) {
        if (m_should_die)
            set_state(Thread::Dying);
        else
            set_state(Thread::Running);
        return;
    }
    ASSERT(m_state != Thread::Runnable && m_state != Thread::Running);
    if (m_should_die)
        set_state(Thread::Dying);
    else
        set_state(Thread::Runnable);
}
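
// A thread is never killed directly. Instead, call set_should_die() on it:
// if the thread is blocked in the kernel, this unblocks it so it can unwind
// its kernel stack (running the destructors of RAII guards, releasing locks
// and references on the way out), and the thread then dies on its own just
// before returning to userspace. die_if_needed() is where that final
// transition to the Dying state happens.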
void Thread::set_should_die()
{
    if (m_should_die) {
#ifdef THREAD_DEBUG
        dbg() << *this << " Should already die";
#endif
        return;
    }
    InterruptDisabler disabler;
    // Remember that we should die instead of returning to
    // the userspace.
    m_should_die = true;
    if (is_blocked()) {
        ASSERT(in_kernel());
        ASSERT(m_blocker != nullptr);
        // We're blocked in the kernel.
        m_blocker->set_interrupted_by_death();
        unblock();
    } else if (!in_kernel()) {
        // We're executing in userspace (and we're clearly
        // not the current thread). No need to unwind, so
        // set the state to dying right away. This also
        // makes sure we won't be scheduled anymore.
        set_state(Thread::State::Dying);
    }
}

void Thread::die_if_needed()
{
    ASSERT(current == this);
    if (!m_should_die)
        return;

    unlock_process_if_locked();
    InterruptDisabler disabler;
    set_state(Thread::State::Dying);
    if (!Scheduler::is_active())
        Scheduler::pick_next_and_switch_now();
}
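
// When a thread yields voluntarily while it may be holding the process's big
// lock, the lock has to be released first so other threads of the process can
// make progress while we're off the CPU; it is re-acquired once we are
// scheduled again and the original caller resumes.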
void Thread::yield_without_holding_big_lock()
{
    bool did_unlock = unlock_process_if_locked();
    Scheduler::yield();
    if (did_unlock)
        relock_process();
}

bool Thread::unlock_process_if_locked()
{
    return process().big_lock().force_unlock_if_locked();
}

void Thread::relock_process()
{
    process().big_lock().lock();
}

u64 Thread::sleep(u32 ticks)
{
    ASSERT(state() == Thread::Running);
    u64 wakeup_time = g_uptime + ticks;
    auto ret = Thread::current->block<Thread::SleepBlocker>(wakeup_time);
    if (wakeup_time > g_uptime) {
        ASSERT(ret != Thread::BlockResult::WokeNormally);
    }
    return wakeup_time;
}

u64 Thread::sleep_until(u64 wakeup_time)
{
    ASSERT(state() == Thread::Running);
    auto ret = Thread::current->block<Thread::SleepBlocker>(wakeup_time);
    if (wakeup_time > g_uptime)
        ASSERT(ret != Thread::BlockResult::WokeNormally);
    return wakeup_time;
}

const char* Thread::state_string() const
{
    switch (state()) {
    case Thread::Invalid:
        return "Invalid";
    case Thread::Runnable:
        return "Runnable";
    case Thread::Running:
        return "Running";
    case Thread::Dying:
        return "Dying";
    case Thread::Dead:
        return "Dead";
    case Thread::Stopped:
        return "Stopped";
    case Thread::Skip1SchedulerPass:
        return "Skip1";
    case Thread::Skip0SchedulerPasses:
        return "Skip0";
    case Thread::Queued:
        return "Queued";
    case Thread::Blocked:
        ASSERT(m_blocker != nullptr);
        return m_blocker->state_string();
    }
    kprintf("Thread::state_string(): Invalid state: %u\n", state());
    ASSERT_NOT_REACHED();
    return nullptr;
}

void Thread::finalize()
{
    ASSERT(current == g_finalizer);

#ifdef THREAD_DEBUG
    dbg() << "Finalizing thread " << *this;
#endif
    set_state(Thread::State::Dead);

    if (m_joiner) {
        ASSERT(m_joiner->m_joinee == this);
        static_cast<JoinBlocker*>(m_joiner->m_blocker)->set_joinee_exit_value(m_exit_value);
        static_cast<JoinBlocker*>(m_joiner->m_blocker)->set_interrupted_by_death();
        m_joiner->m_joinee = nullptr;
        // NOTE: We clear the joiner pointer here as well, to be tidy.
        m_joiner = nullptr;
    }

    if (m_dump_backtrace_on_finalization)
        dbg() << backtrace_impl();
}

void Thread::finalize_dying_threads()
{
    ASSERT(current == g_finalizer);
    Vector<Thread*, 32> dying_threads;
    {
        InterruptDisabler disabler;
        for_each_in_state(Thread::State::Dying, [&](Thread& thread) {
            dying_threads.append(&thread);
            return IterationDecision::Continue;
        });
    }
    for (auto* thread : dying_threads) {
        auto& process = thread->process();
        thread->finalize();
        delete thread;
        if (process.m_thread_count == 0)
            process.finalize();
    }
}

bool Thread::tick()
{
    ++m_ticks;
    if (tss().cs & 3)
        ++m_process.m_ticks_in_user;
    else
        ++m_process.m_ticks_in_kernel;
    return --m_ticks_left;
}
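
// Pending signals are recorded as a bitmask in m_pending_signals, with bit
// (signal - 1) standing for that signal. Actual delivery happens later, when
// dispatch_one_pending_signal()/dispatch_signal() run with interrupts disabled.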
void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
{
    ASSERT(signal < 32);
    InterruptDisabler disabler;

    // FIXME: Figure out what to do for masked signals. Should we also ignore them here?
    if (should_ignore_signal(signal)) {
#ifdef SIGNAL_DEBUG
        dbg() << "Signal " << signal << " was ignored by " << process();
#endif
        return;
    }

#ifdef SIGNAL_DEBUG
    if (sender)
        dbg() << "Signal: " << *sender << " sent " << signal << " to " << process();
    else
        dbg() << "Signal: Kernel sent " << signal << " to " << process();
#endif

    m_pending_signals |= 1 << (signal - 1);
}

// Certain exceptions, such as SIGSEGV and SIGILL, put a
// thread into a state where the signal handler must be
// invoked immediately, otherwise it will continue to fault.
// This function should be used in an exception handler to
// ensure that when the thread resumes, it's executing in
// the appropriate signal handler.
void Thread::send_urgent_signal_to_self(u8 signal)
{
    // FIXME: because of a bug in dispatch_signal we can't
    // setup a signal while we are the current thread. Because of
    // this we use a work-around where we send the signal and then
    // block, allowing the scheduler to properly dispatch the signal
    // before the thread is next run.
    send_signal(signal, &process());
    (void)block<SemiPermanentBlocker>(SemiPermanentBlocker::Reason::Signal);
}

bool Thread::has_unmasked_pending_signals() const
{
    return m_pending_signals & ~m_signal_mask;
}

ShouldUnblockThread Thread::dispatch_one_pending_signal()
{
    ASSERT_INTERRUPTS_DISABLED();
    u32 signal_candidates = m_pending_signals & ~m_signal_mask;
    ASSERT(signal_candidates);

    u8 signal = 1;
    for (; signal < 32; ++signal) {
        if (signal_candidates & (1 << (signal - 1))) {
            break;
        }
    }
    return dispatch_signal(signal);
}

enum class DefaultSignalAction {
    Terminate,
    Ignore,
    DumpCore,
    Stop,
    Continue,
};

DefaultSignalAction default_signal_action(u8 signal)
{
    ASSERT(signal && signal < NSIG);

    switch (signal) {
    case SIGHUP:
    case SIGINT:
    case SIGKILL:
    case SIGPIPE:
    case SIGALRM:
    case SIGUSR1:
    case SIGUSR2:
    case SIGVTALRM:
    case SIGSTKFLT:
    case SIGIO:
    case SIGPROF:
    case SIGTERM:
    case SIGPWR:
        return DefaultSignalAction::Terminate;
    case SIGCHLD:
    case SIGURG:
    case SIGWINCH:
        return DefaultSignalAction::Ignore;
    case SIGQUIT:
    case SIGILL:
    case SIGTRAP:
    case SIGABRT:
    case SIGBUS:
    case SIGFPE:
    case SIGSEGV:
    case SIGXCPU:
    case SIGXFSZ:
    case SIGSYS:
        return DefaultSignalAction::DumpCore;
    case SIGCONT:
        return DefaultSignalAction::Continue;
    case SIGSTOP:
    case SIGTSTP:
    case SIGTTIN:
    case SIGTTOU:
        return DefaultSignalAction::Stop;
    }
    ASSERT_NOT_REACHED();
}

bool Thread::should_ignore_signal(u8 signal) const
{
    ASSERT(signal < 32);
    auto& action = m_signal_action_data[signal];
    if (action.handler_or_sigaction.is_null())
        return default_signal_action(signal) == DefaultSignalAction::Ignore;
    if (action.handler_or_sigaction.as_ptr() == SIG_IGN)
        return true;
    return false;
}

bool Thread::has_signal_handler(u8 signal) const
{
    ASSERT(signal < 32);
    auto& action = m_signal_action_data[signal];
    return !action.handler_or_sigaction.is_null();
}

static void push_value_on_user_stack(u32* stack, u32 data)
{
    *stack -= 4;
    copy_to_user((u32*)*stack, &data);
}
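
// dispatch_signal() runs with interrupts disabled and rewrites the thread's
// saved user context so that, the next time it runs in ring 3, it enters the
// registered handler via the signal trampoline (or takes the default action)
// instead of resuming where it was interrupted.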
ShouldUnblockThread Thread::dispatch_signal(u8 signal)
{
    ASSERT_INTERRUPTS_DISABLED();
    ASSERT(signal > 0 && signal <= 32);
    ASSERT(!process().is_ring0());

#ifdef SIGNAL_DEBUG
    kprintf("dispatch_signal %s(%u) <- %u\n", process().name().characters(), pid(), signal);
#endif

    auto& action = m_signal_action_data[signal];
    // FIXME: Implement SA_SIGINFO signal handlers.
    ASSERT(!(action.flags & SA_SIGINFO));

    // Mark this signal as handled.
    m_pending_signals &= ~(1 << (signal - 1));

    if (signal == SIGSTOP) {
        if (!is_stopped()) {
            m_stop_signal = SIGSTOP;
            m_stop_state = m_state;
            set_state(State::Stopped);
        }
        return ShouldUnblockThread::No;
    }

    if (signal == SIGCONT && is_stopped()) {
        ASSERT(m_stop_state != State::Invalid);
        set_state(m_stop_state);
        m_stop_state = State::Invalid;
    }

    auto handler_vaddr = action.handler_or_sigaction;
    if (handler_vaddr.is_null()) {
        switch (default_signal_action(signal)) {
        case DefaultSignalAction::Stop:
            m_stop_signal = signal;
            set_state(Stopped);
            return ShouldUnblockThread::No;
        case DefaultSignalAction::DumpCore:
            process().for_each_thread([](auto& thread) {
                thread.set_dump_backtrace_on_finalization();
                return IterationDecision::Continue;
            });
            [[fallthrough]];
        case DefaultSignalAction::Terminate:
            m_process.terminate_due_to_signal(signal);
            return ShouldUnblockThread::No;
        case DefaultSignalAction::Ignore:
            ASSERT_NOT_REACHED();
        case DefaultSignalAction::Continue:
            return ShouldUnblockThread::Yes;
        }
        ASSERT_NOT_REACHED();
    }

    if (handler_vaddr.as_ptr() == SIG_IGN) {
#ifdef SIGNAL_DEBUG
        kprintf("%s(%u) ignored signal %u\n", process().name().characters(), pid(), signal);
#endif
        return ShouldUnblockThread::Yes;
    }

    ProcessPagingScope paging_scope(m_process);

    u32 old_signal_mask = m_signal_mask;
    u32 new_signal_mask = action.mask;
    if (action.flags & SA_NODEFER)
        new_signal_mask &= ~(1 << (signal - 1));
    else
        new_signal_mask |= 1 << (signal - 1);

    m_signal_mask |= new_signal_mask;
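
    // setup_stack() builds the signal frame on the userspace stack. After the
    // 16-byte alignment adjustment it pushes 14 dwords (56 bytes): the saved
    // eflags and eip, the general-purpose registers, the old signal mask, the
    // signal number, the handler address, and finally a fake return address of 0.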
    auto setup_stack = [&]<typename ThreadState>(ThreadState state, u32* stack)
    {
        u32 old_esp = *stack;
        u32 ret_eip = state.eip;
        u32 ret_eflags = state.eflags;

        // Align the stack to 16 bytes.
        // Note that we push 56 bytes (4 * 14) on to the stack,
        // so we need to account for this here.
        u32 stack_alignment = (*stack - 56) % 16;
        *stack -= stack_alignment;

        push_value_on_user_stack(stack, ret_eflags);
        push_value_on_user_stack(stack, ret_eip);
        push_value_on_user_stack(stack, state.eax);
        push_value_on_user_stack(stack, state.ecx);
        push_value_on_user_stack(stack, state.edx);
        push_value_on_user_stack(stack, state.ebx);
        push_value_on_user_stack(stack, old_esp);
        push_value_on_user_stack(stack, state.ebp);
        push_value_on_user_stack(stack, state.esi);
        push_value_on_user_stack(stack, state.edi);

        // PUSH old_signal_mask
        push_value_on_user_stack(stack, old_signal_mask);

        push_value_on_user_stack(stack, signal);
        push_value_on_user_stack(stack, handler_vaddr.get());
        push_value_on_user_stack(stack, 0); //push fake return address

        ASSERT((*stack % 16) == 0);
    };

    // We now place the thread state on the userspace stack.
    // Note that when we are in the kernel (ie. blocking) we cannot use the
    // tss, as that will contain kernel state; instead, we use a RegisterState.
    // Conversely, when the thread isn't blocking the RegisterState may not be
    // valid (fork, exec etc) but the tss will, so we use that instead.
    if (!in_kernel()) {
        u32* stack = &m_tss.esp;
        setup_stack(m_tss, stack);

        Scheduler::prepare_to_modify_tss(*this);
        m_tss.cs = 0x1b;
        m_tss.ds = 0x23;
        m_tss.es = 0x23;
        m_tss.fs = 0x23;
        m_tss.gs = thread_specific_selector() | 3;
        m_tss.eip = g_return_to_ring3_from_signal_trampoline.get();
        // FIXME: This state is such a hack. It avoids trouble if 'current' is the process receiving a signal.
        set_state(Skip1SchedulerPass);
    } else {
        auto& regs = get_register_dump_from_stack();
        u32* stack = &regs.userspace_esp;
        setup_stack(regs, stack);
        regs.eip = g_return_to_ring3_from_signal_trampoline.get();
    }

#ifdef SIGNAL_DEBUG
    kprintf("signal: Okay, %s(%u) {%s} has been primed with signal handler %w:%x\n", process().name().characters(), pid(), state_string(), m_tss.cs, m_tss.eip);
#endif
    return ShouldUnblockThread::Yes;
}

void Thread::set_default_signal_dispositions()
{
    // FIXME: Set up all the right default actions. See signal(7).
    memset(&m_signal_action_data, 0, sizeof(m_signal_action_data));
    m_signal_action_data[SIGCHLD].handler_or_sigaction = VirtualAddress(SIG_IGN);
    m_signal_action_data[SIGWINCH].handler_or_sigaction = VirtualAddress(SIG_IGN);
}

void Thread::push_value_on_stack(uintptr_t value)
{
    m_tss.esp -= 4;
    uintptr_t* stack_ptr = (uintptr_t*)m_tss.esp;
    copy_to_user(stack_ptr, &value);
}

RegisterState& Thread::get_register_dump_from_stack()
{
    // The userspace registers should be stored at the top of the stack
    // We have to subtract 2 because the processor decrements the kernel
    // stack before pushing the args.
    return *(RegisterState*)(kernel_stack_top() - sizeof(RegisterState));
}
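
// Lays out the initial userspace stack for the main thread: the argv/envp
// pointer arrays and their string contents are written at the base of the new
// stack region, while argc, argv and envp (plus a zero return address) are
// pushed at the top, which is where the returned esp ends up pointing.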
u32 Thread::make_userspace_stack_for_main_thread(Vector<String> arguments, Vector<String> environment)
{
    auto* region = m_process.allocate_region(VirtualAddress(), default_userspace_stack_size, "Stack (Main thread)", PROT_READ | PROT_WRITE, false);
    ASSERT(region);
    region->set_stack(true);

    u32 new_esp = region->vaddr().offset(default_userspace_stack_size).get();

    // FIXME: This is weird, we put the argument contents at the base of the stack,
    //        and the argument pointers at the top? Why?
    char* stack_base = (char*)region->vaddr().get();
    int argc = arguments.size();
    char** argv = (char**)stack_base;
    char** env = argv + arguments.size() + 1;
    char* bufptr = stack_base + (sizeof(char*) * (arguments.size() + 1)) + (sizeof(char*) * (environment.size() + 1));

    SmapDisabler disabler;

    for (size_t i = 0; i < arguments.size(); ++i) {
        argv[i] = bufptr;
        memcpy(bufptr, arguments[i].characters(), arguments[i].length());
        bufptr += arguments[i].length();
        *(bufptr++) = '\0';
    }
    argv[arguments.size()] = nullptr;

    for (size_t i = 0; i < environment.size(); ++i) {
        env[i] = bufptr;
        memcpy(bufptr, environment[i].characters(), environment[i].length());
        bufptr += environment[i].length();
        *(bufptr++) = '\0';
    }
    env[environment.size()] = nullptr;

    auto push_on_new_stack = [&new_esp](u32 value) {
        new_esp -= 4;
        u32* stack_ptr = (u32*)new_esp;
        *stack_ptr = value;
    };

    // NOTE: The stack needs to be 16-byte aligned.
    push_on_new_stack((uintptr_t)env);
    push_on_new_stack((uintptr_t)argv);
    push_on_new_stack((uintptr_t)argc);
    push_on_new_stack(0);

    return new_esp;
}
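
// clone() gives a new thread in the target process a copy of this thread's
// signal dispositions, signal mask, FPU state and TLS pointer, as needed when
// a process is duplicated (e.g. by fork()).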
Thread* Thread::clone(Process& process)
{
    auto* clone = new Thread(process);
    memcpy(clone->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
    clone->m_signal_mask = m_signal_mask;
    memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
    clone->m_thread_specific_data = m_thread_specific_data;
    return clone;
}

void Thread::initialize()
{
    Scheduler::initialize();
    asm volatile("fninit");
    asm volatile("fxsave %0"
                 : "=m"(s_clean_fpu_state));
}

Vector<Thread*> Thread::all_threads()
{
    Vector<Thread*> threads;
    InterruptDisabler disabler;
    threads.ensure_capacity(thread_table().size());
    for (auto* thread : thread_table())
        threads.unchecked_append(thread);
    return threads;
}

bool Thread::is_thread(void* ptr)
{
    ASSERT_INTERRUPTS_DISABLED();
    return thread_table().contains((Thread*)ptr);
}

void Thread::set_state(State new_state)
{
    InterruptDisabler disabler;
    if (new_state == m_state)
        return;

    if (new_state == Blocked) {
        // we should always have a Blocker while blocked
        ASSERT(m_blocker != nullptr);
    }

    m_state = new_state;
    if (m_process.pid() != 0) {
        Scheduler::update_state_for_thread(*this);
    }

    if (new_state == Dying) {
        g_finalizer_has_work = true;
        g_finalizer_wait_queue->wake_all();
    }
}

String Thread::backtrace(ProcessInspectionHandle&) const
{
    return backtrace_impl();
}

struct RecognizedSymbol {
    u32 address;
    const KSym* ksym;
};

static bool symbolicate(const RecognizedSymbol& symbol, const Process& process, StringBuilder& builder, Process::ELFBundle* elf_bundle)
{
    if (!symbol.address)
        return false;

    bool mask_kernel_addresses = !process.is_superuser();
    if (!symbol.ksym) {
        if (!is_user_address(VirtualAddress(symbol.address))) {
            builder.append("0xdeadc0de\n");
        } else {
            if (!Scheduler::is_active() && elf_bundle && elf_bundle->elf_loader->has_symbols())
                builder.appendf("%p  %s\n", symbol.address, elf_bundle->elf_loader->symbolicate(symbol.address).characters());
            else
                builder.appendf("%p\n", symbol.address);
        }
        return true;
    }
    unsigned offset = symbol.address - symbol.ksym->address;
    if (symbol.ksym->address == ksym_highest_address && offset > 4096) {
        builder.appendf("%p\n", mask_kernel_addresses ? 0xdeadc0de : symbol.address);
    } else {
        builder.appendf("%p  %s +%u\n", mask_kernel_addresses ? 0xdeadc0de : symbol.address, demangle(symbol.ksym->name).characters(), offset);
    }
    return true;
}

String Thread::backtrace_impl() const
{
    Vector<RecognizedSymbol, 128> recognized_symbols;

    u32 start_frame;
    if (current == this) {
        asm volatile("movl %%ebp, %%eax"
                     : "=a"(start_frame));
    } else {
        start_frame = frame_ptr();
        recognized_symbols.append({ tss().eip, ksymbolicate(tss().eip) });
    }

    auto& process = const_cast<Process&>(this->process());
    auto elf_bundle = process.elf_bundle();
    ProcessPagingScope paging_scope(process);

    uintptr_t stack_ptr = start_frame;
    for (;;) {
        if (!process.validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(void*) * 2))
            break;
        uintptr_t retaddr;

        if (is_user_range(VirtualAddress(stack_ptr), sizeof(uintptr_t) * 2)) {
            copy_from_user(&retaddr, &((uintptr_t*)stack_ptr)[1]);
            recognized_symbols.append({ retaddr, ksymbolicate(retaddr) });
            copy_from_user(&stack_ptr, (uintptr_t*)stack_ptr);
        } else {
            memcpy(&retaddr, &((uintptr_t*)stack_ptr)[1], sizeof(uintptr_t));
            recognized_symbols.append({ retaddr, ksymbolicate(retaddr) });
            memcpy(&stack_ptr, (uintptr_t*)stack_ptr, sizeof(uintptr_t));
        }
    }

    StringBuilder builder;
    for (auto& symbol : recognized_symbols) {
        if (!symbolicate(symbol, process, builder, elf_bundle.ptr()))
            break;
    }
    return builder.to_string();
}
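
// raw_backtrace() walks the saved-EBP chain starting at the given frame
// pointer and collects return addresses only, without symbolication. It is
// capped at Profiling::max_stack_frame_count entries and only follows frames
// that can be read without faulting.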
Vector<uintptr_t> Thread::raw_backtrace(uintptr_t ebp) const
{
    InterruptDisabler disabler;
    auto& process = const_cast<Process&>(this->process());
    ProcessPagingScope paging_scope(process);
    Vector<uintptr_t, Profiling::max_stack_frame_count> backtrace;
    backtrace.append(ebp);
    for (uintptr_t* stack_ptr = (uintptr_t*)ebp; process.validate_read_from_kernel(VirtualAddress(stack_ptr), sizeof(uintptr_t) * 2) && MM.can_read_without_faulting(process, VirtualAddress(stack_ptr), sizeof(uintptr_t) * 2); stack_ptr = (uintptr_t*)*stack_ptr) {
        uintptr_t retaddr = stack_ptr[1];
        backtrace.append(retaddr);
        if (backtrace.size() == Profiling::max_stack_frame_count)
            break;
    }
    return backtrace;
}
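
// The thread-specific region holds a copy of the process's master TLS data
// followed by a ThreadSpecificData block whose first word points to itself;
// m_thread_specific_data records that self-pointer so GS-relative accesses
// through the selector set up in thread_specific_selector() can find it.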
void Thread::make_thread_specific_region(Badge<Process>)
{
    size_t thread_specific_region_alignment = max(process().m_master_tls_alignment, alignof(ThreadSpecificData));
    size_t thread_specific_region_size = align_up_to(process().m_master_tls_size, thread_specific_region_alignment) + sizeof(ThreadSpecificData);
    auto* region = process().allocate_region({}, thread_specific_region_size, "Thread-specific", PROT_READ | PROT_WRITE, true);
    SmapDisabler disabler;
    auto* thread_specific_data = (ThreadSpecificData*)region->vaddr().offset(align_up_to(process().m_master_tls_size, thread_specific_region_alignment)).as_ptr();
    auto* thread_local_storage = (u8*)((u8*)thread_specific_data) - align_up_to(process().m_master_tls_size, process().m_master_tls_alignment);
    m_thread_specific_data = VirtualAddress(thread_specific_data);
    thread_specific_data->self = thread_specific_data;
    if (process().m_master_tls_size)
        memcpy(thread_local_storage, process().m_master_tls_region->vaddr().as_ptr(), process().m_master_tls_size);
}

const LogStream& operator<<(const LogStream& stream, const Thread& value)
{
    return stream << value.process().name() << "(" << value.pid() << ":" << value.tid() << ")";
}
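
// wait_on() parks the current thread on a WaitQueue: with interrupts disabled
// it drops the process big lock if held, clears the optional atomic flag
// passed by the caller, enqueues the thread in the Queued state, and yields
// (or donates its timeslice to `beneficiary`). Once wake_from_queue() makes it
// Runnable again, the big lock is re-acquired before returning to the caller.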
void Thread::wait_on(WaitQueue& queue, Atomic<bool>* lock, Thread* beneficiary, const char* reason)
{
    cli();
    bool did_unlock = unlock_process_if_locked();
    if (lock)
        *lock = false;
    set_state(State::Queued);
    queue.enqueue(*current);

    // Yield and wait for the queue to wake us up again.
    if (beneficiary)
        Scheduler::donate_to(beneficiary, reason);
    else
        Scheduler::yield();

    // We've unblocked, relock the process if needed and carry on.
    if (did_unlock)
        relock_process();
}

void Thread::wake_from_queue()
{
    ASSERT(state() == State::Queued);
    set_state(State::Runnable);
}

Thread* Thread::from_tid(int tid)
{
    InterruptDisabler disabler;
    Thread* found_thread = nullptr;
    Thread::for_each([&](auto& thread) {
        if (thread.tid() == tid) {
            found_thread = &thread;
            return IterationDecision::Break;
        }
        return IterationDecision::Continue;
    });
    return found_thread;
}

void Thread::reset_fpu_state()
{
    memcpy(m_fpu_state, &s_clean_fpu_state, sizeof(FPUState));
}

}