/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/ScopeGuard.h>
#include <AK/Singleton.h>
#include <AK/StringBuilder.h>
#include <AK/TemporaryChange.h>
#include <AK/Time.h>
#include <Kernel/API/POSIX/signal_numbers.h>
#include <Kernel/Arch/PageDirectory.h>
#include <Kernel/Arch/SmapDisabler.h>
#include <Kernel/Arch/TrapFrame.h>
#include <Kernel/Debug.h>
#include <Kernel/Devices/KCOVDevice.h>
#include <Kernel/FileSystem/OpenFileDescription.h>
#include <Kernel/InterruptDisabler.h>
#include <Kernel/KSyms.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/ScopedAddressSpaceSwitcher.h>
#include <Kernel/Panic.h>
#include <Kernel/PerformanceEventBuffer.h>
#include <Kernel/Process.h>
#include <Kernel/Scheduler.h>
#include <Kernel/Sections.h>
#include <Kernel/Thread.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/TimerQueue.h>
#include <Kernel/kstdio.h>

namespace Kernel {

static Singleton<SpinlockProtected<Thread::GlobalList, LockRank::None>> s_list;

SpinlockProtected<Thread::GlobalList, LockRank::None>& Thread::all_instances()
{
    return *s_list;
}

ErrorOr<NonnullRefPtr<Thread>> Thread::create(NonnullRefPtr<Process> process)
{
    auto kernel_stack_region = TRY(MM.allocate_kernel_region(default_kernel_stack_size, {}, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow));
    kernel_stack_region->set_stack(true);

    auto block_timer = TRY(try_make_ref_counted<Timer>());

    auto name = TRY(process->name().with([](auto& name) { return name->try_clone(); }));
    return adopt_nonnull_ref_or_enomem(new (nothrow) Thread(move(process), move(kernel_stack_region), move(block_timer), move(name)));
}
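
// A minimal usage sketch (hypothetical caller; assumes a NonnullRefPtr<Process> named
// `process` is in scope and that errors are propagated with TRY, as elsewhere in the kernel):
//
//     auto thread = TRY(Thread::create(process));
//
// Creation goes through this fallible factory rather than the constructor directly because
// the kernel stack region, the block timer and the name copy can each fail to allocate.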

Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Memory::Region> kernel_stack_region, NonnullRefPtr<Timer> block_timer, NonnullOwnPtr<KString> name)
    : m_process(move(process))
    , m_kernel_stack_region(move(kernel_stack_region))
    , m_name(move(name))
    , m_block_timer(move(block_timer))
{
    bool is_first_thread = m_process->add_thread(*this);
    if (is_first_thread) {
        // First thread gets TID == PID
        m_tid = m_process->pid().value();
    } else {
        m_tid = Process::allocate_pid().value();
    }
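    // For example (hypothetical numbers): a process with PID 42 gives its first thread
    // TID 42 as well, while any additional thread gets a freshly allocated PID value
    // (say 43) as its TID.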

    // FIXME: Handle KString allocation failure.
    m_kernel_stack_region->set_name(MUST(KString::formatted("Kernel stack (thread {})", m_tid.value())));

    Thread::all_instances().with([&](auto& list) {
        list.append(*this);
    });

    if constexpr (THREAD_DEBUG) {
        m_process->name().with([&](auto& process_name) {
            dbgln("Created new thread {}({}:{})", process_name->view(), m_process->pid().value(), m_tid.value());
        });
    }

    reset_fpu_state();

    m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
    m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & ~(FlatPtr)0x7u;

    m_process->address_space().with([&](auto& space) {
        m_regs.set_initial_state(m_process->is_kernel_process(), *space, m_kernel_stack_top);
    });

    // We need to add another reference if we could successfully create
    // all the resources needed for this thread. The reason for this is that
    // we don't want to delete this thread after dropping the reference,
    // it may still be running or scheduled to be run.
    // The finalizer is responsible for dropping this reference once this
    // thread is ready to be cleaned up.
    ref();
}

Thread::~Thread()
{
    VERIFY(!m_process_thread_list_node.is_in_list());

    // We shouldn't be queued
    VERIFY(m_runnable_priority < 0);
}
2020-11-12 02:05:00 +03:00
2022-01-29 14:46:04 +03:00
Thread : : BlockResult Thread : : block_impl ( BlockTimeout const & timeout , Blocker & blocker )
{
VERIFY ( ! Processor : : current_in_irq ( ) ) ;
VERIFY ( this = = Thread : : current ( ) ) ;
ScopedCritical critical ;
2022-08-17 21:14:49 +03:00
SpinlockLocker scheduler_lock ( g_scheduler_lock ) ;
2022-01-29 14:46:04 +03:00
SpinlockLocker block_lock ( m_block_lock ) ;
// We need to hold m_block_lock so that nobody can unblock a blocker as soon
// as it is constructed and registered elsewhere
ScopeGuard finalize_guard ( [ & ] {
blocker . finalize ( ) ;
} ) ;
if ( ! blocker . setup_blocker ( ) ) {
blocker . will_unblock_immediately_without_blocking ( Blocker : : UnblockImmediatelyReason : : UnblockConditionAlreadyMet ) ;
return BlockResult : : NotBlocked ;
}
// Relaxed semantics are fine for timeout_unblocked because we
// synchronize on the spin locks already.
Atomic < bool , AK : : MemoryOrder : : memory_order_relaxed > timeout_unblocked ( false ) ;
bool timer_was_added = false ;
switch ( state ( ) ) {
2022-01-30 13:38:50 +03:00
case Thread : : State : : Stopped :
2022-01-29 14:46:04 +03:00
// It's possible that we were requested to be stopped!
break ;
2022-01-30 13:38:50 +03:00
case Thread : : State : : Running :
2022-01-29 14:46:04 +03:00
VERIFY ( m_blocker = = nullptr ) ;
break ;
default :
VERIFY_NOT_REACHED ( ) ;
}
m_blocker = & blocker ;
if ( auto & block_timeout = blocker . override_timeout ( timeout ) ; ! block_timeout . is_infinite ( ) ) {
// Process::kill_all_threads may be called at any time, which will mark all
// threads to die. In that case
timer_was_added = TimerQueue : : the ( ) . add_timer_without_id ( * m_block_timer , block_timeout . clock_id ( ) , block_timeout . absolute_time ( ) , [ & ] ( ) {
VERIFY ( ! Processor : : current_in_irq ( ) ) ;
VERIFY ( ! g_scheduler_lock . is_locked_by_current_processor ( ) ) ;
VERIFY ( ! m_block_lock . is_locked_by_current_processor ( ) ) ;
// NOTE: this may execute on the same or any other processor!
SpinlockLocker scheduler_lock ( g_scheduler_lock ) ;
SpinlockLocker block_lock ( m_block_lock ) ;
if ( m_blocker & & ! timeout_unblocked . exchange ( true ) )
unblock ( ) ;
} ) ;
if ( ! timer_was_added ) {
// Timeout is already in the past
blocker . will_unblock_immediately_without_blocking ( Blocker : : UnblockImmediatelyReason : : TimeoutInThePast ) ;
m_blocker = nullptr ;
return BlockResult : : InterruptedByTimeout ;
}
}
blocker . begin_blocking ( { } ) ;
2022-01-30 13:38:50 +03:00
set_state ( Thread : : State : : Blocked ) ;
2022-01-29 14:46:04 +03:00
block_lock . unlock ( ) ;
2022-08-17 21:14:49 +03:00
scheduler_lock . unlock ( ) ;
2022-01-29 14:46:04 +03:00
dbgln_if ( THREAD_DEBUG , " Thread {} blocking on {} ({}) --> " , * this , & blocker , blocker . state_string ( ) ) ;
bool did_timeout = false ;
u32 lock_count_to_restore = 0 ;
auto previous_locked = unlock_process_if_locked ( lock_count_to_restore ) ;
for ( ; ; ) {
// Yield to the scheduler, and wait for us to resume unblocked.
VERIFY ( ! g_scheduler_lock . is_locked_by_current_processor ( ) ) ;
VERIFY ( Processor : : in_critical ( ) ) ;
yield_without_releasing_big_lock ( ) ;
VERIFY ( Processor : : in_critical ( ) ) ;
SpinlockLocker block_lock2 ( m_block_lock ) ;
if ( m_blocker & & ! m_blocker - > can_be_interrupted ( ) & & ! m_should_die ) {
block_lock2 . unlock ( ) ;
dbgln ( " Thread should not be unblocking, current state: {} " , state_string ( ) ) ;
2022-01-30 13:38:50 +03:00
set_state ( Thread : : State : : Blocked ) ;
2022-01-29 14:46:04 +03:00
continue ;
}
// Prevent the timeout from unblocking this thread if it happens to
// be in the process of firing already
did_timeout | = timeout_unblocked . exchange ( true ) ;
if ( m_blocker ) {
// Remove ourselves...
VERIFY ( m_blocker = = & blocker ) ;
m_blocker = nullptr ;
}
dbgln_if ( THREAD_DEBUG , " <-- Thread {} unblocked from {} ({}) " , * this , & blocker , blocker . state_string ( ) ) ;
break ;
}
// Notify the blocker that we are no longer blocking. It may need
// to clean up now while we're still holding m_lock
auto result = blocker . end_blocking ( { } , did_timeout ) ; // calls was_unblocked internally
if ( timer_was_added & & ! did_timeout ) {
// Cancel the timer while not holding any locks. This allows
// the timer function to complete before we remove it
// (e.g. if it's on another processor)
TimerQueue : : the ( ) . cancel_timer ( * m_block_timer ) ;
}
if ( previous_locked ! = LockMode : : Unlocked ) {
2022-01-29 15:57:39 +03:00
// NOTE: This may trigger another call to Thread::block().
2022-01-29 14:46:04 +03:00
relock_process ( previous_locked , lock_count_to_restore ) ;
}
return result ;
}
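
// A minimal usage sketch: callers generally don't invoke block_impl() directly but go
// through the templated Thread::block<BlockerType>() helper, which builds the blocker
// and ends up here with it together with a timeout. Mirroring Thread::sleep() further
// below (hypothetical caller, assuming a Time `duration` is in scope):
//
//     auto result = Thread::current()->block<Thread::SleepBlocker>({}, Thread::BlockTimeout(false, &duration, nullptr, CLOCK_MONOTONIC), nullptr);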

void Thread::block(Kernel::Mutex& lock, SpinlockLocker<Spinlock<LockRank::None>>& lock_lock, u32 lock_count)
{
    VERIFY(!Processor::current_in_irq());
    VERIFY(this == Thread::current());
    ScopedCritical critical;

    SpinlockLocker scheduler_lock(g_scheduler_lock);
    SpinlockLocker block_lock(m_block_lock);

    switch (state()) {
    case Thread::State::Stopped:
        // It's possible that we were requested to be stopped!
        break;
    case Thread::State::Running:
        VERIFY(m_blocker == nullptr);
        break;
    default:
        dbgln("Error: Attempting to block with invalid thread state - {}", state_string());
        VERIFY_NOT_REACHED();
    }

    // If we're blocking on the big-lock we may actually be in the process
    // of unblocking from another lock. If that's the case, m_blocking_mutex
    // is already set
    auto& big_lock = process().big_lock();
    VERIFY((&lock == &big_lock && m_blocking_mutex != &big_lock) || !m_blocking_mutex);

    auto* previous_blocking_mutex = m_blocking_mutex;
    m_blocking_mutex = &lock;
    m_lock_requested_count = lock_count;

    set_state(Thread::State::Blocked);

    block_lock.unlock();
    scheduler_lock.unlock();
    lock_lock.unlock();

    dbgln_if(THREAD_DEBUG, "Thread {} blocking on Mutex {}", *this, &lock);

    for (;;) {
        // Yield to the scheduler, and wait for us to resume unblocked.
        VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
        VERIFY(Processor::in_critical());
        if (&lock != &big_lock && big_lock.is_exclusively_locked_by_current_thread()) {
            // We're locking another lock and already hold the big lock...
            // We need to release the big lock
            yield_and_release_relock_big_lock();
        } else {
            // By the time we've reached this point another thread might have
            // marked us as holding the big lock, so this call must not
            // verify that we're not holding it.
            yield_without_releasing_big_lock(VerifyLockNotHeld::No);
        }
        VERIFY(Processor::in_critical());

        SpinlockLocker block_lock2(m_block_lock);
        VERIFY(!m_blocking_mutex);
        m_blocking_mutex = previous_blocking_mutex;
        break;
    }

    lock_lock.lock();
}

u32 Thread::unblock_from_mutex(Kernel::Mutex& mutex)
{
    SpinlockLocker scheduler_lock(g_scheduler_lock);
    SpinlockLocker block_lock(m_block_lock);

    VERIFY(!Processor::current_in_irq());
    VERIFY(m_blocking_mutex == &mutex);

    dbgln_if(THREAD_DEBUG, "Thread {} unblocked from Mutex {}", *this, &mutex);

    auto requested_count = m_lock_requested_count;

    m_blocking_mutex = nullptr;
    if (Thread::current() == this) {
        set_state(Thread::State::Running);
        return requested_count;
    }
    VERIFY(m_state != Thread::State::Runnable && m_state != Thread::State::Running);
    set_state(Thread::State::Runnable);
    return requested_count;
}

void Thread::unblock_from_blocker(Blocker& blocker)
{
    auto do_unblock = [&]() {
        SpinlockLocker scheduler_lock(g_scheduler_lock);
        SpinlockLocker block_lock(m_block_lock);
        if (m_blocker != &blocker)
            return;
        if (!should_be_stopped() && !is_stopped())
            unblock();
    };
    if (Processor::current_in_irq() != 0) {
        Processor::deferred_call_queue([do_unblock = move(do_unblock), self = try_make_weak_ptr().release_value_but_fixme_should_propagate_errors()]() {
            if (auto this_thread = self.strong_ref())
                do_unblock();
        });
    } else {
        do_unblock();
    }
}

void Thread::unblock(u8 signal)
{
    VERIFY(!Processor::current_in_irq());
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    VERIFY(m_block_lock.is_locked_by_current_processor());
    if (m_state != Thread::State::Blocked)
        return;
    if (m_blocking_mutex)
        return;
    VERIFY(m_blocker);
    if (signal != 0) {
        if (is_handling_page_fault()) {
            // Don't let signals unblock threads that are blocked inside a page fault handler.
            // This prevents threads from EINTR'ing the inode read in an inode page fault.
            // FIXME: There's probably a better way to solve this.
            return;
        }
        if (!m_blocker->can_be_interrupted() && !m_should_die)
            return;
        m_blocker->set_interrupted_by_signal(signal);
    }
    m_blocker = nullptr;
    if (Thread::current() == this) {
        set_state(Thread::State::Running);
        return;
    }
    VERIFY(m_state != Thread::State::Runnable && m_state != Thread::State::Running);
    set_state(Thread::State::Runnable);
}

void Thread::set_should_die()
{
    if (m_should_die) {
        dbgln("{} Should already die", *this);
        return;
    }
    ScopedCritical critical;

    // Remember that we should die instead of returning to
    // the userspace.
    SpinlockLocker lock(g_scheduler_lock);
    m_should_die = true;

    // NOTE: Even the current thread can technically be in "Stopped"
    // state! This is the case when another thread sent a SIGSTOP to
    // it while it was running and it calls e.g. exit() before
    // the scheduler gets involved again.
    if (is_stopped()) {
        // If we were stopped, we need to briefly resume so that
        // the kernel stacks can clean up. We won't ever return back
        // to user mode, though
        VERIFY(!process().is_stopped());
        resume_from_stopped();
    }
    if (is_blocked()) {
        SpinlockLocker block_lock(m_block_lock);
        if (m_blocker) {
            // We're blocked in the kernel.
            m_blocker->set_interrupted_by_death();
            unblock();
        }
    }
}

void Thread::die_if_needed()
{
    VERIFY(Thread::current() == this);

    if (!m_should_die)
        return;

    u32 unlock_count;
    [[maybe_unused]] auto rc = unlock_process_if_locked(unlock_count);

    dbgln_if(THREAD_DEBUG, "Thread {} is dying", *this);

    {
        SpinlockLocker lock(g_scheduler_lock);
        // It's possible that we don't reach the code after this block if the
        // scheduler is invoked and FinalizerTask cleans up this thread, however
        // that doesn't matter because we're trying to invoke the scheduler anyway
        set_state(Thread::State::Dying);
    }

    ScopedCritical critical;

    // Flag a context switch. Because we're in a critical section,
    // Scheduler::yield will actually only mark a pending context switch
    // Simply leaving the critical section would not necessarily trigger
    // a switch.
    Scheduler::yield();

    // Now leave the critical section so that we can also trigger the
    // actual context switch
    Processor::clear_critical();
    dbgln("die_if_needed returned from clear_critical!!! in irq: {}", Processor::current_in_irq());
    // We should never get here, but the scoped scheduler lock
    // will be released by Scheduler::context_switch again
    VERIFY_NOT_REACHED();
}

void Thread::exit(void* exit_value)
{
    VERIFY(Thread::current() == this);
    m_join_blocker_set.thread_did_exit(exit_value);
    set_should_die();
    u32 unlock_count;
    [[maybe_unused]] auto rc = unlock_process_if_locked(unlock_count);
    if (m_thread_specific_range.has_value()) {
        process().address_space().with([&](auto& space) {
            auto* region = space->find_region_from_range(m_thread_specific_range.value());
            space->deallocate_region(*region);
        });
    }
#ifdef ENABLE_KERNEL_COVERAGE_COLLECTION
    KCOVDevice::free_thread();
#endif
    die_if_needed();
}

void Thread::yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held)
{
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
    VERIFY(verify_lock_not_held == VerifyLockNotHeld::No || !process().big_lock().is_exclusively_locked_by_current_thread());
    // Disable interrupts here. This ensures we don't accidentally switch contexts twice
    InterruptDisabler disable;
    Scheduler::yield(); // flag a switch
    u32 prev_critical = Processor::clear_critical();
    // NOTE: We may be on a different CPU now!
    Processor::restore_critical(prev_critical);
}

void Thread::yield_and_release_relock_big_lock()
{
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
    // Disable interrupts here. This ensures we don't accidentally switch contexts twice
    InterruptDisabler disable;
    Scheduler::yield(); // flag a switch
    u32 lock_count_to_restore = 0;
    auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
    // NOTE: Even though we call Scheduler::yield here, unless we happen
    // to be outside of a critical section, the yield will be postponed
    // until leaving it in relock_process.
    relock_process(previous_locked, lock_count_to_restore);
}

LockMode Thread::unlock_process_if_locked(u32& lock_count_to_restore)
{
    return process().big_lock().force_unlock_exclusive_if_locked(lock_count_to_restore);
}

void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
{
    // Clearing the critical section may trigger the context switch
    // flagged by calling Scheduler::yield above.
    // We have to do it this way because we intentionally
    // leave the critical section here to be able to switch contexts.
    u32 prev_critical = Processor::clear_critical();

    // CONTEXT SWITCH HAPPENS HERE!

    // NOTE: We may be on a different CPU now!
    Processor::restore_critical(prev_critical);

    if (previous_locked != LockMode::Unlocked) {
        // We've unblocked, relock the process if needed and carry on.
        process().big_lock().restore_exclusive_lock(lock_count_to_restore);
    }
}
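
// The two helpers above are used as a pair around a yield or block: the big lock, if held,
// is force-unlocked first and restored afterwards with the saved lock count. A sketch of
// the pattern (mirroring yield_and_release_relock_big_lock() above):
//
//     u32 lock_count_to_restore = 0;
//     auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
//     // ... yield / block here ...
//     relock_process(previous_locked, lock_count_to_restore);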

// NOLINTNEXTLINE(readability-make-member-function-const) False positive; We call block<SleepBlocker> which is not const
auto Thread::sleep(clockid_t clock_id, Time const& duration, Time* remaining_time) -> BlockResult
{
    VERIFY(state() == Thread::State::Running);
    return Thread::current()->block<Thread::SleepBlocker>({}, Thread::BlockTimeout(false, &duration, nullptr, clock_id), remaining_time);
}

// NOLINTNEXTLINE(readability-make-member-function-const) False positive; We call block<SleepBlocker> which is not const
auto Thread::sleep_until(clockid_t clock_id, Time const& deadline) -> BlockResult
{
    VERIFY(state() == Thread::State::Running);
    return Thread::current()->block<Thread::SleepBlocker>({}, Thread::BlockTimeout(true, &deadline, nullptr, clock_id));
}
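
// A minimal usage sketch (hypothetical caller; assumes a Time value `duration` and a
// clockid_t such as CLOCK_MONOTONIC are in scope):
//
//     Time remaining {};
//     auto result = Thread::current()->sleep(CLOCK_MONOTONIC, duration, &remaining);
//
// The only difference between the two overloads is the BlockTimeout they construct:
// sleep() passes a relative duration (first argument false), while sleep_until() passes
// an absolute deadline (true).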

StringView Thread::state_string() const
{
    switch (state()) {
    case Thread::State::Invalid:
        return "Invalid"sv;
    case Thread::State::Runnable:
        return "Runnable"sv;
    case Thread::State::Running:
        return "Running"sv;
    case Thread::State::Dying:
        return "Dying"sv;
    case Thread::State::Dead:
        return "Dead"sv;
    case Thread::State::Stopped:
        return "Stopped"sv;
    case Thread::State::Blocked: {
        SpinlockLocker block_lock(m_block_lock);
        if (m_blocking_mutex)
            return "Mutex"sv;
        if (m_blocker)
            return m_blocker->state_string();
        VERIFY_NOT_REACHED();
    }
    }
    PANIC("Thread::state_string(): Invalid state: {}", (int)state());
}

void Thread::finalize()
{
    VERIFY(Thread::current() == g_finalizer);
    VERIFY(Thread::current() != this);

#if LOCK_DEBUG
    VERIFY(!m_lock.is_locked_by_current_processor());
    if (lock_count() > 0) {
        dbgln("Thread {} leaking {} Locks!", *this, lock_count());
        SpinlockLocker list_lock(m_holding_locks_lock);
        for (auto& info : m_holding_locks_list) {
            auto const& location = info.lock_location;
            dbgln(" - Mutex: \"{}\" @ {} locked in function \"{}\" at \"{}:{}\" with a count of: {}", info.lock->name(), info.lock, location.function_name(), location.filename(), location.line_number(), info.count);
        }
        VERIFY_NOT_REACHED();
    }
#endif

    {
        SpinlockLocker lock(g_scheduler_lock);
        dbgln_if(THREAD_DEBUG, "Finalizing thread {}", *this);
        set_state(Thread::State::Dead);
        m_join_blocker_set.thread_finalizing();
    }

    if (m_dump_backtrace_on_finalization) {
        auto trace_or_error = backtrace();
        if (!trace_or_error.is_error()) {
            auto trace = trace_or_error.release_value();
            dbgln("Backtrace:");
            kernelputstr(trace->characters(), trace->length());
        }
    }

    drop_thread_count();
}

void Thread::drop_thread_count()
{
    bool is_last = process().remove_thread(*this);
    if (is_last)
        process().finalize();
}

void Thread::finalize_dying_threads()
{
    VERIFY(Thread::current() == g_finalizer);
    Vector<Thread*, 32> dying_threads;
    {
        SpinlockLocker lock(g_scheduler_lock);
        for_each_in_state(Thread::State::Dying, [&](Thread& thread) {
            if (!thread.is_finalizable())
                return;
            auto result = dying_threads.try_append(&thread);
            // We ignore allocation failures above the first 32 guaranteed thread slots, and
            // just flag our future-selves to finalize these threads at a later point
            if (result.is_error())
                g_finalizer_has_work.store(true, AK::MemoryOrder::memory_order_release);
        });
    }
    for (auto* thread : dying_threads) {
        RefPtr<Process> const process = thread->process();
        dbgln_if(PROCESS_DEBUG, "Before finalization, {} has {} refs and its process has {}",
            *thread, thread->ref_count(), thread->process().ref_count());
        thread->finalize();
        dbgln_if(PROCESS_DEBUG, "After finalization, {} has {} refs and its process has {}",
            *thread, thread->ref_count(), thread->process().ref_count());
        // This thread will never execute again, drop the running reference
        // NOTE: This may not necessarily drop the last reference if anything
        // else is still holding onto this thread!
        thread->unref();
    }
}

void Thread::update_time_scheduled(u64 current_scheduler_time, bool is_kernel, bool no_longer_running)
{
    if (m_last_time_scheduled.has_value()) {
        u64 delta;
        if (current_scheduler_time >= m_last_time_scheduled.value())
            delta = current_scheduler_time - m_last_time_scheduled.value();
        else
            delta = m_last_time_scheduled.value() - current_scheduler_time; // the unlikely event that the clock wrapped
        if (delta != 0) {
            // Add it to the global total *before* updating the thread's value!
            Scheduler::add_time_scheduled(delta, is_kernel);
            auto& total_time = is_kernel ? m_total_time_scheduled_kernel : m_total_time_scheduled_user;
            total_time.fetch_add(delta, AK::memory_order_relaxed);
        }
    }
    if (no_longer_running)
        m_last_time_scheduled = {};
    else
        m_last_time_scheduled = current_scheduler_time;
}
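
// Worked example (hypothetical numbers): if this thread was last scheduled at scheduler
// time 1000 and update_time_scheduled(1300, true, false) is called, a delta of 300 is
// added to both the global kernel total and m_total_time_scheduled_kernel, and
// m_last_time_scheduled becomes 1300 for the next update.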

bool Thread::tick()
{
    if (previous_mode() == ExecutionMode::Kernel) {
        ++m_process->m_ticks_in_kernel;
        ++m_ticks_in_kernel;
    } else {
        ++m_process->m_ticks_in_user;
        ++m_ticks_in_user;
    }
    --m_ticks_left;
    return m_ticks_left != 0;
}

void Thread::check_dispatch_pending_signal()
{
    auto result = DispatchSignalResult::Continue;
    {
        SpinlockLocker scheduler_lock(g_scheduler_lock);
        if (pending_signals_for_state() != 0) {
            result = dispatch_one_pending_signal();
        }
    }

    if (result == DispatchSignalResult::Yield) {
        yield_without_releasing_big_lock();
    }
}

u32 Thread::pending_signals() const
{
    SpinlockLocker lock(g_scheduler_lock);
    return pending_signals_for_state();
}

u32 Thread::pending_signals_for_state() const
{
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    constexpr u32 stopped_signal_mask = (1 << (SIGCONT - 1)) | (1 << (SIGKILL - 1)) | (1 << (SIGTRAP - 1));
    if (is_handling_page_fault())
        return 0;
    return m_state != State::Stopped ? m_pending_signals : m_pending_signals & stopped_signal_mask;
}
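
// Signal bookkeeping uses one bit per signal: signal number N is tracked in bit (N - 1)
// of a u32, so for example a pending SIGKILL corresponds to bit (SIGKILL - 1) being set
// in m_pending_signals. While a thread is Stopped, stopped_signal_mask above narrows the
// reported pending set down to just SIGCONT, SIGKILL and SIGTRAP.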

void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
{
    VERIFY(signal < NSIG);
    VERIFY(process().is_user_process());
    SpinlockLocker scheduler_lock(g_scheduler_lock);

    // FIXME: Figure out what to do for masked signals. Should we also ignore them here?
    if (should_ignore_signal(signal)) {
        dbgln_if(SIGNAL_DEBUG, "Signal {} was ignored by {}", signal, process());
        return;
    }

    if constexpr (SIGNAL_DEBUG) {
        if (sender)
            dbgln("Signal: {} sent {} to {}", *sender, signal, process());
        else
            dbgln("Signal: Kernel send {} to {}", signal, process());
    }

    m_pending_signals |= 1 << (signal - 1);
    m_signal_senders[signal] = sender ? sender->pid() : pid();
    m_have_any_unmasked_pending_signals.store((pending_signals_for_state() & ~m_signal_mask) != 0, AK::memory_order_release);
    m_signal_blocker_set.unblock_all_blockers_whose_conditions_are_met();

    if (!has_unmasked_pending_signals())
        return;

    if (m_state == Thread::State::Stopped) {
        if (pending_signals_for_state() != 0) {
            dbgln_if(SIGNAL_DEBUG, "Signal: Resuming stopped {} to deliver signal {}", *this, signal);
            resume_from_stopped();
        }
    } else {
        SpinlockLocker block_lock(m_block_lock);
        dbgln_if(SIGNAL_DEBUG, "Signal: Unblocking {} to deliver signal {}", *this, signal);
        unblock(signal);
    }
}

u32 Thread::update_signal_mask(u32 signal_mask)
{
    SpinlockLocker lock(g_scheduler_lock);
    auto previous_signal_mask = m_signal_mask;
    m_signal_mask = signal_mask;
    m_have_any_unmasked_pending_signals.store((pending_signals_for_state() & ~m_signal_mask) != 0, AK::memory_order_release);
    return previous_signal_mask;
}

u32 Thread::signal_mask() const
{
    SpinlockLocker lock(g_scheduler_lock);
    return m_signal_mask;
}

u32 Thread::signal_mask_block(sigset_t signal_set, bool block)
{
    SpinlockLocker lock(g_scheduler_lock);
    auto previous_signal_mask = m_signal_mask;
    if (block)
        m_signal_mask |= signal_set;
    else
        m_signal_mask &= ~signal_set;
    m_have_any_unmasked_pending_signals.store((pending_signals_for_state() & ~m_signal_mask) != 0, AK::memory_order_release);
    return previous_signal_mask;
}

void Thread::reset_signals_for_exec()
{
    SpinlockLocker lock(g_scheduler_lock);
    // The signal mask is preserved across execve(2).
    // The pending signal set is preserved across an execve(2).
    m_have_any_unmasked_pending_signals.store(false, AK::memory_order_release);
    m_signal_action_masks.fill({});
    // A successful call to execve(2) removes any existing alternate signal stack
    m_alternative_signal_stack = 0;
    m_alternative_signal_stack_size = 0;
}

// Certain exceptions, such as SIGSEGV and SIGILL, put a
// thread into a state where the signal handler must be
// invoked immediately, otherwise it will continue to fault.
// This function should be used in an exception handler to
// ensure that when the thread resumes, it's executing in
// the appropriate signal handler.
void Thread::send_urgent_signal_to_self(u8 signal)
{
    VERIFY(Thread::current() == this);
    DispatchSignalResult result;
    {
        SpinlockLocker lock(g_scheduler_lock);
        result = dispatch_signal(signal);
    }
    if (result == DispatchSignalResult::Terminate) {
        Thread::current()->die_if_needed();
        VERIFY_NOT_REACHED(); // dispatch_signal will request termination of the thread, so the above call should never return
    }
    if (result == DispatchSignalResult::Yield)
        yield_and_release_relock_big_lock();
}

DispatchSignalResult Thread::dispatch_one_pending_signal()
{
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
    if (signal_candidates == 0)
        return DispatchSignalResult::Continue;

    u8 signal = 1;
    for (; signal < NSIG; ++signal) {
        if ((signal_candidates & (1 << (signal - 1))) != 0) {
            break;
        }
    }
    return dispatch_signal(signal);
}
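
// Note that the loop above picks the lowest-numbered unmasked pending signal: if, say,
// SIGINT and SIGTERM are both pending and deliverable, SIGINT (the smaller signal number)
// is dispatched first.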

DispatchSignalResult Thread::try_dispatch_one_pending_signal(u8 signal)
{
    VERIFY(signal != 0);
    SpinlockLocker scheduler_lock(g_scheduler_lock);
    u32 signal_candidates = pending_signals_for_state() & ~m_signal_mask;
    if ((signal_candidates & (1 << (signal - 1))) == 0)
        return DispatchSignalResult::Continue;
    return dispatch_signal(signal);
}

enum class DefaultSignalAction {
    Terminate,
    Ignore,
    DumpCore,
    Stop,
    Continue,
};

static DefaultSignalAction default_signal_action(u8 signal)
{
    VERIFY(signal && signal < NSIG);
    switch (signal) {
    case SIGHUP:
    case SIGINT:
    case SIGKILL:
    case SIGPIPE:
    case SIGALRM:
    case SIGUSR1:
    case SIGUSR2:
    case SIGVTALRM:
    case SIGSTKFLT:
    case SIGIO:
    case SIGPROF:
    case SIGTERM:
    case SIGCANCEL:
        return DefaultSignalAction::Terminate;
    case SIGCHLD:
    case SIGURG:
    case SIGWINCH:
    case SIGINFO:
        return DefaultSignalAction::Ignore;
    case SIGQUIT:
    case SIGILL:
    case SIGTRAP:
    case SIGABRT:
    case SIGBUS:
    case SIGFPE:
    case SIGSEGV:
    case SIGXCPU:
    case SIGXFSZ:
    case SIGSYS:
        return DefaultSignalAction::DumpCore;
    case SIGCONT:
        return DefaultSignalAction::Continue;
    case SIGSTOP:
    case SIGTSTP:
    case SIGTTIN:
    case SIGTTOU:
        return DefaultSignalAction::Stop;
    default:
        VERIFY_NOT_REACHED();
    }
}

bool Thread::should_ignore_signal(u8 signal) const
{
    VERIFY(signal < NSIG);
    auto const& action = m_process->m_signal_action_data[signal];
    if (action.handler_or_sigaction.is_null())
        return default_signal_action(signal) == DefaultSignalAction::Ignore;
    return ((sighandler_t)action.handler_or_sigaction.get() == SIG_IGN);
}

bool Thread::has_signal_handler(u8 signal) const
{
    VERIFY(signal < NSIG);
    auto const& action = m_process->m_signal_action_data[signal];
    return !action.handler_or_sigaction.is_null();
}

bool Thread::is_signal_masked(u8 signal) const
{
    VERIFY(signal < NSIG);
    return (1 << (signal - 1)) & m_signal_mask;
}

bool Thread::has_alternative_signal_stack() const
{
    return m_alternative_signal_stack_size != 0;
}

bool Thread::is_in_alternative_signal_stack() const
{
    auto sp = get_register_dump_from_stack().userspace_sp();
    return sp >= m_alternative_signal_stack && sp < m_alternative_signal_stack + m_alternative_signal_stack_size;
}
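
// For example (hypothetical sizes): with a 16 KiB alternate stack installed at
// m_alternative_signal_stack, any userspace SP in the half-open range
// [m_alternative_signal_stack, m_alternative_signal_stack + 16 KiB) makes this return true,
// which is what keeps dispatch_signal() below from switching to the alternate stack again
// while a handler is already running on it.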

static ErrorOr<void> push_value_on_user_stack(FlatPtr& stack, FlatPtr data)
{
    stack -= sizeof(FlatPtr);
    return copy_to_user((FlatPtr*)stack, &data);
}

template<typename T>
static ErrorOr<void> copy_value_on_user_stack(FlatPtr& stack, T const& data)
{
    stack -= sizeof(data);
    return copy_to_user((RemoveCVReference<T>*)stack, &data);
}
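
// Both helpers grow the user stack downwards: for example, pushing a FlatPtr on x86-64
// first moves `stack` down by 8 bytes and then copies the value to that address, so the
// caller's `stack` variable always tracks the current top of the stack.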

void Thread::resume_from_stopped()
{
    VERIFY(is_stopped());
    VERIFY(m_stop_state != State::Invalid);
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    if (m_stop_state == Thread::State::Blocked) {
        SpinlockLocker block_lock(m_block_lock);
        if (m_blocker || m_blocking_mutex) {
            // Hasn't been unblocked yet
            set_state(Thread::State::Blocked, 0);
        } else {
            // Was unblocked while stopped
            set_state(Thread::State::Runnable);
        }
    } else {
        set_state(m_stop_state, 0);
    }
}
2020-11-30 02:05:27 +03:00
DispatchSignalResult Thread : : dispatch_signal ( u8 signal )
2019-03-24 00:03:17 +03:00
{
2021-02-23 22:42:32 +03:00
VERIFY_INTERRUPTS_DISABLED ( ) ;
2021-08-29 21:10:24 +03:00
VERIFY ( g_scheduler_lock . is_locked_by_current_processor ( ) ) ;
2022-07-22 00:08:07 +03:00
VERIFY ( signal > 0 & & signal < = NSIG ) ;
2021-02-23 22:42:32 +03:00
VERIFY ( process ( ) . is_user_process ( ) ) ;
VERIFY ( this = = Thread : : current ( ) ) ;
2019-03-24 00:03:17 +03:00
2021-03-10 00:35:13 +03:00
dbgln_if ( SIGNAL_DEBUG , " Dispatch signal {} to {}, state: {} " , signal , * this , state_string ( ) ) ;
2019-03-24 00:03:17 +03:00
2022-01-30 13:38:50 +03:00
if ( m_state = = Thread : : State : : Invalid | | ! is_initialized ( ) ) {
2020-09-07 17:31:00 +03:00
// Thread has barely been created, we need to wait until it is
// at least in Runnable state and is_initialized() returns true,
// which indicates that it is fully set up an we actually have
// a register state on the stack that we can modify
2020-11-30 02:05:27 +03:00
return DispatchSignalResult : : Deferred ;
}
2022-02-24 21:55:49 +03:00
auto & action = m_process - > m_signal_action_data [ signal ] ;
2022-02-26 14:59:31 +03:00
auto sender_pid = m_signal_senders [ signal ] ;
2022-11-02 23:26:02 +03:00
auto sender = Process : : from_pid_ignoring_jails ( sender_pid ) ;
2019-03-24 00:03:17 +03:00
2022-02-26 23:50:17 +03:00
if ( ! current_trap ( ) & & ! action . handler_or_sigaction . is_null ( ) ) {
// We're trying dispatch a handled signal to a user process that was scheduled
// after a yielding/blocking kernel thread, we don't have a register capture of
// the thread, so just defer processing the signal to later.
return DispatchSignalResult : : Deferred ;
}
2019-03-24 00:03:17 +03:00
// Mark this signal as handled.
2019-08-01 12:00:36 +03:00
m_pending_signals & = ~ ( 1 < < ( signal - 1 ) ) ;
2021-11-07 00:06:08 +03:00
m_have_any_unmasked_pending_signals . store ( ( m_pending_signals & ~ m_signal_mask ) ! = 0 , AK : : memory_order_release ) ;
2019-03-24 00:03:17 +03:00
2020-12-09 07:18:45 +03:00
auto & process = this - > process ( ) ;
2021-11-01 01:36:52 +03:00
auto * tracer = process . tracer ( ) ;
2020-12-09 07:18:45 +03:00
if ( signal = = SIGSTOP | | ( tracer & & default_signal_action ( signal ) = = DefaultSignalAction : : DumpCore ) ) {
2021-03-10 00:35:13 +03:00
dbgln_if ( SIGNAL_DEBUG , " Signal {} stopping this thread " , signal ) ;
2023-02-04 17:57:45 +03:00
if ( tracer )
tracer - > set_regs ( get_register_dump_from_stack ( ) ) ;
2022-01-30 13:38:50 +03:00
set_state ( Thread : : State : : Stopped , signal ) ;
2020-11-30 02:05:27 +03:00
return DispatchSignalResult : : Yield ;
2019-03-24 00:03:17 +03:00
}
2020-12-08 07:29:41 +03:00
if ( signal = = SIGCONT ) {
2021-01-13 00:30:52 +03:00
dbgln ( " signal: SIGCONT resuming {} " , * this ) ;
2020-08-14 19:24:31 +03:00
} else {
2020-12-09 07:18:45 +03:00
if ( tracer ) {
2020-03-28 11:47:16 +03:00
// when a thread is traced, it should be stopped whenever it receives a signal
// the tracer is notified of this by using waitpid()
// only "pending signals" from the tracer are sent to the tracee
2020-12-09 07:18:45 +03:00
if ( ! tracer - > has_pending_signal ( signal ) ) {
2021-01-13 00:30:52 +03:00
dbgln ( " signal: {} stopping {} for tracer " , signal , * this ) ;
2022-01-30 13:38:50 +03:00
set_state ( Thread : : State : : Stopped , signal ) ;
2020-11-30 02:05:27 +03:00
return DispatchSignalResult : : Yield ;
2020-03-28 11:47:16 +03:00
}
2020-12-09 07:18:45 +03:00
tracer - > unset_signal ( signal ) ;
2020-03-28 11:47:16 +03:00
}
2020-03-01 17:14:17 +03:00
}

    auto handler_vaddr = action.handler_or_sigaction;
    if (handler_vaddr.is_null()) {
        switch (default_signal_action(signal)) {
        case DefaultSignalAction::Stop:
            set_state(Thread::State::Stopped, signal);
            return DispatchSignalResult::Yield;
        case DefaultSignalAction::DumpCore:
            process.set_should_generate_coredump(true);
            process.for_each_thread([](auto& thread) {
                thread.set_dump_backtrace_on_finalization();
            });
            [[fallthrough]];
        case DefaultSignalAction::Terminate:
            m_process->terminate_due_to_signal(signal);
            return DispatchSignalResult::Terminate;
        case DefaultSignalAction::Ignore:
            VERIFY_NOT_REACHED();
        case DefaultSignalAction::Continue:
            return DispatchSignalResult::Continue;
        }
        VERIFY_NOT_REACHED();
    }

    if ((sighandler_t)handler_vaddr.as_ptr() == SIG_IGN) {
        dbgln_if(SIGNAL_DEBUG, "Ignored signal {}", signal);
        return DispatchSignalResult::Continue;
    }

    ScopedAddressSpaceSwitcher switcher(m_process);

    m_currently_handled_signal = signal;

    u32 old_signal_mask = m_signal_mask;
    u32 new_signal_mask = m_signal_action_masks[signal].value_or(action.mask);
    if ((action.flags & SA_NODEFER) == SA_NODEFER)
        new_signal_mask &= ~(1 << (signal - 1));
    else
        new_signal_mask |= 1 << (signal - 1);
    m_signal_mask |= new_signal_mask;
    m_have_any_unmasked_pending_signals.store((m_pending_signals & ~m_signal_mask) != 0, AK::memory_order_release);
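    // Illustrative example (not from the original source): for signal == SIGINT (2) and an empty
    // action mask, new_signal_mask picks up bit (1 << 1), so SIGINT stays blocked while its own
    // handler runs. With SA_NODEFER that bit is cleared instead, so a second SIGINT may re-enter
    // the handler.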

    bool use_alternative_stack = ((action.flags & SA_ONSTACK) != 0) && has_alternative_signal_stack() && !is_in_alternative_signal_stack();

    auto setup_stack = [&](RegisterState& state) -> ErrorOr<void> {
        FlatPtr stack;
        if (use_alternative_stack)
            stack = m_alternative_signal_stack + m_alternative_signal_stack_size;
        else
            stack = state.userspace_sp();

        dbgln_if(SIGNAL_DEBUG, "Setting up user stack to return to IP {:p}, SP {:p}", state.ip(), state.userspace_sp());

        __ucontext ucontext {
            .uc_link = nullptr,
            .uc_sigmask = old_signal_mask,
            .uc_stack = {
                .ss_sp = bit_cast<void*>(stack),
                .ss_flags = action.flags & SA_ONSTACK,
                .ss_size = use_alternative_stack ? m_alternative_signal_stack_size : 0,
            },
            .uc_mcontext = {},
        };
        copy_kernel_registers_into_ptrace_registers(static_cast<PtraceRegisters&>(ucontext.uc_mcontext), state);

        auto fill_signal_info_for_signal = [&](siginfo& signal_info) {
            if (signal == SIGCHLD) {
                if (!sender) {
                    signal_info.si_code = CLD_EXITED;
                    return;
                }
                auto const* thread = sender->thread_list().with([](auto& list) { return list.is_empty() ? nullptr : list.first(); });
                if (!thread) {
                    signal_info.si_code = CLD_EXITED;
                    return;
                }

                switch (thread->m_state) {
                case State::Dead:
                    if (sender->should_generate_coredump() && sender->is_dumpable()) {
                        signal_info.si_code = CLD_DUMPED;
                        signal_info.si_status = sender->termination_signal();
                        return;
                    }
                    [[fallthrough]];
                case State::Dying:
                    if (sender->termination_signal() == 0) {
                        signal_info.si_code = CLD_EXITED;
                        signal_info.si_status = sender->termination_status();
                        return;
                    }
                    signal_info.si_code = CLD_KILLED;
                    signal_info.si_status = sender->termination_signal();
                    return;
                case State::Runnable:
                case State::Running:
                case State::Blocked:
                    signal_info.si_code = CLD_CONTINUED;
                    return;
                case State::Stopped:
                    signal_info.si_code = CLD_STOPPED;
                    return;
                case State::Invalid:
                    // Something is wrong, but we're just an observer.
                    break;
                }
            }

            signal_info.si_code = SI_NOINFO;
        };
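        // Descriptive summary (not from the original source): for SIGCHLD, si_code is derived from the
        // sender's first thread's state: Dead with a coredump maps to CLD_DUMPED, a clean exit to
        // CLD_EXITED, death by signal to CLD_KILLED, Stopped to CLD_STOPPED, and a running/blocked
        // thread to CLD_CONTINUED; anything else falls back to SI_NOINFO.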

        siginfo signal_info {
            .si_signo = signal,
            // Filled in below by fill_signal_info_for_signal.
            .si_code = 0,
            // Set for SI_TIMER, we don't have the data here.
            .si_errno = 0,
            .si_pid = sender_pid.value(),
            .si_uid = sender ? sender->credentials()->uid().value() : 0,
            // Set for SIGILL, SIGFPE, SIGSEGV and SIGBUS
            // FIXME: We don't generate these signals in a way that can be handled.
            .si_addr = 0,
            // Set for SIGCHLD.
            .si_status = 0,
            // Set for SIGPOLL, we don't have SIGPOLL.
            .si_band = 0,
            // Set for SI_QUEUE, SI_TIMER, SI_ASYNCIO and SI_MESGQ
            // We do not generate any of these.
            .si_value = {
                .sival_int = 0,
            },
        };

        if (action.flags & SA_SIGINFO)
            fill_signal_info_for_signal(signal_info);
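        // Note (descriptive, not from the original source): the siginfo and ucontext are pushed onto the
        // user stack unconditionally below; SA_SIGINFO only determines whether si_code (and, for SIGCHLD,
        // si_status) are filled in with detailed information.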

        // Align the stack to 16 bytes.
        // Note that we push some elements onto the stack before the return address,
        // so we need to account for this here.
        constexpr static FlatPtr elements_pushed_on_stack_before_handler_address = 1; // one slot for a saved register
        FlatPtr const extra_bytes_pushed_on_stack_before_handler_address = sizeof(ucontext) + sizeof(signal_info);
        FlatPtr stack_alignment = (stack - elements_pushed_on_stack_before_handler_address * sizeof(FlatPtr) + extra_bytes_pushed_on_stack_before_handler_address) % 16;
        stack -= stack_alignment;

#if ARCH(X86_64)
        // Also note that we have to skip the thread red-zone (if needed), so do that here.
        constexpr static FlatPtr thread_red_zone_size = 128;
        stack -= thread_red_zone_size;
#endif
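        // Illustrative arithmetic (hypothetical sizes, not from the original source): if stack ends in
        // 0xff0 and sizeof(ucontext) + sizeof(signal_info) == 0x1b0, then
        //   stack_alignment = (0xff0 - 1 * 8 + 0x1b0) % 16 == 8,
        // so the stack drops by 8 (plus the 128-byte red zone on x86_64, itself a multiple of 16).
        // After the return-value slot, ucontext and siginfo are pushed below, the stack pointer is once
        // again 16-byte aligned, which the VERIFY(stack % 16 == 0) further down checks.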

        auto start_of_stack = stack;

        TRY(push_value_on_user_stack(stack, 0)); // syscall return value slot

        TRY(copy_value_on_user_stack(stack, ucontext));
        auto pointer_to_ucontext = stack;

        TRY(copy_value_on_user_stack(stack, signal_info));
        auto pointer_to_signal_info = stack;

        // Historical note (from the commit "Kernel: Properly align stack for signal handlers"):
        // The System V ABI requires that the stack is 16-byte aligned on function call. Confusingly,
        // however, this means the stack must be aligned *before* the CALL instruction executes; CALL
        // then pushes the return address, so the callee actually sees a stack pointer that is
        // sizeof(FlatPtr) smaller. The signal trampoline was written with this in mind, but setup_stack
        // used to align the entire stack, including the return address, to a 16-byte boundary, so the
        // trampoline subtracted too much from the stack pointer and misaligned it. This didn't matter on
        // i686, where signal handlers executed no instructions requiring memory operands aligned to more
        // than 4 bytes, but on x86_64 SSE instructions are enabled by default and require 16-byte-aligned
        // operands; running them raised a GP fault, immediately killing the offending program with
        // SIGSEGV. The bug made TestKernelAlarm fail in LibC when run locally and at one point affected
        // the zsh port. Fixes #9291.

        // Make sure we actually pushed as many elements as we claimed to have pushed.
        if (start_of_stack - stack != elements_pushed_on_stack_before_handler_address * sizeof(FlatPtr) + extra_bytes_pushed_on_stack_before_handler_address) {
            PANIC("Stack in invalid state after signal trampoline, expected {:x} but got {:x}",
                start_of_stack - elements_pushed_on_stack_before_handler_address * sizeof(FlatPtr) - extra_bytes_pushed_on_stack_before_handler_address, stack);
        }

        VERIFY(stack % 16 == 0);

        // Save the FPU/SSE state
        TRY(copy_value_on_user_stack(stack, fpu_state()));

        TRY(push_value_on_user_stack(stack, pointer_to_ucontext));
        TRY(push_value_on_user_stack(stack, pointer_to_signal_info));
        TRY(push_value_on_user_stack(stack, signal));

        TRY(push_value_on_user_stack(stack, handler_vaddr.get()));
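        // Resulting user stack layout (descriptive note, not from the original source), from higher to
        // lower addresses: alignment padding and red zone, the syscall return value slot, ucontext,
        // siginfo, the saved FPU/SSE state, the pointers to ucontext and siginfo, the signal number, and
        // finally the handler address at the new stack pointer, ready for the signal trampoline that the
        // code below jumps to.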

        // Historical note (from the commit "Kernel: Fix UB caused by taking a reference to a packed
        // struct's member"): taking a reference or pointer to a value that isn't properly aligned is
        // undefined behavior. While [[gnu::packed]] makes reads from and writes to fields of packed
        // structs safe, the reduced alignment is lost when forming pointers to those fields. GCC's
        // undefined behavior sanitizer doesn't flag this, even though the documentation of
        // -Waddress-of-packed-member says it usually leads to UB, but x86_64 Clang does, which rendered
        // the 64-bit kernel unable to boot. The address-of-packed-member warning is therefore enabled for
        // the kernel (where it is crucial because of KUBSAN) but not yet for userland, and it can't be
        // appended to CMAKE_CXX_FLAGS because flags added via add_compile_options come later, so the root
        // -Wno-address-of-packed-member would cancel it out.

        // We write back the adjusted stack value into the register state.
        // We have to do this because we can't just pass around a reference to a packed field, as it's UB.
        state.set_userspace_sp(stack);

        return {};
    };
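    // Note (descriptive, not from the original source): everything setup_stack pushed (ucontext with the
    // old signal mask, siginfo, and the saved FPU/SSE state) is presumably what the signal trampoline and
    // sys$sigreturn later use to restore the interrupted context once the handler returns.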

    // We now place the thread state on the userspace stack.
    // Note that we use a RegisterState.
    // Conversely, when the thread isn't blocking, the RegisterState may not be
    // valid (fork, exec etc.) but the tss will, so we use that instead.
    auto& regs = get_register_dump_from_stack();

    auto result = setup_stack(regs);
    if (result.is_error()) {
        dbgln("Invalid stack pointer: {}", regs.userspace_sp());
        process.set_should_generate_coredump(true);
        process.for_each_thread([](auto& thread) {
            thread.set_dump_backtrace_on_finalization();
        });
        m_process->terminate_due_to_signal(signal);
        return DispatchSignalResult::Terminate;
    }

    auto signal_trampoline_addr = process.signal_trampoline().get();
    regs.set_ip(signal_trampoline_addr);

#if ARCH(X86_64)
    // Userspace flags might be invalid for function entry, according to SYSV ABI (section 3.2.1).
    // Set them to a known-good value to avoid weird handler misbehavior.
    // Only IF (and the reserved bit 1) are set.
    regs.set_flags(2 | (regs.rflags & ~safe_eflags_mask));
#endif

    dbgln_if(SIGNAL_DEBUG, "Thread in state '{}' has been primed with signal handler {:p} to deliver {}", state_string(), m_regs.ip(), signal);

    return DispatchSignalResult::Continue;
}
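
// Descriptive note (not from the original source): this follows next_trap links to the last frame in the
// trap chain and returns its saved RegisterState; judging by its use in dispatch_signal() above, that is
// the register state captured when the thread entered the kernel from userspace.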
RegisterState& Thread::get_register_dump_from_stack()
{
    auto* trap = current_trap();

    // We should *always* have a trap. If we don't, we're probably a kernel
    // thread that hasn't been preempted. If we want to support this, we
    // probably need to capture the registers into m_regs and return that.
    VERIFY(trap);

    while (trap) {
        if (!trap->next_trap)
            break;
        trap = trap->next_trap;
    }

    return *trap->regs;
}

ErrorOr<NonnullRefPtr<Thread>> Thread::clone(NonnullRefPtr<Process> process)
{
    auto clone = TRY(Thread::create(move(process)));
    m_signal_action_masks.span().copy_to(clone->m_signal_action_masks);
    clone->m_signal_mask = m_signal_mask;
    clone->m_fpu_state = m_fpu_state;
    clone->m_thread_specific_data = m_thread_specific_data;
    return clone;
}

void Thread::set_state(State new_state, u8 stop_signal)
{
    State previous_state;
    VERIFY(g_scheduler_lock.is_locked_by_current_processor());
    if (new_state == m_state)
        return;

    {
        previous_state = m_state;
        if (previous_state == Thread::State::Invalid) {
            // If we were *just* created, we may already have pending signals
            if (has_unmasked_pending_signals()) {
                dbgln_if(THREAD_DEBUG, "Dispatch pending signals to new thread {}", *this);
                dispatch_one_pending_signal();
            }
        }

        m_state = new_state;
        dbgln_if(THREAD_DEBUG, "Set thread {} state to {}", *this, state_string());
    }
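
    // Note (descriptive, not from the original source): the set_stopped() calls below appear to return
    // the process's previous "stopped" flag, so peer threads are only resumed or stopped (and the parent
    // notified via SIGCHLD) on an actual process-wide transition, not every time an individual thread
    // changes state.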
    if (previous_state == Thread::State::Runnable) {
        Scheduler::dequeue_runnable_thread(*this);
    } else if (previous_state == Thread::State::Stopped) {
        m_stop_state = State::Invalid;
        auto& process = this->process();
        if (process.set_stopped(false)) {
            process.for_each_thread([&](auto& thread) {
                if (&thread == this)
                    return;
                if (!thread.is_stopped())
                    return;
                dbgln_if(THREAD_DEBUG, "Resuming peer thread {}", thread);
                thread.resume_from_stopped();
            });
            process.unblock_waiters(Thread::WaitBlocker::UnblockFlags::Continued);
            // Tell the parent process (if any) about this change.
            if (auto parent = Process::from_pid_ignoring_jails(process.ppid())) {
                [[maybe_unused]] auto result = parent->send_signal(SIGCHLD, &process);
            }
        }
    }

    if (m_state == Thread::State::Runnable) {
        Scheduler::enqueue_runnable_thread(*this);
        Processor::smp_wake_n_idle_processors(1);
    } else if (m_state == Thread::State::Stopped) {
        // We don't want to restore to Running state, only Runnable!
        m_stop_state = previous_state != Thread::State::Running ? previous_state : Thread::State::Runnable;
        auto& process = this->process();
        if (!process.set_stopped(true)) {
            process.for_each_thread([&](auto& thread) {
                if (&thread == this)
                    return;
                if (thread.is_stopped())
                    return;
                dbgln_if(THREAD_DEBUG, "Stopping peer thread {}", thread);
                thread.set_state(Thread::State::Stopped, stop_signal);
            });
            process.unblock_waiters(Thread::WaitBlocker::UnblockFlags::Stopped, stop_signal);
            // Tell the parent process (if any) about this change.
            if (auto parent = Process::from_pid_ignoring_jails(process.ppid())) {
                [[maybe_unused]] auto result = parent->send_signal(SIGCHLD, &process);
            }
        }
    } else if (m_state == Thread::State::Dying) {
        VERIFY(previous_state != Thread::State::Blocked);
        if (this != Thread::current() && is_finalizable()) {
            // Some other thread set this thread to Dying, notify the
            // finalizer right away as it can be cleaned up now
            Scheduler::notify_finalizer();
        }
    }
}

struct RecognizedSymbol {
    FlatPtr address;
    KernelSymbol const* symbol { nullptr };
};

static ErrorOr<bool> symbolicate(RecognizedSymbol const& symbol, Process& process, StringBuilder& builder)
{
    if (symbol.address == 0)
        return false;

    auto credentials = process.credentials();
    bool mask_kernel_addresses = !credentials->is_superuser();
    if (!symbol.symbol) {
        if (!Memory::is_user_address(VirtualAddress(symbol.address))) {
            TRY(builder.try_append("0xdeadc0de\n"sv));
        } else {
            TRY(process.address_space().with([&](auto& space) -> ErrorOr<void> {
                if (auto* region = space->find_region_containing({ VirtualAddress(symbol.address), sizeof(FlatPtr) })) {
                    size_t offset = symbol.address - region->vaddr().get();
                    if (auto region_name = region->name(); !region_name.is_null() && !region_name.is_empty())
                        TRY(builder.try_appendff("{:p} {} + {:#x}\n", (void*)symbol.address, region_name, offset));
                    else
                        TRY(builder.try_appendff("{:p} {:p} + {:#x}\n", (void*)symbol.address, region->vaddr().as_ptr(), offset));
                } else {
                    TRY(builder.try_appendff("{:p}\n", symbol.address));
                }
                return {};
            }));
        }
        return true;
    }
    unsigned offset = symbol.address - symbol.symbol->address;
    if (symbol.symbol->address == g_highest_kernel_symbol_address && offset > 4096)
        TRY(builder.try_appendff("{:p}\n", (void*)(mask_kernel_addresses ? 0xdeadc0de : symbol.address)));
    else
        TRY(builder.try_appendff("{:p} {} + {:#x}\n", (void*)(mask_kernel_addresses ? 0xdeadc0de : symbol.address), symbol.symbol->name, offset));

    return true;
}
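
// Illustrative output (hypothetical values, not from the original source): each recognized frame becomes a
// line like "0x000000000a1b2c3d /usr/lib/Loader.so + 0x1c3d" for a userspace address inside a named region,
// or "<address> <kernel symbol> + <offset>" for kernel frames; kernel addresses are printed as 0xdeadc0de
// for non-superuser processes.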

ErrorOr<NonnullOwnPtr<KString>> Thread::backtrace()
{
    Vector<RecognizedSymbol, 128> recognized_symbols;

    auto& process = const_cast<Process&>(this->process());
    auto stack_trace = TRY(Processor::capture_stack_trace(*this));
    VERIFY(!g_scheduler_lock.is_locked_by_current_processor());
    ScopedAddressSpaceSwitcher switcher(process);
    for (auto& frame : stack_trace) {
        if (Memory::is_user_range(VirtualAddress(frame), sizeof(FlatPtr) * 2)) {
            TRY(recognized_symbols.try_append({ frame }));
        } else {
            TRY(recognized_symbols.try_append({ frame, symbolicate_kernel_address(frame) }));
        }
    }

    StringBuilder builder;
    for (auto& symbol : recognized_symbols) {
        if (!TRY(symbolicate(symbol, process, builder)))
            break;
    }
    return KString::try_create(builder.string_view());
}

size_t Thread::thread_specific_region_alignment() const
{
    return max(process().m_master_tls_alignment, alignof(ThreadSpecificData));
}

size_t Thread::thread_specific_region_size() const
{
    return align_up_to(process().m_master_tls_size, thread_specific_region_alignment()) + sizeof(ThreadSpecificData);
}
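
// Illustrative arithmetic (hypothetical values, not from the original source): with m_master_tls_size == 72,
// m_master_tls_alignment == 16 and alignof(ThreadSpecificData) <= 16, the region alignment is 16 and the
// region size is align_up_to(72, 16) + sizeof(ThreadSpecificData) == 80 + sizeof(ThreadSpecificData).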

ErrorOr<void> Thread::make_thread_specific_region(Badge<Process>)
{
    // The process may not require a TLS region, or allocate TLS later with sys$allocate_tls (which is what dynamically loaded programs do)
    if (!process().m_master_tls_region)
        return {};

    return process().address_space().with([&](auto& space) -> ErrorOr<void> {
        auto* region = TRY(space->allocate_region(Memory::RandomizeVirtualAddress::Yes, {}, thread_specific_region_size(), PAGE_SIZE, "Thread-specific"sv, PROT_READ | PROT_WRITE));

        m_thread_specific_range = region->range();

        SmapDisabler disabler;
        auto* thread_specific_data = (ThreadSpecificData*)region->vaddr().offset(align_up_to(process().m_master_tls_size, thread_specific_region_alignment())).as_ptr();
        auto* thread_local_storage = (u8*)((u8*)thread_specific_data) - align_up_to(process().m_master_tls_size, process().m_master_tls_alignment);
        m_thread_specific_data = VirtualAddress(thread_specific_data);
        thread_specific_data->self = thread_specific_data;

        if (process().m_master_tls_size != 0)
            memcpy(thread_local_storage, process().m_master_tls_region.unsafe_ptr()->vaddr().as_ptr(), process().m_master_tls_size);

        return {};
    });
}
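
// Descriptive sketch of the region layout (not from the original source): the ThreadSpecificData block sits
// at offset align_up_to(m_master_tls_size, thread_specific_region_alignment()) from the region base and
// stores a pointer to itself in its self field; the process's master TLS image is copied just below it,
// presumably matching the layout the TLS ABI's thread pointer expects.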

RefPtr<Thread> Thread::from_tid(ThreadID tid)
{
    return Thread::all_instances().with([&](auto& list) -> RefPtr<Thread> {
        for (Thread& thread : list) {
            if (thread.tid() == tid)
                return thread;
        }
        return nullptr;
    });
}

void Thread::reset_fpu_state()
{
    memcpy(&m_fpu_state, &Processor::clean_fpu_state(), sizeof(FPUState));
}

bool Thread::should_be_stopped() const
{
    return process().is_stopped();
}

void Thread::track_lock_acquire(LockRank rank)
{
    // Nothing to do for locks without a rank.
    if (rank == LockRank::None)
        return;

    if (m_lock_rank_mask != LockRank::None) {
        // Verify we are only attempting to take a lock of a higher rank.
        VERIFY(m_lock_rank_mask > rank);
    }

    m_lock_rank_mask |= rank;
}

void Thread::track_lock_release(LockRank rank)
{
    // Nothing to do for locks without a rank.
    if (rank == LockRank::None)
        return;

    // The rank value from the caller should only contain a single bit; otherwise
    // we are disabling the tracking for multiple locks at once, which would corrupt
    // the lock tracking mask, and we would assert somewhere else.
    auto rank_is_a_single_bit = [](auto rank_enum) -> bool {
        auto rank = to_underlying(rank_enum);
        auto rank_without_least_significant_bit = rank - 1;
        return (rank & rank_without_least_significant_bit) == 0;
    };

    // We can't release locks out of order, as that would violate the ranking.
    // This is validated by clearing the least significant set bit of the mask and
    // then bitwise OR-ing in the rank we are trying to release. If the rank we are
    // releasing is truly the highest rank (the least significant set bit), the mask
    // we get back will be equal to the current mask stored on the thread.
    auto rank_is_in_order = [](auto mask_enum, auto rank_enum) -> bool {
        auto mask = to_underlying(mask_enum);
        auto rank = to_underlying(rank_enum);
        auto mask_without_least_significant_bit = mask - 1;
        return ((mask & mask_without_least_significant_bit) | rank) == mask;
    };

    VERIFY(has_flag(m_lock_rank_mask, rank));
    VERIFY(rank_is_a_single_bit(rank));
    VERIFY(rank_is_in_order(m_lock_rank_mask, rank));

    m_lock_rank_mask ^= rank;
}
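
// Worked example (illustrative, not from the original source): with m_lock_rank_mask == 0b0110, releasing
// rank 0b0010 is in order, since (0b0110 & 0b0101) | 0b0010 == 0b0110 matches the mask, while releasing
// rank 0b0100 yields (0b0110 & 0b0101) | 0b0100 == 0b0100 and fails the check. rank_is_a_single_bit rejects
// values like 0b0110, because 0b0110 & 0b0101 != 0.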

void Thread::set_name(NonnullOwnPtr<KString> name)
{
    m_name.with([&](auto& this_name) {
        this_name = move(name);
    });
}

}
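
// Illustrative example (hypothetical values, not from the original source): for a thread with tid 42 in a
// process named "Shell" with pid 12, the formatter below produces "Shell(12:42)".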
ErrorOr<void> AK::Formatter<Kernel::Thread>::format(FormatBuilder& builder, Kernel::Thread const& value)
{
    return value.process().name().with([&](auto& process_name) {
        return AK::Formatter<FormatString>::format(
            builder,
            "{}({}:{})"sv, process_name->view(), value.pid().value(), value.tid().value());
    });
}