Kernel: Change wait blocking to Process-only blocking

This prevents zombies created by multi-threaded applications and brings
our model back closer to what other OSes do.

This also means that SIGSTOP needs to halt all threads, and SIGCONT needs
to resume those threads.
Author: Tom, 2020-12-08 21:18:45 -07:00; committed by Andreas Kling
parent 47ede74326
commit c455fc2030
Notes: sideshowbarker 2024-07-19 00:54:26 +09:00
11 changed files with 284 additions and 203 deletions
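
For context, a minimal userspace sketch of the behavior this change targets (illustrative only, not part of the commit; plain POSIX calls): a parent reaps a multi-threaded child with a single waitpid() on the process, and individual thread exits never surface as waitable zombie state.

#include <pthread.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static void* worker(void*) { return nullptr; }

int main()
{
    pid_t child = fork();
    if (child == 0) {
        // The child spawns several threads; their exits must not be
        // observable through the parent's waitpid().
        pthread_t threads[4];
        for (auto& t : threads)
            pthread_create(&t, nullptr, worker, nullptr);
        for (auto& t : threads)
            pthread_join(t, nullptr);
        _exit(0);
    }
    int status = 0;
    // With process-only wait blocking this returns exactly once, when the
    // child process as a whole terminates.
    waitpid(child, &status, 0);
    printf("child exited with status %d\n", WEXITSTATUS(status));
    return 0;
}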


@ -72,7 +72,7 @@ if (ALL_THE_DEBUG_MACROS)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DTCP_DEBUG -DTCP_SOCKET_DEBUG -DTERMCAP_DEBUG -DTERMINAL_DEBUG -DTHREAD_DEBUG -DTLS_DEBUG -DTTY_DEBUG")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUCI_DEBUG -DUDP_DEBUG -DUPDATE_COALESCING_DEBUG")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DVERY_DEBUG -DVFS_DEBUG -DVMWAREBACKDOOR_DEBUG -DVRA_DEBUG")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DWAITQUEUE_DEBUG -DWEAKABLE_DEBUG -DWINDOWMANAGER_DEBUG -DWSMESSAGELOOP_DEBUG -DWSSCREEN_DEBUG")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DWAITBLOCK_DEBUG -DWAITQUEUE_DEBUG -DWEAKABLE_DEBUG -DWINDOWMANAGER_DEBUG -DWSMESSAGELOOP_DEBUG -DWSSCREEN_DEBUG")
# False positive: IF_BMP_DEBUG is not actually a flag.
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DIF_BMP_DEBUG")
# False positive: LOG_DEBUG is a flag, but for a bitset, not a feature.


@ -484,7 +484,8 @@ void debug_handler(TrapFrame* trap)
clac();
auto& regs = *trap->regs;
auto current_thread = Thread::current();
if (&current_thread->process() == nullptr || (regs.cs & 3) == 0) {
auto& process = current_thread->process();
if ((regs.cs & 3) == 0) {
klog() << "Debug Exception in Ring0";
Processor::halt();
return;
@ -494,8 +495,8 @@ void debug_handler(TrapFrame* trap)
if (!is_reason_singlestep)
return;
if (current_thread->tracer()) {
current_thread->tracer()->set_regs(regs);
if (auto tracer = process.tracer()) {
tracer->set_regs(regs);
}
current_thread->send_urgent_signal_to_self(SIGTRAP);
}
@ -506,13 +507,14 @@ void breakpoint_handler(TrapFrame* trap)
clac();
auto& regs = *trap->regs;
auto current_thread = Thread::current();
if (&current_thread->process() == nullptr || (regs.cs & 3) == 0) {
auto& process = current_thread->process();
if ((regs.cs & 3) == 0) {
klog() << "Breakpoint Trap in Ring0";
Processor::halt();
return;
}
if (current_thread->tracer()) {
current_thread->tracer()->set_regs(regs);
if (auto tracer = process.tracer()) {
tracer->set_regs(regs);
}
current_thread->send_urgent_signal_to_self(SIGTRAP);
}
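
Two details worth noting in these handler hunks: Thread::process() returns a reference, so the removed &current_thread->process() == nullptr check could never fire; and with the tracer moved onto the Process, a single lookup now serves every thread. The shared shape of both handlers, as a sketch:

auto current_thread = Thread::current();
auto& process = current_thread->process();
if ((regs.cs & 3) == 0) {
    // Ring 0 traps cannot be attributed to a userspace tracer; halt.
    Processor::halt();
    return;
}
if (auto tracer = process.tracer())
    tracer->set_regs(regs); // let the tracer observe the trap registers
current_thread->send_urgent_signal_to_self(SIGTRAP);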


@ -577,7 +577,7 @@ KResultOr<String> Process::get_syscall_path_argument(const Syscall::StringArgume
return get_syscall_path_argument(path.characters, path.length);
}
void Process::finalize(Thread& last_thread)
void Process::finalize()
{
ASSERT(Thread::current() == g_finalizer);
#ifdef PROCESS_DEBUG
@ -627,7 +627,7 @@ void Process::finalize(Thread& last_thread)
}
}
unblock_waiters(last_thread, Thread::WaitBlocker::UnblockFlags::Terminated);
unblock_waiters(Thread::WaitBlocker::UnblockFlags::Terminated);
{
ScopedSpinLock lock(m_lock);
@ -647,10 +647,10 @@ void Process::disowned_by_waiter(Process& process)
m_wait_block_condition.disowned_by_waiter(process);
}
void Process::unblock_waiters(Thread& thread, Thread::WaitBlocker::UnblockFlags flags, u8 signal)
void Process::unblock_waiters(Thread::WaitBlocker::UnblockFlags flags, u8 signal)
{
if (auto parent = Process::from_pid(ppid()))
parent->m_wait_block_condition.unblock(thread, flags, signal);
parent->m_wait_block_condition.unblock(*this, flags, signal);
}
void Process::die()
@ -877,4 +877,21 @@ OwnPtr<Process::ELFBundle> Process::elf_bundle() const
return bundle;
}
void Process::start_tracing_from(ProcessID tracer)
{
m_tracer = ThreadTracer::create(tracer);
}
void Process::stop_tracing()
{
m_tracer = nullptr;
}
void Process::tracer_trap(Thread& thread, const RegisterState& regs)
{
ASSERT(m_tracer.ptr());
m_tracer->set_regs(regs);
thread.send_urgent_signal_to_self(SIGTRAP);
}
}
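
Since finalization is now per-process, notifying waiters reduces to one parent lookup. For orientation, a sketch of where the three UnblockFlags originate in this commit (the summary is the editor's; stop_signal stands in for the signal that caused the stop):

// Terminated: from Process::finalize(), once the last thread is gone.
unblock_waiters(Thread::WaitBlocker::UnblockFlags::Terminated);
// Stopped: from Thread::set_state(Stopped, signal), once per process.
unblock_waiters(Thread::WaitBlocker::UnblockFlags::Stopped, stop_signal);
// Continued: from Thread::set_state() when the process leaves Stopped.
unblock_waiters(Thread::WaitBlocker::UnblockFlags::Continued);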


@ -42,6 +42,7 @@
#include <Kernel/ProcessGroup.h>
#include <Kernel/StdLib.h>
#include <Kernel/Thread.h>
#include <Kernel/ThreadTracer.h>
#include <Kernel/UnixTypes.h>
#include <Kernel/VM/RangeAllocator.h>
#include <LibC/signal_numbers.h>
@ -166,6 +167,9 @@ public:
bool is_dead() const { return m_dead; }
bool is_stopped() const { return m_is_stopped.load(AK::MemoryOrder::memory_order_relaxed); }
bool set_stopped(bool stopped) { return m_is_stopped.exchange(stopped, AK::MemoryOrder::memory_order_relaxed); }
bool is_kernel_process() const { return m_is_kernel_process; }
bool is_user_process() const { return !m_is_kernel_process; }
@ -209,10 +213,16 @@ public:
void for_each_thread(Callback) const;
void die();
void finalize(Thread&);
void finalize();
ALWAYS_INLINE SpinLock<u32>& get_lock() const { return m_lock; }
ThreadTracer* tracer() { return m_tracer.ptr(); }
bool is_traced() const { return !!m_tracer; }
void start_tracing_from(ProcessID tracer);
void stop_tracing();
void tracer_trap(Thread&, const RegisterState&);
int sys$yield();
int sys$sync();
int sys$beep();
@ -489,7 +499,7 @@ public:
KResult poke_user_data(Userspace<u32*> address, u32 data);
void disowned_by_waiter(Process& process);
void unblock_waiters(Thread&, Thread::WaitBlocker::UnblockFlags, u8 signal = 0);
void unblock_waiters(Thread::WaitBlocker::UnblockFlags, u8 signal = 0);
Thread::WaitBlockCondition& wait_block_condition() { return m_wait_block_condition; }
private:
@ -530,7 +540,7 @@ private:
}
KResultOr<String> get_syscall_path_argument(const Syscall::StringArgument&) const;
bool has_tracee_thread(ProcessID tracer_pid) const;
bool has_tracee_thread(ProcessID tracer_pid);
RefPtr<PageDirectory> m_page_directory;
@ -554,6 +564,8 @@ private:
FlatPtr m_load_offset { 0U };
FlatPtr m_entry_eip { 0U };
OwnPtr<ThreadTracer> m_tracer;
static const int m_max_open_file_descriptors { FD_SETSIZE };
class FileDescriptionAndFlags {
@ -582,6 +594,7 @@ private:
const bool m_is_kernel_process;
bool m_dead { false };
bool m_profiling { false };
Atomic<bool> m_is_stopped { false };
RefPtr<Custody> m_executable;
RefPtr<Custody> m_cwd;
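
The new m_is_stopped atomic drives the process-wide SIGSTOP/SIGCONT fan-out later in this commit. set_stopped() deliberately uses exchange() so the caller learns the previous value, which makes the fan-out idempotent; a sketch of the intended use:

// Only the caller that actually flips the flag propagates the transition;
// concurrent or repeated stops observe `true` and do nothing extra.
if (process.set_stopped(true) == false) {
    // We made the false -> true transition: stop all sibling threads and
    // notify waiters exactly once.
}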


@ -37,7 +37,7 @@ KResultOr<u32> handle_syscall(const Kernel::Syscall::SC_ptrace_params& params, P
{
ScopedSpinLock scheduler_lock(g_scheduler_lock);
if (params.request == PT_TRACE_ME) {
if (Thread::current()->tracer())
if (Process::current()->tracer())
return KResult(-EBUSY);
caller.set_wait_for_tracer_at_next_execve(true);
@ -59,11 +59,12 @@ KResultOr<u32> handle_syscall(const Kernel::Syscall::SC_ptrace_params& params, P
|| (peer->process().uid() != peer->process().euid())) // Disallow tracing setuid processes
return KResult(-EACCES);
auto& peer_process = peer->process();
if (params.request == PT_ATTACH) {
if (peer->tracer()) {
if (peer_process.tracer()) {
return KResult(-EBUSY);
}
peer->start_tracing_from(caller.pid());
peer_process.start_tracing_from(caller.pid());
ScopedSpinLock lock(peer->get_lock());
if (peer->state() != Thread::State::Stopped) {
peer->send_signal(SIGSTOP, &caller);
@ -71,7 +72,7 @@ KResultOr<u32> handle_syscall(const Kernel::Syscall::SC_ptrace_params& params, P
return KSuccess;
}
auto* tracer = peer->tracer();
auto* tracer = peer_process.tracer();
if (!tracer)
return KResult(-EPERM);
@ -88,7 +89,7 @@ KResultOr<u32> handle_syscall(const Kernel::Syscall::SC_ptrace_params& params, P
break;
case PT_DETACH:
peer->stop_tracing();
peer_process.stop_tracing();
peer->send_signal(SIGCONT, &caller);
break;
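
From userspace, the attach flow these hunks implement looks roughly like the sketch below (assuming Serenity's ptrace() and waitpid() wrappers; illustrative only). After this change, EBUSY describes the whole process rather than one thread:

#include <sys/ptrace.h>
#include <sys/wait.h>

bool attach_and_wait(pid_t pid)
{
    if (ptrace(PT_ATTACH, pid, nullptr, 0) < 0)
        return false; // e.g. EBUSY: the process already has a tracer
    int status = 0;
    // The kernel sends SIGSTOP on attach; wait for the stop to be reported.
    if (waitpid(pid, &status, WUNTRACED) < 0)
        return false;
    return WIFSTOPPED(status);
}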


@ -94,10 +94,10 @@ int handle(RegisterState& regs, u32 function, u32 arg1, u32 arg2, u32 arg3)
if (function == SC_exit || function == SC_exit_thread) {
// These syscalls need special handling since they never return to the caller.
if (auto* tracer = current_thread->tracer(); tracer && tracer->is_tracing_syscalls()) {
if (auto* tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {
regs.eax = 0;
tracer->set_trace_syscalls(false);
current_thread->tracer_trap(regs); // this triggers SIGTRAP and stops the thread!
process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
}
cli();
@ -137,10 +137,11 @@ void syscall_handler(TrapFrame* trap)
{
auto& regs = *trap->regs;
auto current_thread = Thread::current();
auto& process = current_thread->process();
if (auto* tracer = current_thread->tracer(); tracer && tracer->is_tracing_syscalls()) {
if (auto tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {
tracer->set_trace_syscalls(false);
current_thread->tracer_trap(regs); // this triggers SIGTRAP and stops the thread!
process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
}
current_thread->yield_if_stopped();
@ -160,7 +161,6 @@ void syscall_handler(TrapFrame* trap)
asm volatile(""
: "=m"(*ptr));
auto& process = current_thread->process();
if (!MM.validate_user_stack(process, VirtualAddress(regs.userspace_esp))) {
dbgln("Invalid stack pointer: {:p}", regs.userspace_esp);
handle_crash(regs, "Bad stack on syscall entry", SIGSTKFLT);
@ -189,9 +189,9 @@ void syscall_handler(TrapFrame* trap)
process.big_lock().unlock();
if (auto* tracer = current_thread->tracer(); tracer && tracer->is_tracing_syscalls()) {
if (auto tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {
tracer->set_trace_syscalls(false);
current_thread->tracer_trap(regs); // this triggers SIGTRAP and stops the thread!
process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
}
current_thread->yield_if_stopped();
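
These hooks implement the tracee half of syscall tracing: once the tracer arms is_tracing_syscalls(), the thread traps on syscall entry and exit (with sys$exit and sys$exit_thread trapping early since they never return). A hedged sketch of the tracer loop this serves, assuming the PT_SYSCALL request used by tools like strace:

for (;;) {
    // Arm a one-shot syscall trace; the tracee stops with SIGTRAP at the
    // next syscall boundary and the kernel clears the flag again.
    if (ptrace(PT_SYSCALL, pid, nullptr, 0) < 0)
        break;
    int status = 0;
    if (waitpid(pid, &status, WUNTRACED) < 0 || !WIFSTOPPED(status))
        break;
    // A PT_GETREGS call here would decode the syscall number and result.
}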


@ -48,18 +48,11 @@ int Process::sys$ptrace(Userspace<const Syscall::SC_ptrace_params*> user_params)
/**
* "Does this process have a thread that is currently being traced by the provided process?"
*/
bool Process::has_tracee_thread(ProcessID tracer_pid) const
bool Process::has_tracee_thread(ProcessID tracer_pid)
{
bool has_tracee = false;
for_each_thread([&](Thread& t) {
if (t.tracer() && t.tracer()->tracer_pid() == tracer_pid) {
has_tracee = true;
return IterationDecision::Break;
}
return IterationDecision::Continue;
});
return has_tracee;
if (auto tracer = this->tracer())
return tracer->tracer_pid() == tracer_pid;
return false;
}
KResultOr<u32> Process::peek_user_data(Userspace<const u32*> address)


@ -136,6 +136,7 @@ int Process::sys$join_thread(pid_t tid, Userspace<void**> exit_value)
}
if (result == Thread::BlockResult::InterruptedByDeath)
break;
dbg() << "join_thread: retrying";
}
if (exit_value && !copy_to_user(exit_value, &joinee_exit_value))


@ -129,23 +129,37 @@ Thread::~Thread()
void Thread::unblock_from_blocker(Blocker& blocker)
{
ScopedSpinLock scheduler_lock(g_scheduler_lock);
ScopedSpinLock block_lock(m_block_lock);
if (m_blocker != &blocker)
return;
if (!is_stopped())
unblock();
auto do_unblock = [&]() {
ScopedSpinLock scheduler_lock(g_scheduler_lock);
ScopedSpinLock block_lock(m_block_lock);
if (m_blocker != &blocker)
return;
if (!should_be_stopped() && !is_stopped())
unblock();
};
if (Processor::current().in_irq()) {
Processor::current().deferred_call_queue([do_unblock = move(do_unblock), self = make_weak_ptr()]() {
if (auto this_thread = self.strong_ref())
do_unblock();
});
} else {
do_unblock();
}
}
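
The rewritten unblock_from_blocker() is an instance of a general IRQ-deferral pattern: the scheduler and block locks cannot safely be taken from within an IRQ handler, so the work is queued as a deferred call, and the weak pointer turns it into a no-op if the thread is destroyed first. The shape in isolation, with `work` standing in for the unblock closure:

if (Processor::current().in_irq()) {
    Processor::current().deferred_call_queue([work = move(work), self = make_weak_ptr()]() {
        if (auto this_thread = self.strong_ref())
            work(); // the strong ref keeps the thread alive while we run
    });
} else {
    work();
}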
void Thread::unblock(u8 signal)
{
ASSERT(!Processor::current().in_irq());
ASSERT(g_scheduler_lock.own_lock());
ASSERT(m_block_lock.own_lock());
if (m_state != Thread::Blocked)
return;
ASSERT(m_blocker);
if (signal != 0)
if (signal != 0) {
if (!m_blocker->can_be_interrupted() && !m_should_die)
return;
m_blocker->set_interrupted_by_signal(signal);
}
m_blocker = nullptr;
if (Thread::current() == this) {
set_state(Thread::Running);
@ -178,6 +192,7 @@ void Thread::set_should_die()
// If we were stopped, we need to briefly resume so that
// the kernel stacks can clean up. We won't ever return back
// to user mode, though
ASSERT(!process().is_stopped());
resume_from_stopped();
}
if (is_blocked()) {
@ -253,6 +268,11 @@ bool Thread::unlock_process_if_locked()
return process().big_lock().force_unlock_if_locked();
}
void Thread::lock_process()
{
process().big_lock().lock();
}
void Thread::relock_process(bool did_unlock)
{
// Clearing the critical section may trigger the context switch
@ -343,9 +363,7 @@ void Thread::finalize()
ASSERT(thread_cnt_before != 0);
if (thread_cnt_before == 1)
process().finalize(*this);
else
process().unblock_waiters(*this, Thread::WaitBlocker::UnblockFlags::Terminated);
process().finalize();
}
void Thread::finalize_dying_threads()
@ -624,7 +642,18 @@ void Thread::resume_from_stopped()
ASSERT(is_stopped());
ASSERT(m_stop_state != State::Invalid);
ASSERT(g_scheduler_lock.own_lock());
set_state(m_stop_state != Blocked ? m_stop_state : Runnable);
if (m_stop_state == Blocked) {
ScopedSpinLock block_lock(m_block_lock);
if (m_blocker) {
// Hasn't been unblocked yet
set_state(Blocked, 0);
} else {
// Was unblocked while stopped
set_state(Runnable);
}
} else {
set_state(m_stop_state, 0);
}
}
DispatchSignalResult Thread::dispatch_signal(u8 signal)
@ -668,13 +697,13 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
m_pending_signals &= ~(1 << (signal - 1));
m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
auto* thread_tracer = tracer();
if (signal == SIGSTOP || (thread_tracer && default_signal_action(signal) == DefaultSignalAction::DumpCore)) {
auto& process = this->process();
auto tracer = process.tracer();
if (signal == SIGSTOP || (tracer && default_signal_action(signal) == DefaultSignalAction::DumpCore)) {
#ifdef SIGNAL_DEBUG
dbg() << "signal: signal " << signal << " stopping thread " << *this;
#endif
m_stop_signal = signal;
set_state(State::Stopped);
set_state(State::Stopped, signal);
return DispatchSignalResult::Yield;
}
@ -683,19 +712,18 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
dbg() << "signal: SIGCONT resuming " << *this;
#endif
} else {
if (thread_tracer != nullptr) {
if (tracer) {
// when a thread is traced, it should be stopped whenever it receives a signal
// the tracer is notified of this by using waitpid()
// only "pending signals" from the tracer are sent to the tracee
if (!thread_tracer->has_pending_signal(signal)) {
m_stop_signal = signal;
if (!tracer->has_pending_signal(signal)) {
#ifdef SIGNAL_DEBUG
dbg() << "signal: " << signal << " stopping " << *this << " for tracer";
#endif
set_state(Stopped);
set_state(Stopped, signal);
return DispatchSignalResult::Yield;
}
thread_tracer->unset_signal(signal);
tracer->unset_signal(signal);
}
}
@ -703,11 +731,10 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
if (handler_vaddr.is_null()) {
switch (default_signal_action(signal)) {
case DefaultSignalAction::Stop:
m_stop_signal = signal;
set_state(Stopped);
set_state(Stopped, signal);
return DispatchSignalResult::Yield;
case DefaultSignalAction::DumpCore:
process().for_each_thread([](auto& thread) {
process.for_each_thread([](auto& thread) {
thread.set_dump_backtrace_on_finalization();
return IterationDecision::Continue;
});
@ -897,33 +924,29 @@ RefPtr<Thread> Thread::clone(Process& process)
return clone;
}
void Thread::set_state(State new_state)
void Thread::set_state(State new_state, u8 stop_signal)
{
State previous_state;
ASSERT(g_scheduler_lock.own_lock());
if (new_state == m_state)
return;
if (new_state == Blocked) {
// we should always have a Blocker while blocked
ScopedSpinLock block_lock(m_block_lock);
ASSERT(m_blocker != nullptr);
}
auto previous_state = m_state;
ScopedSpinLock thread_lock(m_lock);
if (previous_state == Invalid) {
// If we were *just* created, we may already have pending signals
if (has_unmasked_pending_signals()) {
dbg() << "Dispatch pending signals to new thread " << *this;
dispatch_one_pending_signal();
{
ScopedSpinLock thread_lock(m_lock);
previous_state = m_state;
if (previous_state == Invalid) {
// If we were *just* created, we may already have pending signals
if (has_unmasked_pending_signals()) {
dbg() << "Dispatch pending signals to new thread " << *this;
dispatch_one_pending_signal();
}
}
}
m_state = new_state;
m_state = new_state;
#ifdef THREAD_DEBUG
dbg() << "Set Thread " << *this << " state to " << state_string();
dbg() << "Set Thread " << *this << " state to " << state_string();
#endif
thread_lock.unlock();
}
if (m_process->pid() != 0) {
update_state_for_thread(previous_state);
@ -932,13 +955,37 @@ void Thread::set_state(State new_state)
if (previous_state == Stopped) {
m_stop_state = State::Invalid;
process().unblock_waiters(*this, Thread::WaitBlocker::UnblockFlags::Continued);
auto& process = this->process();
if (process.set_stopped(false) == true) {
process.for_each_thread([&](auto& thread) {
if (&thread == this || !thread.is_stopped())
return IterationDecision::Continue;
#ifdef THREAD_DEBUG
dbg() << "Resuming peer thread " << thread;
#endif
thread.resume_from_stopped();
return IterationDecision::Continue;
});
process.unblock_waiters(Thread::WaitBlocker::UnblockFlags::Continued);
}
}
if (m_state == Stopped) {
// We don't want to restore to Running state, only Runnable!
m_stop_state = previous_state != Running ? m_state : Runnable;
process().unblock_waiters(*this, Thread::WaitBlocker::UnblockFlags::Stopped, m_stop_signal);
m_stop_state = previous_state != Running ? previous_state : Runnable;
auto& process = this->process();
if (process.set_stopped(true) == false) {
process.for_each_thread([&](auto& thread) {
if (&thread == this || thread.is_stopped())
return IterationDecision::Continue;
#ifdef THREAD_DEBUG
dbg() << "Stopping peer thread " << thread;
#endif
thread.set_state(Stopped, stop_signal);
return IterationDecision::Continue;
});
process.unblock_waiters(Thread::WaitBlocker::UnblockFlags::Stopped, stop_signal);
}
} else if (m_state == Dying) {
ASSERT(previous_state != Blocked);
if (this != Thread::current() && is_finalizable()) {
@ -1014,6 +1061,7 @@ String Thread::backtrace_impl()
elf_bundle = process.elf_bundle();
}
auto stack_trace = Processor::capture_stack_trace(*this);
ASSERT(!g_scheduler_lock.own_lock());
ProcessPagingScope paging_scope(process);
for (auto& frame : stack_trace) {
if (is_user_range(VirtualAddress(frame), sizeof(FlatPtr) * 2)) {
@ -1096,28 +1144,9 @@ void Thread::reset_fpu_state()
memcpy(m_fpu_state, &Processor::current().clean_fpu_state(), sizeof(FPUState));
}
void Thread::start_tracing_from(ProcessID tracer)
bool Thread::should_be_stopped() const
{
m_tracer = ThreadTracer::create(tracer);
}
void Thread::stop_tracing()
{
m_tracer = nullptr;
}
void Thread::tracer_trap(const RegisterState& regs)
{
ASSERT(m_tracer.ptr());
m_tracer->set_regs(regs);
send_urgent_signal_to_self(SIGTRAP);
}
const Thread::Blocker& Thread::blocker() const
{
ASSERT(m_lock.own_lock());
ASSERT(m_blocker);
return *m_blocker;
return process().is_stopped();
}
}
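
Taken together, the set_state() changes give SIGSTOP and SIGCONT whole-process semantics: the first thread to stop (or resume) fans the transition out to its peers and notifies waiters once. From the parent's side, the observable behavior sketches as follows (child being a hypothetical multi-threaded child pid):

int status = 0;
kill(child, SIGSTOP); // halts every thread in the child
waitpid(child, &status, WUNTRACED); // reported exactly once per process
// WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP
kill(child, SIGCONT); // resumes all of the child's stopped threads
waitpid(child, &status, WCONTINUED); // WIFCONTINUED(status)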


@ -272,6 +272,7 @@ public:
virtual bool should_block() { return true; }
virtual Type blocker_type() const = 0;
virtual const BlockTimeout& override_timeout(const BlockTimeout& timeout) { return timeout; }
virtual bool can_be_interrupted() const { return true; }
virtual void not_blocking(bool) = 0;
virtual void was_unblocked(bool did_timeout)
{
@ -480,6 +481,7 @@ public:
explicit JoinBlocker(Thread& joinee, KResult& try_join_result, void*& joinee_exit_value);
virtual Type blocker_type() const override { return Type::Join; }
virtual const char* state_string() const override { return "Joining"; }
virtual bool can_be_interrupted() const override { return false; }
virtual bool should_block() override { return !m_join_error && m_should_block; }
virtual void not_blocking(bool) override;
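
can_be_interrupted() is the new hook consulted by Thread::unblock() earlier in this commit: a signal no longer breaks a non-interruptible block, and JoinBlocker opts out so a join ends only when the joinee exits or the joiner dies. The consulting side, quoted for reference:

if (signal != 0) {
    if (!m_blocker->can_be_interrupted() && !m_should_die)
        return; // e.g. a JoinBlocker: signals do not interrupt the join
    m_blocker->set_interrupted_by_signal(signal);
}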
@ -655,7 +657,7 @@ public:
virtual void not_blocking(bool) override;
virtual void was_unblocked(bool) override;
bool unblock(Thread& thread, UnblockFlags flags, u8 signal, bool from_add_blocker);
bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
bool is_wait() const { return !(m_wait_options & WNOWAIT); }
private:
@ -684,7 +686,7 @@ public:
}
void disowned_by_waiter(Process&);
bool unblock(Thread&, WaitBlocker::UnblockFlags, u8);
bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
void try_unblock(WaitBlocker&);
void finalize();
@ -692,22 +694,18 @@ public:
virtual bool should_add_blocker(Blocker&, void*) override;
private:
struct ThreadBlockInfo {
NonnullRefPtr<Thread> thread;
struct ProcessBlockInfo {
NonnullRefPtr<Process> process;
WaitBlocker::UnblockFlags flags;
u8 signal;
bool was_waited { false };
explicit ThreadBlockInfo(NonnullRefPtr<Thread>&& thread, WaitBlocker::UnblockFlags flags, u8 signal)
: thread(move(thread))
, flags(flags)
, signal(signal)
{
}
explicit ProcessBlockInfo(NonnullRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
~ProcessBlockInfo();
};
Process& m_process;
Vector<ThreadBlockInfo, 2> m_threads;
Vector<ProcessBlockInfo, 2> m_processes;
bool m_finalized { false };
};
@ -735,6 +733,7 @@ public:
void resume_from_stopped();
bool should_be_stopped() const;
bool is_stopped() const { return m_state == Stopped; }
bool is_blocked() const { return m_state == Blocked; }
bool is_in_block() const
@ -742,7 +741,6 @@ public:
ScopedSpinLock lock(m_block_lock);
return m_in_block;
}
const Blocker& blocker() const;
u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
@ -782,15 +780,17 @@ public:
template<typename T, class... Args>
[[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
{
ASSERT(!Processor::current().in_irq());
ScopedCritical critical;
ScopedSpinLock scheduler_lock(g_scheduler_lock);
ScopedSpinLock block_lock(m_block_lock);
// We need to hold m_block_lock so that nobody can unblock a blocker as soon
// as it is constructed and registered elsewhere
ASSERT(!m_in_block);
m_in_block = true;
m_in_block++;
T t(forward<Args>(args)...);
bool did_timeout = false;
Atomic<bool> timeout_unblocked(false);
Atomic<bool> did_unblock(false);
RefPtr<Timer> timer;
{
switch (state()) {
@ -809,7 +809,7 @@ public:
// Don't block if the wake condition is already met
t.not_blocking(false);
m_blocker = nullptr;
m_in_block = false;
m_in_block--;
return BlockResult::NotBlocked;
}
@ -817,34 +817,23 @@ public:
if (!block_timeout.is_infinite()) {
// Process::kill_all_threads may be called at any time, which will mark all
// threads to die. In that case
m_blocker_timeout = timer = TimerQueue::the().add_timer_without_id(block_timeout.clock_id(), block_timeout.absolute_time(), [this]() {
timer = TimerQueue::the().add_timer_without_id(block_timeout.clock_id(), block_timeout.absolute_time(), [&]() {
ASSERT(!Processor::current().in_irq());
ASSERT(!g_scheduler_lock.own_lock());
ASSERT(!m_block_lock.own_lock());
// NOTE: this may execute on the same or any other processor!
{
ScopedSpinLock block_lock(m_block_lock);
if (!m_blocker)
return;
m_blocker_timeout = nullptr;
}
ScopedSpinLock scheduler_lock(g_scheduler_lock);
ScopedSpinLock block_lock(m_block_lock);
if (!this->is_stopped()) {
// Only unblock if we're not stopped. In either
// case the blocker should be marked as timed out
if (m_blocker && timeout_unblocked.exchange(true, AK::MemoryOrder::memory_order_relaxed) == false)
unblock();
}
});
if (!m_blocker_timeout) {
if (!timer) {
// Timeout is already in the past
t.not_blocking(true);
m_blocker = nullptr;
m_in_block = false;
m_in_block--;
return BlockResult::InterruptedByTimeout;
}
} else {
m_blocker_timeout = nullptr;
}
t.begin_blocking({});
@ -853,30 +842,46 @@ public:
}
block_lock.unlock();
scheduler_lock.unlock();
// Yield to the scheduler, and wait for us to resume unblocked.
yield_without_holding_big_lock();
bool did_timeout = false;
bool last_recursive_unblock;
for (;;) {
scheduler_lock.unlock();
scheduler_lock.lock();
bool is_stopped = state() == Thread::Stopped;
{
if (t.was_interrupted_by_signal()) {
ScopedSpinLock lock(m_lock);
dispatch_one_pending_signal();
}
// We should no longer be blocked once we woke up, but we may be stopped
ASSERT(state() == (is_stopped ? Thread::Stopped : Thread::Running));
// Yield to the scheduler, and wait for us to resume unblocked.
m_need_relock_process |= unlock_process_if_locked();
ASSERT(!g_scheduler_lock.own_lock());
ASSERT(Processor::current().in_critical());
yield_while_not_holding_big_lock();
scheduler_lock.lock();
ScopedSpinLock block_lock2(m_block_lock);
// Remove ourselves...
m_blocker = nullptr;
m_in_block = false;
if (timer && !m_blocker_timeout)
did_timeout = true;
if (should_be_stopped() || state() == Stopped) {
dbg() << "Thread should be stopped, current state: " << state_string();
set_state(Thread::Blocked);
continue;
}
if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
block_lock2.unlock();
dbg() << "Thread should not be unblocking, current state: " << state_string();
set_state(Thread::Blocked);
continue;
}
// Prevent the timeout from unblocking this thread if it happens to
// be in the process of firing already
did_timeout |= timeout_unblocked.exchange(true, AK::MemoryOrder::memory_order_relaxed);
if (m_blocker) {
// Remove ourselves...
ASSERT(m_blocker == &t);
m_blocker = nullptr;
}
last_recursive_unblock = (--m_in_block == 0);
break;
}
if (t.was_interrupted_by_signal()) {
ScopedSpinLock lock(m_lock);
dispatch_one_pending_signal();
}
// Notify the blocker that we are no longer blocking. It may need
@ -889,13 +894,12 @@ public:
// the timer function to complete before we remove it
// (e.g. if it's on another processor)
TimerQueue::the().cancel_timer(timer.release_nonnull());
if (is_stopped) {
// If we're stopped we need to yield
yield_without_holding_big_lock();
}
} else if (is_stopped) {
// If we're stopped we need to yield
yield_without_holding_big_lock();
}
if (m_need_relock_process && last_recursive_unblock) {
m_need_relock_process = false;
// NOTE: this may trigger another call to Thread::block(), so
// we need to do this after we're all done and restored m_in_block!
lock_process();
}
return result;
}
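
Two subtleties in the rewritten block(): m_in_block became a counter because re-acquiring the process big lock at the end can itself block and re-enter Thread::block(), and the timeout_unblocked atomic closes the race against the timer firing on another processor. The relock logic in miniature:

m_in_block++; // entering a (possibly nested) block
// ... block, wake, loop until genuinely unblocked ...
bool last_recursive_unblock = (--m_in_block == 0);
if (m_need_relock_process && last_recursive_unblock) {
    m_need_relock_process = false;
    // Done last: lock_process() may block and recurse into Thread::block().
    lock_process();
}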
@ -929,7 +933,7 @@ public:
u32 kernel_stack_base() const { return m_kernel_stack_base; }
u32 kernel_stack_top() const { return m_kernel_stack_top; }
void set_state(State);
void set_state(State, u8 = 0);
bool is_initialized() const { return m_initialized; }
void set_initialized(bool initialized) { m_initialized = initialized; }
@ -1055,12 +1059,6 @@ public:
static constexpr u32 default_kernel_stack_size = 65536;
static constexpr u32 default_userspace_stack_size = 4 * MiB;
ThreadTracer* tracer() { return m_tracer.ptr(); }
void start_tracing_from(ProcessID tracer);
void stop_tracing();
void tracer_trap(const RegisterState&);
bool is_traced() const { return !!m_tracer; }
RecursiveSpinLock& get_lock() const { return m_lock; }
#ifdef LOCK_DEBUG
@ -1165,6 +1163,7 @@ private:
};
bool unlock_process_if_locked();
void lock_process();
void relock_process(bool did_unlock);
String backtrace_impl();
void reset_fpu_state();
@ -1188,7 +1187,6 @@ private:
size_t m_thread_specific_region_size { 0 };
SignalActionData m_signal_action_data[32];
Blocker* m_blocker { nullptr };
RefPtr<Timer> m_blocker_timeout;
#ifdef LOCK_DEBUG
struct HoldingLockInfo {
@ -1227,17 +1225,15 @@ private:
u32 m_extra_priority { 0 };
u32 m_priority_boost { 0 };
u8 m_stop_signal { 0 };
State m_stop_state { Invalid };
u32 m_in_block { false };
bool m_dump_backtrace_on_finalization { false };
bool m_should_die { false };
bool m_initialized { false };
bool m_in_block { false };
bool m_need_relock_process { false };
Atomic<bool> m_have_any_unmasked_pending_signals { false };
OwnPtr<ThreadTracer> m_tracer;
void yield_without_holding_big_lock();
void yield_while_not_holding_big_lock();
void update_state_for_thread(Thread::State previous_state);


@ -30,6 +30,8 @@
#include <Kernel/Scheduler.h>
#include <Kernel/Thread.h>
//#define WAITBLOCK_DEBUG
namespace Kernel {
bool Thread::Blocker::set_block_condition(Thread::BlockCondition& block_condition, void* data)
@ -390,22 +392,40 @@ void Thread::SelectBlocker::was_unblocked(bool did_timeout)
}
}
Thread::WaitBlockCondition::ProcessBlockInfo::ProcessBlockInfo(NonnullRefPtr<Process>&& process, WaitBlocker::UnblockFlags flags, u8 signal)
: process(move(process))
, flags(flags)
, signal(signal)
{
}
Thread::WaitBlockCondition::ProcessBlockInfo::~ProcessBlockInfo()
{
}
void Thread::WaitBlockCondition::try_unblock(Thread::WaitBlocker& blocker)
{
ScopedSpinLock lock(m_lock);
// Check if we have any processes pending
for (size_t i = 0; i < m_threads.size(); i++) {
auto& info = m_threads[i];
for (size_t i = 0; i < m_processes.size(); i++) {
auto& info = m_processes[i];
// We need to call unblock as if we were called from add_blocker
// so that we don't trigger a context switch by yielding!
if (info.was_waited && blocker.is_wait())
continue; // This state was already waited on, do not unblock
if (blocker.unblock(info.thread, info.flags, info.signal, true)) {
if (blocker.unblock(info.process, info.flags, info.signal, true)) {
if (blocker.is_wait()) {
if (info.flags == Thread::WaitBlocker::UnblockFlags::Terminated)
m_threads.remove(i);
else
if (info.flags == Thread::WaitBlocker::UnblockFlags::Terminated) {
m_processes.remove(i);
#ifdef WAITBLOCK_DEBUG
dbg() << "WaitBlockCondition[" << m_process << "] terminated, remove " << *info.process;
#endif
} else {
#ifdef WAITBLOCK_DEBUG
dbg() << "WaitBlockCondition[" << m_process << "] terminated, mark as waited " << *info.process;
#endif
info.was_waited = true;
}
}
break;
}
@ -417,17 +437,20 @@ void Thread::WaitBlockCondition::disowned_by_waiter(Process& process)
ScopedSpinLock lock(m_lock);
if (m_finalized)
return;
for (size_t i = 0; i < m_threads.size();) {
auto& info = m_threads[i];
if (&info.thread->process() == &process) {
for (size_t i = 0; i < m_processes.size();) {
auto& info = m_processes[i];
if (info.process == &process) {
do_unblock([&](Blocker& b, void*) {
ASSERT(b.blocker_type() == Blocker::Type::Wait);
auto& blocker = static_cast<WaitBlocker&>(b);
bool did_unblock = blocker.unblock(info.thread, WaitBlocker::UnblockFlags::Disowned, 0, false);
bool did_unblock = blocker.unblock(info.process, WaitBlocker::UnblockFlags::Disowned, 0, false);
ASSERT(did_unblock); // disowning must unblock everyone
return true;
});
m_threads.remove(i);
#ifdef WAITBLOCK_DEBUG
dbg() << "WaitBlockCondition[" << m_process << "] disowned " << *info.process;
#endif
m_processes.remove(i);
continue;
}
@ -435,7 +458,7 @@ void Thread::WaitBlockCondition::disowned_by_waiter(Process& process)
}
}
bool Thread::WaitBlockCondition::unblock(Thread& thread, WaitBlocker::UnblockFlags flags, u8 signal)
bool Thread::WaitBlockCondition::unblock(Process& process, WaitBlocker::UnblockFlags flags, u8 signal)
{
ASSERT(flags != WaitBlocker::UnblockFlags::Disowned);
@ -448,8 +471,8 @@ bool Thread::WaitBlockCondition::unblock(Thread& thread, WaitBlocker::UnblockFla
return false;
if (flags != WaitBlocker::UnblockFlags::Terminated) {
// First check if this state was already waited on
for (auto& info : m_threads) {
if (info.thread == &thread) {
for (auto& info : m_processes) {
if (info.process == &process) {
was_waited_already = info.was_waited;
break;
}
@ -461,7 +484,7 @@ bool Thread::WaitBlockCondition::unblock(Thread& thread, WaitBlocker::UnblockFla
auto& blocker = static_cast<WaitBlocker&>(b);
if (was_waited_already && blocker.is_wait())
return false; // This state was already waited on, do not unblock
if (blocker.unblock(thread, flags, signal, false)) {
if (blocker.unblock(process, flags, signal, false)) {
did_wait |= blocker.is_wait(); // anyone requesting a wait
did_unblock_any = true;
return true;
@ -473,18 +496,25 @@ bool Thread::WaitBlockCondition::unblock(Thread& thread, WaitBlocker::UnblockFla
// UnblockFlags::Terminated then add it to your list
if (!did_unblock_any || !did_wait || flags != WaitBlocker::UnblockFlags::Terminated) {
bool updated_existing = false;
for (auto& info : m_threads) {
if (info.thread == &thread) {
for (auto& info : m_processes) {
if (info.process == &process) {
ASSERT(info.flags != WaitBlocker::UnblockFlags::Terminated);
info.flags = flags;
info.signal = signal;
info.was_waited = did_wait;
#ifdef WAITBLOCK_DEBUG
dbg() << "WaitBlockCondition[" << m_process << "] update " << process << " flags: " << (int)flags << " mark as waited: " << info.was_waited;
#endif
updated_existing = true;
break;
}
}
if (!updated_existing)
m_threads.append(ThreadBlockInfo(thread, flags, signal));
if (!updated_existing) {
#ifdef WAITBLOCK_DEBUG
dbg() << "WaitBlockCondition[" << m_process << "] add " << process << " flags: " << (int)flags;
#endif
m_processes.append(ProcessBlockInfo(process, flags, signal));
}
}
return did_unblock_any;
}
@ -497,12 +527,12 @@ bool Thread::WaitBlockCondition::should_add_blocker(Blocker& b, void*)
ASSERT(b.blocker_type() == Blocker::Type::Wait);
auto& blocker = static_cast<WaitBlocker&>(b);
// See if we can match any process immediately
for (size_t i = 0; i < m_threads.size(); i++) {
auto& info = m_threads[i];
if (blocker.unblock(info.thread, info.flags, info.signal, true)) {
for (size_t i = 0; i < m_processes.size(); i++) {
auto& info = m_processes[i];
if (blocker.unblock(info.process, info.flags, info.signal, true)) {
// Only remove the entry if UnblockFlags::Terminated
if (info.flags == Thread::WaitBlocker::UnblockFlags::Terminated && blocker.is_wait())
m_threads.remove(i);
m_processes.remove(i);
return false;
}
}
@ -516,7 +546,7 @@ void Thread::WaitBlockCondition::finalize()
m_finalized = true;
// Clear the list of threads here so we can drop the references to them
m_threads.clear();
m_processes.clear();
// No more waiters, drop the last reference immediately. This may
// cause us to be destructed ourselves!
@ -614,15 +644,14 @@ void Thread::WaitBlocker::do_set_result(const siginfo_t& result)
}
}
bool Thread::WaitBlocker::unblock(Thread& thread, UnblockFlags flags, u8 signal, bool from_add_blocker)
bool Thread::WaitBlocker::unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker)
{
ASSERT(flags != UnblockFlags::Terminated || signal == 0); // signal argument should be ignored for Terminated
auto& process = thread.process();
switch (m_id_type) {
case P_PID:
ASSERT(m_waitee);
if (process.pid() != m_waitee_id && thread.tid() != m_waitee_id) // TODO: pid/tid
if (process.pid() != m_waitee_id)
return false;
break;
case P_PGID:
@ -648,13 +677,13 @@ bool Thread::WaitBlocker::unblock(Thread& thread, UnblockFlags flags, u8 signal,
case UnblockFlags::Stopped:
if (!(m_wait_options & WSTOPPED))
return false;
if (!(m_wait_options & WUNTRACED) && !thread.is_traced())
if (!(m_wait_options & WUNTRACED) && !process.is_traced())
return false;
break;
case UnblockFlags::Continued:
if (!(m_wait_options & WCONTINUED))
return false;
if (!(m_wait_options & WUNTRACED) && !thread.is_traced())
if (!(m_wait_options & WUNTRACED) && !process.is_traced())
return false;
break;
case UnblockFlags::Disowned:
@ -681,7 +710,7 @@ bool Thread::WaitBlocker::unblock(Thread& thread, UnblockFlags flags, u8 signal,
ScopedSpinLock lock(g_scheduler_lock);
// We need to gather the information before we release the scheduler lock!
siginfo.si_signo = SIGCHLD;
siginfo.si_pid = thread.tid().value();
siginfo.si_pid = process.pid().value();
siginfo.si_uid = process.uid();
siginfo.si_status = signal;
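
Note that si_pid is now filled from process.pid() rather than a thread id, so waiters only ever see process ids. For completeness, the parent-side decoding (plain POSIX, not part of this commit; includes omitted):

int status = 0;
pid_t who = waitpid(-1, &status, WUNTRACED | WCONTINUED);
if (WIFSTOPPED(status))
    printf("%d stopped by signal %d\n", who, WSTOPSIG(status));
else if (WIFCONTINUED(status))
    printf("%d continued\n", who);
else if (WIFEXITED(status))
    printf("%d exited with status %d\n", who, WEXITSTATUS(status));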