2020-01-18 11:38:21 +03:00
|
|
|
/*
|
2021-08-05 23:22:26 +03:00
|
|
|
* Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
|
2020-01-18 11:38:21 +03:00
|
|
|
*
|
2021-04-22 11:24:48 +03:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause
|
2020-01-18 11:38:21 +03:00
|
|
|
*/
|
|
|
|
|
2019-03-24 00:03:17 +03:00
|
|
|
#pragma once
|
|
|
|
|
2021-05-16 12:36:52 +03:00
|
|
|
#include <AK/Concepts.h>
|
2021-03-07 14:01:11 +03:00
|
|
|
#include <AK/EnumBits.h>
|
2021-11-08 02:51:39 +03:00
|
|
|
#include <AK/Error.h>
|
2019-07-19 14:04:42 +03:00
|
|
|
#include <AK/IntrusiveList.h>
|
2020-02-16 03:27:42 +03:00
|
|
|
#include <AK/Optional.h>
|
2019-04-20 20:23:45 +03:00
|
|
|
#include <AK/OwnPtr.h>
|
2020-11-15 21:58:19 +03:00
|
|
|
#include <AK/Time.h>
|
2021-08-23 07:01:04 +03:00
|
|
|
#include <AK/Variant.h>
|
2019-03-24 00:03:17 +03:00
|
|
|
#include <AK/Vector.h>
|
2022-07-24 17:38:41 +03:00
|
|
|
#include <Kernel/API/POSIX/sched.h>
|
2022-01-29 15:08:37 +03:00
|
|
|
#include <Kernel/Arch/RegisterState.h>
|
2022-12-27 16:04:07 +03:00
|
|
|
#include <Kernel/Arch/ThreadRegisters.h>
|
2021-01-25 18:07:10 +03:00
|
|
|
#include <Kernel/Debug.h>
|
2020-02-16 03:50:16 +03:00
|
|
|
#include <Kernel/Forward.h>
|
2021-08-05 23:22:26 +03:00
|
|
|
#include <Kernel/KString.h>
|
2021-08-16 22:52:42 +03:00
|
|
|
#include <Kernel/Library/ListedRefCounted.h>
|
2022-08-19 21:53:40 +03:00
|
|
|
#include <Kernel/Library/LockWeakPtr.h>
|
|
|
|
#include <Kernel/Library/LockWeakable.h>
|
2021-08-07 14:19:39 +03:00
|
|
|
#include <Kernel/Locking/LockLocation.h>
|
2021-07-18 10:09:04 +03:00
|
|
|
#include <Kernel/Locking/LockMode.h>
|
2021-09-07 12:40:31 +03:00
|
|
|
#include <Kernel/Locking/LockRank.h>
|
2021-08-22 02:37:17 +03:00
|
|
|
#include <Kernel/Locking/SpinlockProtected.h>
|
2021-08-06 14:54:48 +03:00
|
|
|
#include <Kernel/Memory/VirtualRange.h>
|
2019-05-28 12:53:16 +03:00
|
|
|
#include <Kernel/UnixTypes.h>
|
2019-07-19 10:04:12 +03:00
|
|
|
#include <LibC/fd_set.h>
|
2021-02-21 13:59:53 +03:00
|
|
|
#include <LibC/signal_numbers.h>
|
2019-03-24 00:03:17 +03:00
|
|
|
|
2020-02-16 03:27:42 +03:00
|
|
|
namespace Kernel {
|
|
|
|
|
2022-01-29 15:08:37 +03:00
|
|
|
class Timer;
|
|
|
|
|
2020-11-30 02:05:27 +03:00
|
|
|
// Outcome of attempting to dispatch a pending signal to a thread.
enum class DispatchSignalResult {
    Deferred = 0, // Signal could not be handled right now; try again later.
    Yield,        // Dispatch requires the thread to yield the CPU.
    Terminate,    // Signal's action terminates the thread/process.
    Continue      // Thread should resume (e.g. SIGCONT semantics) — TODO confirm exact trigger.
};
|
2019-03-24 00:03:17 +03:00
|
|
|
|
2019-09-07 16:50:44 +03:00
|
|
|
// Per-thread TLS control block; `self` points back at this structure so
// userspace can locate its own thread-specific data.
struct ThreadSpecificData {
    ThreadSpecificData* self;
};
|
|
|
|
|
2020-06-29 00:34:31 +03:00
|
|
|
#define THREAD_AFFINITY_DEFAULT 0xffffffff
|
|
|
|
|
2020-09-27 17:53:35 +03:00
|
|
|
class Thread
|
2021-12-29 01:22:14 +03:00
|
|
|
: public ListedRefCounted<Thread, LockType::Spinlock>
|
2022-08-19 21:53:40 +03:00
|
|
|
, public LockWeakable<Thread> {
|
2020-04-22 12:54:58 +03:00
|
|
|
AK_MAKE_NONCOPYABLE(Thread);
|
|
|
|
AK_MAKE_NONMOVABLE(Thread);
|
|
|
|
|
2021-07-17 22:09:51 +03:00
|
|
|
friend class Mutex;
|
2019-03-24 00:03:17 +03:00
|
|
|
friend class Process;
|
|
|
|
friend class Scheduler;
|
2021-04-16 15:03:24 +03:00
|
|
|
friend struct ThreadReadyQueue;
|
2019-05-28 12:53:16 +03:00
|
|
|
|
2019-03-24 00:03:17 +03:00
|
|
|
public:
|
2020-06-29 00:34:31 +03:00
|
|
|
// Returns the thread currently running on this processor
// (may be null very early in boot — TODO confirm).
inline static Thread* current()
{
    return Processor::current_thread();
}
|
2020-02-17 17:04:27 +03:00
|
|
|
|
2022-08-19 21:53:40 +03:00
|
|
|
static ErrorOr<NonnullLockRefPtr<Thread>> try_create(NonnullLockRefPtr<Process>);
|
2019-03-24 00:03:17 +03:00
|
|
|
~Thread();
|
|
|
|
|
2022-08-19 21:53:40 +03:00
|
|
|
static LockRefPtr<Thread> from_tid(ThreadID);
|
2019-03-24 00:03:17 +03:00
|
|
|
static void finalize_dying_threads();
|
|
|
|
|
2020-08-08 18:32:34 +03:00
|
|
|
ThreadID tid() const { return m_tid; }
|
|
|
|
ProcessID pid() const;
|
2019-03-24 00:03:17 +03:00
|
|
|
|
2019-12-30 20:46:17 +03:00
|
|
|
void set_priority(u32 p) { m_priority = p; }
|
|
|
|
u32 priority() const { return m_priority; }
|
|
|
|
|
2020-09-26 06:44:43 +03:00
|
|
|
// Marks this thread as detached: it can no longer be joined.
void detach()
{
    SpinlockLocker lock(m_lock);
    m_is_joinable = false;
}
|
|
|
|
|
2020-12-26 12:47:08 +03:00
|
|
|
// Returns whether another thread may still join this one.
[[nodiscard]] bool is_joinable() const
{
    SpinlockLocker lock(m_lock);
    return m_is_joinable;
}
|
2019-11-18 06:08:10 +03:00
|
|
|
|
2019-03-24 00:03:17 +03:00
|
|
|
Process& process() { return m_process; }
|
2022-04-01 20:58:27 +03:00
|
|
|
Process const& process() const { return m_process; }
|
2019-03-24 00:03:17 +03:00
|
|
|
|
2021-08-05 23:22:26 +03:00
|
|
|
// NOTE: This returns a null-terminated string.
// Returns a view into the thread's name; the view is only valid while
// the caller continues to hold m_lock (see VERIFY below).
StringView name() const
{
    // NOTE: Whoever is calling this needs to be holding our lock while reading the name.
    VERIFY(m_lock.is_locked_by_current_processor());
    return m_name->view();
}
|
2021-08-05 23:22:26 +03:00
|
|
|
|
2021-09-06 13:44:27 +03:00
|
|
|
// Replaces the thread's name, taking ownership of the given KString.
void set_name(NonnullOwnPtr<KString> name)
{
    SpinlockLocker lock(m_lock);
    m_name = move(name);
}
|
2019-12-07 22:45:26 +03:00
|
|
|
|
2019-03-24 00:03:17 +03:00
|
|
|
void finalize();
|
|
|
|
|
2022-01-30 13:38:50 +03:00
|
|
|
// Scheduler-visible lifecycle state of a thread.
enum class State : u8 {
    Invalid = 0, // Not a valid thread (default-initialized marker).
    Runnable,    // Ready to run, waiting to be scheduled.
    Running,     // Currently executing on a processor.
    Dying,       // Exiting; awaiting finalization.
    Dead,        // Fully finalized.
    Stopped,     // Stopped (e.g. by a signal) — TODO confirm exact semantics.
    Blocked,     // Sleeping in a Blocker.
};
|
|
|
|
|
2021-02-15 02:06:10 +03:00
|
|
|
// Result of a blocking operation: either a normal wakeup or the reason
// the block was cut short (signal, death, timeout, or never blocked).
class [[nodiscard]] BlockResult {
public:
    enum Type {
        WokeNormally,         // Condition was satisfied.
        NotBlocked,           // The thread never actually blocked.
        InterruptedBySignal,  // A signal interrupted the block.
        InterruptedByDeath,   // The thread is dying.
        InterruptedByTimeout, // The block's timeout expired.
    };

    BlockResult() = delete;

    // Implicit conversion from Type is intentional for ergonomic returns/comparisons.
    BlockResult(Type type)
        : m_type(type)
    {
    }

    bool operator==(Type type) const
    {
        return m_type == type;
    }
    bool operator!=(Type type) const
    {
        return m_type != type;
    }

    // True if the block ended due to a signal or thread death
    // (a timeout does NOT count as an interruption here).
    [[nodiscard]] bool was_interrupted() const
    {
        switch (m_type) {
        case InterruptedBySignal:
        case InterruptedByDeath:
            return true;
        default:
            return false;
        }
    }

private:
    Type m_type;
};
|
|
|
|
|
|
|
|
// Describes when a blocking operation should time out; default-constructed
// timeouts are infinite (never expire).
class BlockTimeout {
public:
    // Infinite timeout.
    BlockTimeout()
        : m_infinite(true)
    {
    }
    // Finite timeout; `is_absolute` selects whether `time` is a deadline or a
    // duration relative to `start_time` (defaults to "now" — TODO confirm in the .cpp).
    explicit BlockTimeout(bool is_absolute, Time const* time, Time const* start_time = nullptr, clockid_t clock_id = CLOCK_MONOTONIC_COARSE);

    Time const& absolute_time() const { return m_time; }
    // Null for infinite timeouts, since there is no meaningful start point.
    Time const* start_time() const { return !m_infinite ? &m_start_time : nullptr; }
    clockid_t clock_id() const { return m_clock_id; }
    bool is_infinite() const { return m_infinite; }

private:
    Time m_time {};
    Time m_start_time {};
    clockid_t m_clock_id { CLOCK_MONOTONIC_COARSE };
    bool m_infinite { false };
};
|
|
|
|
|
2021-08-22 16:59:47 +03:00
|
|
|
class BlockerSet;
|
2020-11-30 02:05:27 +03:00
|
|
|
|
2019-07-18 19:12:37 +03:00
|
|
|
// Base class for everything a thread can block on (files, futexes, joins,
// wait queues, signals, sleeps, ...). A Blocker is created on the blocking
// thread's stack, registered with a BlockerSet, and woken by whoever
// satisfies the condition. All mutable state is guarded by m_lock.
class Blocker {
    AK_MAKE_NONMOVABLE(Blocker);
    AK_MAKE_NONCOPYABLE(Blocker);

public:
    enum class Type {
        Unknown = 0,
        File,
        Futex,
        Plan9FS,
        Join,
        Queue,
        Routing,
        Sleep,
        Signal,
        Wait,
        Flock
    };
    virtual ~Blocker();
    // Human-readable state shown e.g. in process listings.
    virtual StringView state_string() const = 0;
    virtual Type blocker_type() const = 0;
    // Subclasses may substitute their own timeout (e.g. socket send timeouts).
    virtual BlockTimeout const& override_timeout(BlockTimeout const& timeout) { return timeout; }
    // If false, signals/death do not interrupt this block (e.g. JoinBlocker).
    virtual bool can_be_interrupted() const { return true; }
    // Performs registration with the relevant BlockerSet; returns false if
    // blocking is unnecessary because the condition is already met.
    virtual bool setup_blocker();
    virtual void finalize();

    Thread& thread() { return m_thread; }

    enum class UnblockImmediatelyReason {
        UnblockConditionAlreadyMet,
        TimeoutInThePast,
    };

    // Called instead of blocking when the block would return immediately.
    virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) = 0;

    // Called after waking; records whether the wake was due to a timeout.
    virtual void was_unblocked(bool did_timeout)
    {
        if (did_timeout) {
            SpinlockLocker lock(m_lock);
            m_did_timeout = true;
        }
    }
    void set_interrupted_by_death()
    {
        SpinlockLocker lock(m_lock);
        do_set_interrupted_by_death();
    }
    void set_interrupted_by_signal(u8 signal)
    {
        SpinlockLocker lock(m_lock);
        do_set_interrupted_by_signal(signal);
    }
    // Returns the interrupting signal number, or 0 if none.
    u8 was_interrupted_by_signal() const
    {
        SpinlockLocker lock(m_lock);
        return do_get_interrupted_by_signal();
    }
    // Translates recorded interruption state into a BlockResult.
    // NOTE: Death takes precedence over signal, which takes precedence over timeout.
    virtual Thread::BlockResult block_result()
    {
        SpinlockLocker lock(m_lock);
        if (m_was_interrupted_by_death)
            return Thread::BlockResult::InterruptedByDeath;
        if (m_was_interrupted_by_signal != 0)
            return Thread::BlockResult::InterruptedBySignal;
        if (m_did_timeout)
            return Thread::BlockResult::InterruptedByTimeout;
        return Thread::BlockResult::WokeNormally;
    }

    // Only Thread itself may drive the block/unblock lifecycle (Badge).
    void begin_blocking(Badge<Thread>);
    BlockResult end_blocking(Badge<Thread>, bool);

protected:
    // A Blocker always belongs to the thread that constructs it.
    Blocker()
        : m_thread(*Thread::current())
    {
    }

    // The do_* variants assume m_lock is already held by the caller.
    void do_set_interrupted_by_death()
    {
        m_was_interrupted_by_death = true;
    }
    void do_set_interrupted_by_signal(u8 signal)
    {
        VERIFY(signal != 0);
        m_was_interrupted_by_signal = signal;
    }
    void do_clear_interrupted_by_signal()
    {
        m_was_interrupted_by_signal = 0;
    }
    u8 do_get_interrupted_by_signal() const
    {
        return m_was_interrupted_by_signal;
    }
    [[nodiscard]] bool was_interrupted() const
    {
        return m_was_interrupted_by_death || m_was_interrupted_by_signal != 0;
    }
    void unblock_from_blocker()
    {
        {
            // Clear m_is_blocking under the lock, but drop the lock before
            // calling into the thread to avoid holding it across that call.
            SpinlockLocker lock(m_lock);
            if (!m_is_blocking)
                return;
            m_is_blocking = false;
        }

        m_thread->unblock_from_blocker(*this);
    }

    bool add_to_blocker_set(BlockerSet&, void* = nullptr);
    // Caller must hold the appropriate lock ("raw_locked" naming convention).
    void set_blocker_set_raw_locked(BlockerSet* blocker_set) { m_blocker_set = blocker_set; }

    // FIXME: Figure out whether this can be Thread.
    mutable RecursiveSpinlock m_lock { LockRank::None };

private:
    BlockerSet* m_blocker_set { nullptr };
    NonnullLockRefPtr<Thread> m_thread;
    u8 m_was_interrupted_by_signal { 0 }; // 0 means "no signal".
    bool m_is_blocking { false };
    bool m_was_interrupted_by_death { false };
    bool m_did_timeout { false };
};
|
|
|
|
|
2021-08-22 16:59:47 +03:00
|
|
|
// A collection of Blockers waiting on the same condition. Wakers call the
// unblock_* helpers to wake whichever blockers' conditions are now met.
// All access to the blocker list is guarded by m_lock.
class BlockerSet {
    AK_MAKE_NONCOPYABLE(BlockerSet);
    AK_MAKE_NONMOVABLE(BlockerSet);

public:
    BlockerSet() = default;

    virtual ~BlockerSet()
    {
        // A set must be empty (and unlocked) before destruction; blockers
        // keep a raw pointer back to their set.
        VERIFY(!m_lock.is_locked());
        VERIFY(m_blockers.is_empty());
    }

    // Registers a blocker (with optional opaque per-blocker data).
    // Returns false if should_add_blocker decided blocking isn't needed
    // (e.g. the condition is already satisfied).
    bool add_blocker(Blocker& blocker, void* data)
    {
        SpinlockLocker lock(m_lock);
        if (!should_add_blocker(blocker, data))
            return false;
        m_blockers.append({ &blocker, data });
        return true;
    }

    void remove_blocker(Blocker& blocker)
    {
        SpinlockLocker lock(m_lock);
        // NOTE: it's possible that the blocker is no longer present
        m_blockers.remove_all_matching([&](auto& info) {
            return info.blocker == &blocker;
        });
    }

    bool is_empty() const
    {
        SpinlockLocker lock(m_lock);
        return is_empty_locked();
    }

protected:
    // Invokes try_to_unblock_one(blocker, data, stop_iterating) for each
    // registered blocker; removes those it reports as unblocked.
    // Returns true if at least one blocker was unblocked.
    template<typename Callback>
    bool unblock_all_blockers_whose_conditions_are_met(Callback try_to_unblock_one)
    {
        SpinlockLocker lock(m_lock);
        return unblock_all_blockers_whose_conditions_are_met_locked(try_to_unblock_one);
    }

    // Same as above, but the caller already holds m_lock.
    template<typename Callback>
    bool unblock_all_blockers_whose_conditions_are_met_locked(Callback try_to_unblock_one)
    {
        VERIFY(m_lock.is_locked());
        bool stop_iterating = false;
        bool did_unblock_any = false;
        // Index-based loop: removal shifts elements, so only advance i
        // when the current entry was NOT removed.
        for (size_t i = 0; i < m_blockers.size() && !stop_iterating;) {
            auto& info = m_blockers[i];
            if (bool did_unblock = try_to_unblock_one(*info.blocker, info.data, stop_iterating)) {
                m_blockers.remove(i);
                did_unblock_any = true;
                continue;
            }

            i++;
        }
        return did_unblock_any;
    }

    bool is_empty_locked() const
    {
        VERIFY(m_lock.is_locked());
        return m_blockers.is_empty();
    }

    // Hook for subclasses to reject registration (e.g. when the condition
    // is already met). Called with m_lock held.
    virtual bool should_add_blocker(Blocker&, void*) { return true; }

    struct BlockerInfo {
        Blocker* blocker;
        void* data; // Opaque, interpreted by the concrete blocker/set.
    };

    // Detaches up to `count` blockers from this set (used for requeueing,
    // e.g. FUTEX_REQUEUE). Takes everything if count >= size.
    Vector<BlockerInfo, 4> do_take_blockers(size_t count)
    {
        if (m_blockers.size() <= count)
            return move(m_blockers);

        size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
        VERIFY(move_count > 0);

        Vector<BlockerInfo, 4> taken_blockers;
        taken_blockers.ensure_capacity(move_count);
        for (size_t i = 0; i < move_count; i++)
            taken_blockers.append(m_blockers.take(i));
        m_blockers.remove(0, move_count);
        return taken_blockers;
    }

    // Appends blockers previously detached with do_take_blockers.
    void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
    {
        if (blockers_to_append.is_empty())
            return;
        if (m_blockers.is_empty()) {
            m_blockers = move(blockers_to_append);
            return;
        }
        m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
        for (size_t i = 0; i < blockers_to_append.size(); i++)
            m_blockers.append(blockers_to_append.take(i));
        blockers_to_append.clear();
    }

    // FIXME: Check whether this can be Thread.
    mutable Spinlock m_lock { LockRank::None };

private:
    Vector<BlockerInfo, 4> m_blockers;
};
|
|
|
|
|
|
|
|
friend class JoinBlocker;
|
2019-11-14 22:58:23 +03:00
|
|
|
// Blocks a thread until the joinee thread exits; the joinee's exit value
// is written through m_joinee_exit_value. Not interruptible by signals.
class JoinBlocker final : public Blocker {
public:
    explicit JoinBlocker(Thread& joinee, ErrorOr<void>& try_join_result, void*& joinee_exit_value);
    virtual Type blocker_type() const override { return Type::Join; }
    virtual StringView state_string() const override { return "Joining"sv; }
    // Joins must complete; signals/death do not interrupt them.
    virtual bool can_be_interrupted() const override { return false; }
    virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;

    virtual bool setup_blocker() override;

    // Called when the joinee exits; delivers the exit value.
    bool unblock(void*, bool);

private:
    NonnullLockRefPtr<Thread> m_joinee;
    void*& m_joinee_exit_value;
    ErrorOr<void>& m_try_join_result;
    bool m_did_unblock { false };
};
|
|
|
|
|
2021-08-23 01:10:33 +03:00
|
|
|
class WaitQueueBlocker final : public Blocker {
|
2020-12-08 07:29:41 +03:00
|
|
|
public:
|
2021-08-23 01:10:33 +03:00
|
|
|
explicit WaitQueueBlocker(WaitQueue&, StringView block_reason = {});
|
|
|
|
virtual ~WaitQueueBlocker();
|
2020-12-08 07:29:41 +03:00
|
|
|
|
|
|
|
virtual Type blocker_type() const override { return Type::Queue; }
|
2021-08-05 21:48:14 +03:00
|
|
|
virtual StringView state_string() const override { return m_block_reason.is_null() ? m_block_reason : "Queue"sv; }
|
2021-08-23 03:09:08 +03:00
|
|
|
virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
|
2021-08-24 13:14:14 +03:00
|
|
|
virtual bool setup_blocker() override;
|
2020-12-08 07:29:41 +03:00
|
|
|
|
|
|
|
bool unblock();
|
|
|
|
|
|
|
|
protected:
|
2021-08-24 13:14:14 +03:00
|
|
|
WaitQueue& m_wait_queue;
|
2021-08-05 21:48:14 +03:00
|
|
|
StringView m_block_reason;
|
2020-12-08 07:29:41 +03:00
|
|
|
bool m_did_unblock { false };
|
|
|
|
};
|
|
|
|
|
2021-08-22 16:34:16 +03:00
|
|
|
// Blocks a thread on a userspace futex word; supports bitset wakes
// (FUTEX_WAIT_BITSET) and requeueing between futex queues.
class FutexBlocker final : public Blocker {
public:
    explicit FutexBlocker(FutexQueue&, u32);
    virtual ~FutexBlocker();

    virtual Type blocker_type() const override { return Type::Futex; }
    virtual StringView state_string() const override { return "Futex"sv; }
    virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override { }
    virtual bool setup_blocker() override;

    u32 bitset() const { return m_bitset; }

    // Starts a requeue: acquires m_lock and stashes the interrupt state;
    // finish_requeue() must be called to release it.
    void begin_requeue()
    {
        // We need to hold the lock until we moved it over
        m_previous_interrupts_state = m_lock.lock();
    }
    void finish_requeue(FutexQueue&);

    // Wake only if (bitset & m_bitset) matches — TODO confirm in the .cpp.
    bool unblock_bitset(u32 bitset);
    bool unblock(bool force = false);

protected:
    FutexQueue& m_futex_queue;
    u32 m_bitset { 0 };
    // Interrupt state saved by begin_requeue(), restored on unlock.
    InterruptsState m_previous_interrupts_state { InterruptsState::Disabled };
    bool m_did_unblock { false };
};
|
|
|
|
|
2020-11-30 02:05:27 +03:00
|
|
|
// Base for blockers that wait on file/socket readiness conditions.
class FileBlocker : public Blocker {
public:
    // Bit flags describing which conditions to wait for / which fired.
    enum class BlockFlags : u16 {
        None = 0,

        Read = 1 << 0,
        Write = 1 << 1,
        ReadPriority = 1 << 2,
        WritePriority = 1 << 3,

        Accept = 1 << 4,
        Connect = 1 << 5,
        SocketFlags = Accept | Connect,

        // Exceptional conditions (roughly poll's POLLERR/POLLHUP/POLLRDHUP).
        WriteError = 1 << 6,
        WriteHangUp = 1 << 7,
        ReadHangUp = 1 << 8,
        Exception = WriteError | WriteHangUp | ReadHangUp,
    };

    virtual Type blocker_type() const override { return Type::File; }

    // Re-evaluates the readiness condition; returns true if the blocker
    // was unblocked as a result.
    virtual bool unblock_if_conditions_are_met(bool, void*) = 0;
};
|
|
|
|
|
2021-09-07 14:39:11 +03:00
|
|
|
// FileBlocker bound to a single OpenFileDescription; reports which of the
// requested flags actually became ready through m_unblocked_flags.
class OpenFileDescriptionBlocker : public FileBlocker {
public:
    OpenFileDescription const& blocked_description() const;

    virtual bool unblock_if_conditions_are_met(bool, void*) override;
    virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
    virtual bool setup_blocker() override;

protected:
    explicit OpenFileDescriptionBlocker(OpenFileDescription&, BlockFlags, BlockFlags&);

private:
    NonnullLockRefPtr<OpenFileDescription> m_blocked_description;
    const BlockFlags m_flags;       // Conditions we are waiting for.
    BlockFlags& m_unblocked_flags;  // Out-param: conditions that fired.
    bool m_did_unblock { false };
};
|
|
|
|
|
2021-09-07 14:39:11 +03:00
|
|
|
// Blocks until a listening socket has a connection to accept.
class AcceptBlocker final : public OpenFileDescriptionBlocker {
public:
    explicit AcceptBlocker(OpenFileDescription&, BlockFlags&);
    virtual StringView state_string() const override { return "Accepting"sv; }
};
|
|
|
|
|
2021-09-07 14:39:11 +03:00
|
|
|
// Blocks until a socket connection attempt completes.
class ConnectBlocker final : public OpenFileDescriptionBlocker {
public:
    explicit ConnectBlocker(OpenFileDescription&, BlockFlags&);
    virtual StringView state_string() const override { return "Connecting"sv; }
};
|
|
|
|
|
2021-09-07 14:39:11 +03:00
|
|
|
// Blocks until the description becomes writable; may impose its own
// timeout (e.g. a socket send timeout) via override_timeout.
class WriteBlocker final : public OpenFileDescriptionBlocker {
public:
    explicit WriteBlocker(OpenFileDescription&, BlockFlags&);
    virtual StringView state_string() const override { return "Writing"sv; }
    virtual BlockTimeout const& override_timeout(BlockTimeout const&) override;

private:
    BlockTimeout m_timeout; // Storage for the overriding timeout, if any.
};
|
|
|
|
|
2021-09-07 14:39:11 +03:00
|
|
|
// Blocks until the description becomes readable; may impose its own
// timeout (e.g. a socket receive timeout) via override_timeout.
class ReadBlocker final : public OpenFileDescriptionBlocker {
public:
    explicit ReadBlocker(OpenFileDescription&, BlockFlags&);
    virtual StringView state_string() const override { return "Reading"sv; }
    virtual BlockTimeout const& override_timeout(BlockTimeout const&) override;

private:
    BlockTimeout m_timeout; // Storage for the overriding timeout, if any.
};
|
|
|
|
|
2019-07-19 14:19:47 +03:00
|
|
|
// Blocks for a duration/deadline (sys$clock_nanosleep and friends).
// If m_remaining is non-null, the unslept time is written back on wake
// so interrupted sleeps can be resumed.
class SleepBlocker final : public Blocker {
public:
    explicit SleepBlocker(BlockTimeout const&, Time* = nullptr);
    virtual StringView state_string() const override { return "Sleeping"sv; }
    virtual Type blocker_type() const override { return Type::Sleep; }
    // The sleep deadline replaces whatever timeout the caller passed.
    virtual BlockTimeout const& override_timeout(BlockTimeout const&) override;
    virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
    virtual void was_unblocked(bool) override;
    virtual Thread::BlockResult block_result() override;

private:
    // Computes the remaining sleep time into *m_remaining.
    void calculate_remaining();

    BlockTimeout m_deadline;
    Time* m_remaining; // Optional out-param; may be null.
};
|
|
|
|
|
2020-11-30 02:05:27 +03:00
|
|
|
// Blocks on readiness of multiple file descriptions at once
// (backs sys$select / sys$poll).
class SelectBlocker final : public FileBlocker {
public:
    // One entry per watched description: what to wait for and what fired.
    struct FDInfo {
        LockRefPtr<OpenFileDescription> description;
        BlockFlags block_flags { BlockFlags::None };
        BlockFlags unblocked_flags { BlockFlags::None };
    };

    using FDVector = Vector<FDInfo, FD_SETSIZE>;
    explicit SelectBlocker(FDVector&);
    virtual ~SelectBlocker();

    virtual bool unblock_if_conditions_are_met(bool, void*) override;
    virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
    virtual void was_unblocked(bool) override;
    virtual StringView state_string() const override { return "Selecting"sv; }
    virtual bool setup_blocker() override;
    virtual void finalize() override;

private:
    // Gathers fired flags from all watched descriptions; returns how many fired.
    size_t collect_unblocked_flags();

    FDVector& m_fds; // Borrowed from the caller; outlives the blocker.
    bool m_did_unblock { false };
};
|
|
|
|
|
2021-12-12 02:01:42 +03:00
|
|
|
// Blocks until one of the signals in m_pending_set becomes pending
// (backs sys$sigtimedwait-style waits); the delivered signal's info is
// written to m_result.
class SignalBlocker final : public Blocker {
public:
    explicit SignalBlocker(sigset_t pending_set, siginfo_t& result);
    virtual StringView state_string() const override { return "Pending Signal"sv; }
    virtual Type blocker_type() const override { return Type::Signal; }
    void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
    virtual bool setup_blocker() override;
    // Checks whether a matching signal is pending; unblocks if so.
    // from_add_blocker distinguishes the registration-time check.
    bool check_pending_signals(bool from_add_blocker);

private:
    sigset_t m_pending_set { 0 }; // Signals we are waiting for.
    siginfo_t& m_result;          // Out-param for the matched signal's info.
    bool m_did_unblock { false };
};
|
|
|
|
|
|
|
|
// BlockerSet holding SignalBlockers; woken whenever new signals arrive.
class SignalBlockerSet final : public BlockerSet {
public:
    // Re-checks every registered SignalBlocker against the now-pending signals.
    void unblock_all_blockers_whose_conditions_are_met()
    {
        BlockerSet::unblock_all_blockers_whose_conditions_are_met([&](auto& b, void*, bool&) {
            VERIFY(b.blocker_type() == Blocker::Type::Signal);
            auto& blocker = static_cast<Thread::SignalBlocker&>(b);
            return blocker.check_pending_signals(false);
        });
    }

private:
    // Reject registration if a matching signal is already pending —
    // in that case the blocker unblocks immediately instead of waiting.
    bool should_add_blocker(Blocker& b, void*) override
    {
        VERIFY(b.blocker_type() == Blocker::Type::Signal);
        auto& blocker = static_cast<Thread::SignalBlocker&>(b);
        return !blocker.check_pending_signals(true);
    }
};
|
|
|
|
|
2019-07-19 14:19:47 +03:00
|
|
|
// Blocks a thread in sys$waitpid/sys$waitid until a child process changes
// state (terminates, stops, continues) or is disowned.
class WaitBlocker final : public Blocker {
public:
    // What happened to the waited-on process.
    enum class UnblockFlags {
        Terminated,
        Stopped,
        Continued,
        Disowned
    };

    // waitee: Empty waits for any child; a Process or ProcessGroup narrows the wait.
    WaitBlocker(int wait_options, Variant<Empty, NonnullLockRefPtr<Process>, NonnullLockRefPtr<ProcessGroup>> waitee, ErrorOr<siginfo_t>& result);
    virtual StringView state_string() const override { return "Waiting"sv; }
    virtual Type blocker_type() const override { return Type::Wait; }
    virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
    virtual void was_unblocked(bool) override;
    virtual bool setup_blocker() override;

    bool unblock(Process& process, UnblockFlags flags, u8 signal, bool from_add_blocker);
    // A "real" wait consumes the child's status; WNOWAIT only peeks.
    bool is_wait() const { return (m_wait_options & WNOWAIT) != WNOWAIT; }

private:
    void do_was_disowned();
    void do_set_result(siginfo_t const&);

    int const m_wait_options;
    ErrorOr<siginfo_t>& m_result; // Out-param: siginfo describing the state change.
    Variant<Empty, NonnullLockRefPtr<Process>, NonnullLockRefPtr<ProcessGroup>> m_waitee;
    bool m_did_unblock { false };
    bool m_got_sigchild { false };
};
|
|
|
|
|
2021-08-22 16:59:47 +03:00
|
|
|
// Per-process collection of WaitBlockers. Child processes report their
// state changes here, and waiting threads are unblocked accordingly.
class WaitBlockerSet final : public BlockerSet {
    friend class WaitBlocker;

public:
    explicit WaitBlockerSet(Process& process)
        : m_process(process)
    {
    }

    // A waiter explicitly disowned this child (e.g. on sys$disown).
    void disowned_by_waiter(Process&);
    // Record a state change of `process` and wake matching blockers;
    // returns true if any blocker consumed the event.
    bool unblock(Process&, WaitBlocker::UnblockFlags, u8);
    // Re-check a single blocker against already-recorded state changes.
    void try_unblock(WaitBlocker&);
    // Called during process finalization; after this no more waits succeed.
    void finalize();

protected:
    virtual bool should_add_blocker(Blocker&, void*) override;

private:
    // A pending, not-yet-consumed state change of one child process.
    struct ProcessBlockInfo {
        NonnullLockRefPtr<Process> process;
        WaitBlocker::UnblockFlags flags;
        u8 signal;
        bool was_waited { false };

        explicit ProcessBlockInfo(NonnullLockRefPtr<Process>&&, WaitBlocker::UnblockFlags, u8);
        ~ProcessBlockInfo();
    };

    Process& m_process;
    Vector<ProcessBlockInfo, 2> m_processes;
    bool m_finalized { false };
};
|
|
|
|
|
2022-07-14 02:17:01 +03:00
|
|
|
// Blocker used by sys$flock(): blocks until the requested advisory file
// lock on the given inode can be granted.
class FlockBlocker final : public Blocker {
public:
    // Note: m_flock keeps a reference to the caller's flock request,
    // which must outlive this blocker.
    FlockBlocker(NonnullLockRefPtr<Inode>, flock const&);
    virtual StringView state_string() const override { return "Locking File"sv; }
    virtual Type blocker_type() const override { return Type::Flock; }
    virtual void will_unblock_immediately_without_blocking(UnblockImmediatelyReason) override;
    virtual bool setup_blocker() override;
    // Attempt to take the lock now; returns true (and unblocks) on success.
    bool try_unblock(bool from_add_blocker);

private:
    NonnullLockRefPtr<Inode> m_inode;
    flock const& m_flock;
    bool m_did_unblock { false };
};
|
|
|
|
|
|
|
|
class FlockBlockerSet final : public BlockerSet {
|
|
|
|
public:
|
|
|
|
void unblock_all_blockers_whose_conditions_are_met()
|
|
|
|
{
|
|
|
|
BlockerSet::unblock_all_blockers_whose_conditions_are_met([&](auto& b, void*, bool&) {
|
|
|
|
VERIFY(b.blocker_type() == Blocker::Type::Flock);
|
|
|
|
auto& blocker = static_cast<Thread::FlockBlocker&>(b);
|
|
|
|
return blocker.try_unblock(false);
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
bool should_add_blocker(Blocker& b, void*) override
|
|
|
|
{
|
|
|
|
VERIFY(b.blocker_type() == Blocker::Type::Flock);
|
|
|
|
auto& blocker = static_cast<Thread::FlockBlocker&>(b);
|
|
|
|
return !blocker.try_unblock(true);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2020-12-08 07:29:41 +03:00
|
|
|
// Attempt to begin joining this thread. Under m_lock, verifies the join is
// legal, lets the caller install its JoinBlocker via `add_blocker`, and then
// marks the thread as no longer joinable — all atomically with respect to
// the thread exiting. Returns EDEADLK for self-join, EINVAL if the thread
// is already being joined (or was never valid).
template<typename AddBlockerHandler>
ErrorOr<void> try_join(AddBlockerHandler add_blocker)
{
    if (Thread::current() == this)
        return EDEADLK;

    SpinlockLocker lock(m_lock);

    // Joining dead threads is allowed for two main reasons:
    // - Thread join behavior should not be racy when a thread is joined and exiting at roughly the same time.
    //   This is common behavior when threads are given a signal to end (meaning they are going to exit ASAP) and then joined.
    // - POSIX requires that exited threads are joinable (at least, there is no language in the specification forbidding it).
    if (!m_is_joinable || state() == Thread::State::Invalid)
        return EINVAL;

    // NOTE: add_blocker() must run while m_lock is held and before we clear
    // m_is_joinable, so the blocker cannot miss the thread's exit.
    add_blocker();

    // From this point on the thread is no longer joinable by anyone
    // else. It also means that if the join is timed, it becomes
    // detached when a timeout happens.
    m_is_joinable = false;
    return {};
}
|
|
|
|
|
2019-03-24 00:03:17 +03:00
|
|
|
// Scheduling statistics.
void did_schedule() { ++m_times_scheduled; }
u32 times_scheduled() const { return m_times_scheduled; }

// Transition out of the Stopped state (e.g. on SIGCONT).
void resume_from_stopped();

[[nodiscard]] bool should_be_stopped() const;
[[nodiscard]] bool is_stopped() const { return m_state == Thread::State::Stopped; }
[[nodiscard]] bool is_blocked() const { return m_state == Thread::State::Blocked; }

// The processor this thread last ran on; consume/release ordering pairs
// the scheduler's store with readers on other CPUs.
u32 cpu() const { return m_cpu.load(AK::MemoryOrder::memory_order_consume); }
void set_cpu(u32 cpu) { m_cpu.store(cpu, AK::MemoryOrder::memory_order_release); }
u32 affinity() const { return m_cpu_affinity; }
void set_affinity(u32 affinity) { m_cpu_affinity = affinity; }

// Register state saved on the kernel stack when the thread entered the kernel.
RegisterState& get_register_dump_from_stack();
RegisterState const& get_register_dump_from_stack() const { return const_cast<Thread*>(this)->get_register_dump_from_stack(); }

DebugRegisterState& debug_register_state() { return m_debug_register_state; }
DebugRegisterState const& debug_register_state() const { return m_debug_register_state; }

ThreadRegisters& regs() { return m_regs; }
ThreadRegisters const& regs() const { return m_regs; }

State state() const { return m_state; }
StringView state_string() const;

// Thread-local storage (TLS) region accessors.
VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
size_t thread_specific_region_size() const;
size_t thread_specific_region_alignment() const;
|
2019-09-07 16:50:44 +03:00
|
|
|
|
2020-12-08 07:29:41 +03:00
|
|
|
// Yield the CPU for as long as this thread is in the Stopped state.
ALWAYS_INLINE void yield_if_stopped()
{
    // If some thread stopped us, we need to yield to someone else
    // We check this when entering/exiting a system call. A thread
    // may continue to execute in user land until the next timer
    // tick or entering the next system call, or if it's in kernel
    // mode then we will intercept prior to returning back to user
    // mode.
    SpinlockLocker lock(m_lock);
    while (state() == Thread::State::Stopped) {
        // Drop m_lock before yielding; we must not hold our own lock
        // (or the big lock) while another thread runs.
        lock.unlock();
        // We shouldn't be holding the big lock here
        yield_without_releasing_big_lock();
        lock.lock();
    }
}
|
|
|
|
|
2021-09-05 20:02:03 +03:00
|
|
|
// Block on a kernel Mutex (contended-lock path).
void block(Kernel::Mutex&, SpinlockLocker<Spinlock>&, u32);

// Generic blocking entry point: construct a BlockerType from `args` on the
// stack and block until it is satisfied or the timeout expires.
template<typename BlockerType, class... Args>
BlockResult block(BlockTimeout const& timeout, Args&&... args)
{
    BlockerType blocker(forward<Args>(args)...);
    return block_impl(timeout, blocker);
}
|
2019-07-19 11:12:50 +03:00
|
|
|
|
2022-01-30 13:43:03 +03:00
|
|
|
// Wake this thread from a Mutex wait / a Blocker / an arbitrary block
// (optionally because of a signal).
u32 unblock_from_mutex(Kernel::Mutex&);
void unblock_from_blocker(Blocker&);
void unblock(u8 signal = 0);

// Block the current thread on a WaitQueue until woken or timed out.
template<class... Args>
Thread::BlockResult wait_on(WaitQueue& wait_queue, Thread::BlockTimeout const& timeout, Args&&... args)
{
    VERIFY(this == Thread::current());
    return block<Thread::WaitQueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
}

// Sleep for a duration; the optional out-parameter receives the time
// remaining if the sleep was interrupted.
BlockResult sleep(clockid_t, Time const&, Time* = nullptr);
BlockResult sleep(Time const& duration, Time* remaining_time = nullptr)
{
    return sleep(CLOCK_MONOTONIC_COARSE, duration, remaining_time);
}
// Sleep until an absolute point in time on the given clock.
BlockResult sleep_until(clockid_t, Time const&);
BlockResult sleep_until(Time const& duration)
{
    return sleep_until(CLOCK_MONOTONIC_COARSE, duration);
}
|
2020-11-15 21:58:19 +03:00
|
|
|
|
Kernel: Unwind kernel stacks before dying
While executing in the kernel, a thread can acquire various resources
that need cleanup, such as locks and references to RefCounted objects.
This cleanup normally happens on the exit path, such as in destructors
for various RAII guards. But we weren't calling those exit paths when
killing threads that have been executing in the kernel, such as threads
blocked on reading or sleeping, thus causing leaks.
This commit changes how killing threads works. Now, instead of killing
a thread directly, one is supposed to call thread->set_should_die(),
which will unblock it and make it unwind the stack if it is blocked
in the kernel. Then, just before returning to the userspace, the thread
will automatically die.
2019-11-14 18:46:01 +03:00
|
|
|
// Tell this thread to unblock if needed,
// gracefully unwind the stack and die.
void set_should_die();
[[nodiscard]] bool should_die() const { return m_should_die; }
// Called on the kernel-exit path: actually dies if set_should_die() was requested.
void die_if_needed();

// Terminate this thread, optionally recording an exit value for joiners.
void exit(void* = nullptr);

// Account scheduled time (user/kernel) for this thread.
void update_time_scheduled(u64, bool, bool);
// Timer-tick bookkeeping; see Scheduler for how the result is used.
bool tick();
void set_ticks_left(u32 t) { m_ticks_left = t; }
u32 ticks_left() const { return m_ticks_left; }

FlatPtr kernel_stack_base() const { return m_kernel_stack_base; }
FlatPtr kernel_stack_top() const { return m_kernel_stack_top; }

// Change the thread state; the optional u8 carries the stop signal, if any.
void set_state(State, u8 = 0);
|
2019-03-24 00:03:17 +03:00
|
|
|
|
2020-12-26 12:47:08 +03:00
|
|
|
[[nodiscard]] bool is_initialized() const { return m_initialized; }
void set_initialized(bool initialized) { m_initialized = initialized; }

// Signal delivery. The "urgent" variant is for synchronous faults that
// must be handled by this thread itself.
void send_urgent_signal_to_self(u8 signal);
void send_signal(u8 signal, Process* sender);

// Signal mask manipulation; each returns the previous mask.
u32 update_signal_mask(u32 signal_mask);
u32 signal_mask_block(sigset_t signal_set, bool block);
u32 signal_mask() const;
// Reset signal dispositions/mask state across sys$exec.
void reset_signals_for_exec();

// ptrace-style access to the hardware debug registers.
ErrorOr<FlatPtr> peek_debug_register(u32 register_index);
ErrorOr<void> poke_debug_register(u32 register_index, FlatPtr data);

void set_dump_backtrace_on_finalization() { m_dump_backtrace_on_finalization = true; }

// Pending-signal dispatch, used on the return-to-userspace path.
DispatchSignalResult dispatch_one_pending_signal();
DispatchSignalResult try_dispatch_one_pending_signal(u8 signal);
DispatchSignalResult dispatch_signal(u8 signal);
void check_dispatch_pending_signal();
[[nodiscard]] bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
[[nodiscard]] bool should_ignore_signal(u8 signal) const;
[[nodiscard]] bool has_signal_handler(u8 signal) const;
[[nodiscard]] bool is_signal_masked(u8 signal) const;
u32 pending_signals() const;
u32 pending_signals_for_state() const;

// sigaltstack() support.
[[nodiscard]] bool has_alternative_signal_stack() const;
[[nodiscard]] bool is_in_alternative_signal_stack() const;

FPUState& fpu_state() { return m_fpu_state; }

// Allocate and map this thread's TLS region; only Process may call this.
ErrorOr<void> make_thread_specific_region(Badge<Process>);
|
2019-09-07 16:50:44 +03:00
|
|
|
|
2019-11-26 23:35:24 +03:00
|
|
|
unsigned syscall_count() const { return m_syscall_count; }
|
|
|
|
void did_syscall() { ++m_syscall_count; }
|
|
|
|
unsigned inode_faults() const { return m_inode_faults; }
|
|
|
|
void did_inode_fault() { ++m_inode_faults; }
|
|
|
|
unsigned zero_faults() const { return m_zero_faults; }
|
|
|
|
void did_zero_fault() { ++m_zero_faults; }
|
|
|
|
unsigned cow_faults() const { return m_cow_faults; }
|
|
|
|
void did_cow_fault() { ++m_cow_faults; }
|
|
|
|
|
2019-12-01 19:36:06 +03:00
|
|
|
unsigned file_read_bytes() const { return m_file_read_bytes; }
|
|
|
|
unsigned file_write_bytes() const { return m_file_write_bytes; }
|
|
|
|
|
|
|
|
void did_file_read(unsigned bytes)
|
|
|
|
{
|
|
|
|
m_file_read_bytes += bytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
void did_file_write(unsigned bytes)
|
|
|
|
{
|
|
|
|
m_file_write_bytes += bytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned unix_socket_read_bytes() const { return m_unix_socket_read_bytes; }
|
|
|
|
unsigned unix_socket_write_bytes() const { return m_unix_socket_write_bytes; }
|
|
|
|
|
|
|
|
void did_unix_socket_read(unsigned bytes)
|
|
|
|
{
|
|
|
|
m_unix_socket_read_bytes += bytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
void did_unix_socket_write(unsigned bytes)
|
|
|
|
{
|
|
|
|
m_unix_socket_write_bytes += bytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned ipv4_socket_read_bytes() const { return m_ipv4_socket_read_bytes; }
|
|
|
|
unsigned ipv4_socket_write_bytes() const { return m_ipv4_socket_write_bytes; }
|
|
|
|
|
|
|
|
void did_ipv4_socket_read(unsigned bytes)
|
|
|
|
{
|
|
|
|
m_ipv4_socket_read_bytes += bytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
void did_ipv4_socket_write(unsigned bytes)
|
|
|
|
{
|
|
|
|
m_ipv4_socket_write_bytes += bytes;
|
|
|
|
}
|
|
|
|
|
2021-01-23 09:24:33 +03:00
|
|
|
// Whether the thread is currently executing on some processor; set/cleared
// by the scheduler around context switches.
void set_active(bool active) { m_is_active = active; }

// Critical-section depth saved across context switches.
u32 saved_critical() const { return m_saved_critical; }
void save_critical(u32 critical) { m_saved_critical = critical; }

// Lock-rank tracking for deadlock detection.
void track_lock_acquire(LockRank rank);
void track_lock_release(LockRank rank);

[[nodiscard]] bool is_active() const { return m_is_active; }
|
2020-07-05 23:32:07 +03:00
|
|
|
|
2020-12-26 12:47:08 +03:00
|
|
|
// Whether the finalizer may now reap this thread: it must no longer be
// running anywhere and must be detached (or have a join in progress).
[[nodiscard]] bool is_finalizable() const
{
    // We can't finalize as long as this thread is still running
    // Note that checking for Running state here isn't sufficient
    // as the thread may not be in Running state but switching out.
    // m_is_active is set to false once the context switch is
    // complete and the thread is not executing on any processor.
    if (m_is_active.load(AK::memory_order_acquire))
        return false;
    // We can't finalize until the thread is either detached or
    // a join has started. We can't make m_is_joinable atomic
    // because that would introduce a race in try_join.
    SpinlockLocker lock(m_lock);
    return !m_is_joinable;
}
|
|
|
|
|
2022-08-19 21:53:40 +03:00
|
|
|
// Create a copy of this thread for a forked process.
ErrorOr<NonnullLockRefPtr<Thread>> try_clone(Process&);

// Iterate over all threads (optionally filtered by state). The
// IteratorFunction overloads let the callback stop early by returning
// IterationDecision::Break; the VoidFunction overloads always visit all.
template<IteratorFunction<Thread&> Callback>
static IterationDecision for_each_in_state(State, Callback);
template<IteratorFunction<Thread&> Callback>
static IterationDecision for_each(Callback);

template<VoidFunction<Thread&> Callback>
static IterationDecision for_each_in_state(State, Callback);
template<VoidFunction<Thread&> Callback>
static IterationDecision for_each(Callback);

// Default stack sizes for new threads.
static constexpr u32 default_kernel_stack_size = 65536;
static constexpr u32 default_userspace_stack_size = 1 * MiB;
|
2019-10-20 19:11:40 +03:00
|
|
|
|
2022-08-19 14:54:14 +03:00
|
|
|
// Accumulated scheduled time, split by privilege mode.
u64 time_in_user() const { return m_total_time_scheduled_user.load(AK::MemoryOrder::memory_order_relaxed); }
u64 time_in_kernel() const { return m_total_time_scheduled_kernel.load(AK::MemoryOrder::memory_order_relaxed); }

// Which privilege mode the thread was in before entering the kernel.
enum class PreviousMode : u8 {
    KernelMode = 0,
    UserMode
};
PreviousMode previous_mode() const { return m_previous_mode; }

// Sets the current mode and returns true if the mode actually changed
// (i.e. this entry/exit crossed a privilege boundary).
bool set_previous_mode(PreviousMode mode)
{
    if (m_previous_mode == mode)
        return false;
    m_previous_mode = mode;
    return true;
}
|
|
|
|
|
2021-01-25 23:19:34 +03:00
|
|
|
// The innermost trap frame for this thread (nullptr when not in a trap).
TrapFrame*& current_trap() { return m_current_trap; }
TrapFrame const* const& current_trap() const { return m_current_trap; }

// The thread's own recursive lock, protecting its state transitions.
RecursiveSpinlock& get_lock() const { return m_lock; }
|
2020-08-03 01:59:01 +03:00
|
|
|
|
2021-01-24 01:29:11 +03:00
|
|
|
#if LOCK_DEBUG
// Debug bookkeeping: record that this thread acquired (refs_delta > 0) or
// released (refs_delta < 0) `refs_delta` references on `lock`.
void holding_lock(Mutex& lock, int refs_delta, LockLocation const& location)
{
    VERIFY(refs_delta != 0);
    m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
    SpinlockLocker list_lock(m_holding_locks_lock);
    if (refs_delta > 0) {
        // Acquiring: bump the count on an existing entry, or start
        // tracking this mutex.
        bool already_tracked = false;
        for (auto& entry : m_holding_locks_list) {
            if (entry.lock != &lock)
                continue;
            entry.count += refs_delta;
            already_tracked = true;
            break;
        }
        if (!already_tracked)
            m_holding_locks_list.append({ &lock, location, 1 });
    } else {
        VERIFY(refs_delta < 0);
        // Releasing: the mutex must be tracked; drop the entry once its
        // reference count reaches zero.
        bool dropped_refs = false;
        for (size_t index = 0; index < m_holding_locks_list.size(); ++index) {
            auto& entry = m_holding_locks_list[index];
            if (entry.lock != &lock)
                continue;
            VERIFY(entry.count >= (unsigned)-refs_delta);
            entry.count -= (unsigned)-refs_delta;
            if (entry.count == 0)
                m_holding_locks_list.remove(index);
            dropped_refs = true;
            break;
        }
        VERIFY(dropped_refs);
    }
}

// Total number of mutex references this thread currently holds.
u32 lock_count() const
{
    return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
}
#endif
|
|
|
|
|
2021-02-07 20:13:51 +03:00
|
|
|
// Whether this thread is currently inside the page-fault handler.
bool is_handling_page_fault() const
{
    return m_handling_page_fault;
}
void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
// Marks this thread as a per-CPU idle thread.
void set_idle_thread() { m_is_idle_thread = true; }
bool is_idle_thread() const { return m_is_idle_thread; }

// Set while the thread is crashing (e.g. dumping state after a fatal fault).
void set_crashing() { m_is_crashing = true; }
[[nodiscard]] bool is_crashing() const { return m_is_crashing; }
|
|
|
|
|
2021-05-18 12:26:11 +03:00
|
|
|
// Track nesting of profiler invocations on this thread; both return the
// previous nesting count.
ALWAYS_INLINE u32 enter_profiler()
{
    return m_nested_profiler_calls.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
}

// NOTE(review): uses memory_order_acquire on the decrement while the
// increment uses acq_rel — presumably intentional; confirm before changing.
ALWAYS_INLINE u32 leave_profiler()
{
    return m_nested_profiler_calls.fetch_sub(1, AK::MemoryOrder::memory_order_acquire);
}

// When suppressed, this thread is excluded from profiling samples.
bool is_profiling_suppressed() const { return m_is_profiling_suppressed; }
void set_profiling_suppressed() { m_is_profiling_suppressed = true; }

// pledge() violation pending delivery to this thread.
bool is_promise_violation_pending() const { return m_is_promise_violation_pending; }
void set_promise_violation_pending(bool value) { m_is_promise_violation_pending = value; }

// When disabled, heap allocation from this thread is forbidden.
bool is_allocation_enabled() const { return m_allocation_enabled; }
void set_allocation_enabled(bool value) { m_allocation_enabled = value; }

// Produce a symbolicated backtrace of this thread's kernel stack.
ErrorOr<NonnullOwnPtr<KString>> backtrace();
|
2021-07-15 23:54:19 +03:00
|
|
|
|
2022-08-25 13:18:16 +03:00
|
|
|
Blocker const* blocker() const { return m_blocker; };
|
|
|
|
Kernel::Mutex const* blocking_mutex() const { return m_blocking_mutex; }
|
|
|
|
|
|
|
|
#if LOCK_DEBUG
|
|
|
|
struct HoldingLockInfo {
|
|
|
|
Mutex* lock;
|
|
|
|
LockLocation lock_location;
|
|
|
|
unsigned count;
|
|
|
|
};
|
|
|
|
|
|
|
|
template<IteratorFunction<HoldingLockInfo const&> Callback>
|
|
|
|
void for_each_held_lock(Callback);
|
|
|
|
template<VoidFunction<HoldingLockInfo const&> Callback>
|
|
|
|
void for_each_held_lock(Callback);
|
|
|
|
#endif
|
|
|
|
|
2019-07-19 14:04:42 +03:00
|
|
|
private:
// Threads are created via factory functions; the constructor takes
// ownership of the kernel stack region, block timer, and name.
Thread(NonnullLockRefPtr<Process>, NonnullOwnPtr<Memory::Region>, NonnullLockRefPtr<Timer>, NonnullOwnPtr<KString>);

// Shared implementation behind the templated block<BlockerType>() helpers.
BlockResult block_impl(BlockTimeout const&, Blocker&);

IntrusiveListNode<Thread> m_process_thread_list_node;
int m_runnable_priority { -1 };

friend class WaitQueue;
|
2020-11-30 02:05:27 +03:00
|
|
|
|
2021-08-22 16:59:47 +03:00
|
|
|
// Holds the (at most one) JoinBlocker waiting for this thread to exit,
// and hands the thread's exit value to the joiner.
class JoinBlockerSet final : public BlockerSet {
public:
    // Called by the exiting thread: publish the exit value, then wake the joiner.
    void thread_did_exit(void* exit_value)
    {
        SpinlockLocker lock(m_lock);
        VERIFY(!m_thread_did_exit);
        m_thread_did_exit = true;
        // release pairs with the acquire load in exit_value().
        m_exit_value.store(exit_value, AK::MemoryOrder::memory_order_release);
        do_unblock_joiner();
    }
    // Called during finalization, in case the joiner wasn't woken yet.
    void thread_finalizing()
    {
        SpinlockLocker lock(m_lock);
        do_unblock_joiner();
    }
    // Only valid once thread_did_exit() has run.
    void* exit_value() const
    {
        VERIFY(m_thread_did_exit);
        return m_exit_value.load(AK::MemoryOrder::memory_order_acquire);
    }

    // Unblock `blocker` immediately if the thread already exited.
    void try_unblock(JoinBlocker& blocker)
    {
        SpinlockLocker lock(m_lock);
        if (m_thread_did_exit)
            blocker.unblock(exit_value(), false);
    }

protected:
    // If the thread already exited, satisfy the joiner right away instead
    // of queueing it.
    virtual bool should_add_blocker(Blocker& b, void*) override
    {
        VERIFY(b.blocker_type() == Blocker::Type::Join);
        auto& blocker = static_cast<JoinBlocker&>(b);

        // NOTE: m_lock is held already!
        if (m_thread_did_exit) {
            blocker.unblock(exit_value(), true);
            return false;
        }
        return true;
    }

private:
    void do_unblock_joiner()
    {
        unblock_all_blockers_whose_conditions_are_met_locked([&](Blocker& b, void*, bool&) {
            VERIFY(b.blocker_type() == Blocker::Type::Join);
            auto& blocker = static_cast<JoinBlocker&>(b);
            return blocker.unblock(exit_value(), false);
        });
    }

    Atomic<void*> m_exit_value { nullptr };
    bool m_thread_did_exit { false };
};
|
|
|
|
|
2020-12-15 02:36:22 +03:00
|
|
|
// Temporarily release / re-acquire the process big lock around blocking;
// the u32 carries the recursion count across the release.
LockMode unlock_process_if_locked(u32&);
void relock_process(LockMode, u32);
void reset_fpu_state();
|
|
|
|
|
2021-09-07 13:00:13 +03:00
|
|
|
// Protects general thread state; m_block_lock specifically guards blocking state.
mutable RecursiveSpinlock m_lock { LockRank::Thread };
mutable RecursiveSpinlock m_block_lock { LockRank::None };
NonnullLockRefPtr<Process> m_process;
ThreadID m_tid { -1 };
ThreadRegisters m_regs {};
DebugRegisterState m_debug_register_state {};
TrapFrame* m_current_trap { nullptr };
u32 m_saved_critical { 1 };
IntrusiveListNode<Thread> m_ready_queue_node;
// Processor/affinity bookkeeping.
Atomic<u32> m_cpu { 0 };
u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
// Scheduling time accounting.
Optional<u64> m_last_time_scheduled;
Atomic<u64> m_total_time_scheduled_user { 0 };
Atomic<u64> m_total_time_scheduled_kernel { 0 };
u32 m_ticks_left { 0 };
u32 m_times_scheduled { 0 };
u32 m_ticks_in_user { 0 };
u32 m_ticks_in_kernel { 0 };
// Signal state.
u32 m_pending_signals { 0 };
u8 m_currently_handled_signal { 0 };
u32 m_signal_mask { 0 };
FlatPtr m_alternative_signal_stack { 0 };
FlatPtr m_alternative_signal_stack_size { 0 };
SignalBlockerSet m_signal_blocker_set;
// Kernel stack and thread-local storage.
FlatPtr m_kernel_stack_base { 0 };
FlatPtr m_kernel_stack_top { 0 };
NonnullOwnPtr<Memory::Region> m_kernel_stack_region;
VirtualAddress m_thread_specific_data;
Optional<Memory::VirtualRange> m_thread_specific_range;
Array<Optional<u32>, NSIG> m_signal_action_masks;
Array<ProcessID, NSIG> m_signal_senders;
// What this thread is currently blocked on (at most one of these).
Blocker* m_blocker { nullptr };
Kernel::Mutex* m_blocking_mutex { nullptr };
u32 m_lock_requested_count { 0 };
IntrusiveListNode<Thread> m_blocked_threads_list_node;
LockRank m_lock_rank_mask { LockRank::None };
bool m_allocation_enabled { true };

// FIXME: remove this after annihilating Process::m_big_lock
IntrusiveListNode<Thread> m_big_lock_blocked_threads_list_node;

#if LOCK_DEBUG
Atomic<u32> m_holding_locks { 0 };
Spinlock m_holding_locks_lock { LockRank::None };
Vector<HoldingLockInfo> m_holding_locks_list;
#endif

JoinBlockerSet m_join_blocker_set;
Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
bool m_is_joinable { true };
bool m_handling_page_fault { false };
PreviousMode m_previous_mode { PreviousMode::KernelMode }; // We always start out in kernel mode

// Per-thread statistics (see the public did_* accessors).
unsigned m_syscall_count { 0 };
unsigned m_inode_faults { 0 };
unsigned m_zero_faults { 0 };
unsigned m_cow_faults { 0 };

unsigned m_file_read_bytes { 0 };
unsigned m_file_write_bytes { 0 };

unsigned m_unix_socket_read_bytes { 0 };
unsigned m_unix_socket_write_bytes { 0 };

unsigned m_ipv4_socket_read_bytes { 0 };
unsigned m_ipv4_socket_write_bytes { 0 };

FPUState m_fpu_state {};
State m_state { Thread::State::Invalid };
NonnullOwnPtr<KString> m_name;
u32 m_priority { THREAD_PRIORITY_NORMAL };

// State to restore when leaving Stopped.
State m_stop_state { Thread::State::Invalid };

bool m_dump_backtrace_on_finalization { false };
bool m_should_die { false };
bool m_initialized { false };
bool m_is_idle_thread { false };
bool m_is_crashing { false };
bool m_is_promise_violation_pending { false };
Atomic<bool> m_have_any_unmasked_pending_signals { false };
Atomic<u32> m_nested_profiler_calls { 0 };

// Timer used to implement block timeouts.
NonnullLockRefPtr<Timer> m_block_timer;

bool m_is_profiling_suppressed { false };
|
|
|
|
|
2021-07-16 04:45:22 +03:00
|
|
|
void yield_and_release_relock_big_lock();
|
2021-08-10 22:20:45 +03:00
|
|
|
|
|
|
|
enum class VerifyLockNotHeld {
|
|
|
|
Yes,
|
|
|
|
No
|
|
|
|
};
|
|
|
|
|
|
|
|
void yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held = VerifyLockNotHeld::Yes);
|
2022-01-26 19:34:04 +03:00
|
|
|
void drop_thread_count();
|
2021-08-07 14:28:18 +03:00
|
|
|
|
2021-08-15 13:38:02 +03:00
|
|
|
mutable IntrusiveListNode<Thread> m_global_thread_list_node;
|
|
|
|
|
2021-08-07 14:28:18 +03:00
|
|
|
public:
|
2021-09-09 15:00:59 +03:00
|
|
|
using ListInProcess = IntrusiveList<&Thread::m_process_thread_list_node>;
|
|
|
|
using GlobalList = IntrusiveList<&Thread::m_global_thread_list_node>;
|
2021-08-15 13:38:02 +03:00
|
|
|
|
2021-08-22 02:37:17 +03:00
|
|
|
static SpinlockProtected<GlobalList>& all_instances();
|
2019-03-24 00:03:17 +03:00
|
|
|
};
|
|
|
|
|
2021-03-07 14:01:11 +03:00
|
|
|
AK_ENUM_BITWISE_OPERATORS(Thread::FileBlocker::BlockFlags);
|
|
|
|
|
2021-05-16 12:36:52 +03:00
|
|
|
template<IteratorFunction<Thread&> Callback>
|
2019-07-19 13:16:00 +03:00
|
|
|
inline IterationDecision Thread::for_each(Callback callback)
|
2019-03-24 00:03:17 +03:00
|
|
|
{
|
2021-08-16 22:52:42 +03:00
|
|
|
return Thread::all_instances().with([&](auto& list) -> IterationDecision {
|
2021-08-15 13:38:02 +03:00
|
|
|
for (auto& thread : list) {
|
|
|
|
IterationDecision decision = callback(thread);
|
|
|
|
if (decision != IterationDecision::Continue)
|
|
|
|
return decision;
|
|
|
|
}
|
|
|
|
return IterationDecision::Continue;
|
|
|
|
});
|
2019-08-07 21:43:54 +03:00
|
|
|
}
|
|
|
|
|
2021-05-16 12:36:52 +03:00
|
|
|
template<IteratorFunction<Thread&> Callback>
|
2021-01-28 08:58:24 +03:00
|
|
|
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
|
2019-08-07 21:43:54 +03:00
|
|
|
{
|
2021-08-16 22:52:42 +03:00
|
|
|
return Thread::all_instances().with([&](auto& list) -> IterationDecision {
|
2021-08-15 13:38:02 +03:00
|
|
|
for (auto& thread : list) {
|
|
|
|
if (thread.state() != state)
|
|
|
|
continue;
|
|
|
|
IterationDecision decision = callback(thread);
|
|
|
|
if (decision != IterationDecision::Continue)
|
|
|
|
return decision;
|
|
|
|
}
|
|
|
|
return IterationDecision::Continue;
|
|
|
|
});
|
2019-08-07 21:43:54 +03:00
|
|
|
}
|
2019-09-07 16:50:44 +03:00
|
|
|
|
2021-05-16 12:36:52 +03:00
|
|
|
template<VoidFunction<Thread&> Callback>
|
|
|
|
inline IterationDecision Thread::for_each(Callback callback)
|
|
|
|
{
|
2021-08-16 22:52:42 +03:00
|
|
|
return Thread::all_instances().with([&](auto& list) {
|
2021-08-15 13:38:02 +03:00
|
|
|
for (auto& thread : list) {
|
|
|
|
if (callback(thread) == IterationDecision::Break)
|
|
|
|
return IterationDecision::Break;
|
|
|
|
}
|
|
|
|
return IterationDecision::Continue;
|
|
|
|
});
|
2021-05-16 12:36:52 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
template<VoidFunction<Thread&> Callback>
|
|
|
|
inline IterationDecision Thread::for_each_in_state(State state, Callback callback)
|
|
|
|
{
|
|
|
|
return for_each_in_state(state, [&](auto& thread) {
|
|
|
|
callback(thread);
|
|
|
|
return IterationDecision::Continue;
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2022-08-25 13:18:16 +03:00
|
|
|
#if LOCK_DEBUG
|
|
|
|
template<IteratorFunction<Thread::HoldingLockInfo const&> Callback>
|
|
|
|
inline void Thread::for_each_held_lock(Callback callback)
|
|
|
|
{
|
|
|
|
SpinlockLocker list_lock(m_holding_locks_lock);
|
|
|
|
|
|
|
|
for (auto const& lock_info : m_holding_locks_list) {
|
|
|
|
if (callback(lock_info) == IterationDecision::Break)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
template<VoidFunction<Thread::HoldingLockInfo const&> Callback>
|
|
|
|
inline void Thread::for_each_held_lock(Callback callback)
|
|
|
|
{
|
|
|
|
for_each_held_lock([&](auto const& lock_info) {
|
|
|
|
callback(lock_info);
|
|
|
|
return IterationDecision::Continue;
|
|
|
|
});
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2020-02-16 03:27:42 +03:00
|
|
|
}
|
2021-01-09 02:42:44 +03:00
|
|
|
|
|
|
|
// Formatter specialization so a Kernel::Thread can be passed directly to the
// AK format APIs (e.g. "{}" in dbgln/format calls).
template<>
struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
    // Defined out-of-line (presumably in Thread.cpp — the definition is not
    // visible in this header).
    ErrorOr<void> format(FormatBuilder&, Kernel::Thread const&);
};
|