Kernel: Keep a list of threads per Process

This allows us to iterate over only the threads of a given process.
Tom 2021-01-22 23:24:33 -07:00 committed by Andreas Kling
parent 03a9ee79fa
commit ac3927086f
4 changed files with 45 additions and 28 deletions
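
The core idea of the commit is to give each Thread an embedded list node so its Process can link it into a per-process list: membership costs no allocation, and removal is O(1) given only the element. A minimal, self-contained sketch of that intrusive-list pattern (illustrative only, not the AK::IntrusiveList implementation):

```cpp
#include <cassert>

// Hypothetical stand-in for an intrusive list node: the links live
// inside the element itself, so linking/unlinking never allocates.
struct ListNode {
    ListNode* prev { nullptr };
    ListNode* next { nullptr };
    bool is_in_list() const { return prev != nullptr || next != nullptr; }
};

struct Thread {
    int tid;
    ListNode process_thread_list_node; // embedded hook, one list membership
};

struct ThreadList {
    ListNode head; // sentinel; head.next/head.prev point at first/last node
    ThreadList() { head.prev = head.next = &head; }

    void append(Thread& t)
    {
        ListNode& n = t.process_thread_list_node;
        assert(!n.is_in_list());
        n.prev = head.prev;
        n.next = &head;
        head.prev->next = &n;
        head.prev = &n;
    }

    // O(1) removal: no traversal, the element carries its own links.
    void remove(Thread& t)
    {
        ListNode& n = t.process_thread_list_node;
        assert(n.is_in_list());
        n.prev->next = n.next;
        n.next->prev = n.prev;
        n.prev = n.next = nullptr;
    }
};
```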

Kernel/Process.cpp

@@ -907,4 +907,22 @@ PerformanceEventBuffer& Process::ensure_perf_events()
     m_perf_event_buffer = make<PerformanceEventBuffer>();
     return *m_perf_event_buffer;
 }
+
+bool Process::remove_thread(Thread& thread)
+{
+    auto thread_cnt_before = m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
+    ASSERT(thread_cnt_before != 0);
+    ScopedSpinLock thread_list_lock(m_thread_list_lock);
+    m_thread_list.remove(thread);
+    return thread_cnt_before == 1;
+}
+
+bool Process::add_thread(Thread& thread)
+{
+    bool is_first = m_thread_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed) == 0;
+    ScopedSpinLock thread_list_lock(m_thread_list_lock);
+    m_thread_list.append(thread);
+    return is_first;
+}
+
 }
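
add_thread()/remove_thread() fold the old manual m_thread_count bookkeeping behind one API: fetch_add returning 0 means the caller attached the first thread, and fetch_sub returning 1 means it detached the last one, which is what later gates finalization. A compressed sketch of that counter pattern using std::atomic (assumed to mirror AK::Atomic's semantics here):

```cpp
#include <atomic>
#include <cassert>

struct Counter {
    std::atomic<unsigned> count { 0 };

    // Returns true if this call added the *first* reference.
    bool add()
    {
        // relaxed suffices: the caller has not published anything yet.
        return count.fetch_add(1, std::memory_order_relaxed) == 0;
    }

    // Returns true if this call removed the *last* reference.
    bool remove()
    {
        // acq_rel so everything done before the final remove()
        // happens-before whoever observes the count reaching zero.
        unsigned before = count.fetch_sub(1, std::memory_order_acq_rel);
        assert(before != 0);
        return before == 1;
    }
};
```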

Kernel/Process.h

@@ -204,7 +204,7 @@ public:
     template<typename Callback>
     void for_each_child(Callback);
     template<typename Callback>
-    void for_each_thread(Callback) const;
+    IterationDecision for_each_thread(Callback) const;
 
     void die();
     void finalize();
@@ -507,6 +507,9 @@ private:
     friend class Scheduler;
     friend class Region;
 
+    bool add_thread(Thread&);
+    bool remove_thread(Thread&);
+
     PerformanceEventBuffer& ensure_perf_events();
     Process(RefPtr<Thread>& first_thread, const String& name, uid_t, gid_t, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> cwd = nullptr, RefPtr<Custody> executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
@@ -592,6 +595,8 @@ private:
     u8 m_termination_status { 0 };
     u8 m_termination_signal { 0 };
     Atomic<u32> m_thread_count { 0 };
+    mutable IntrusiveList<Thread, &Thread::m_process_thread_list_node> m_thread_list;
+    mutable RecursiveSpinLock m_thread_list_lock;
 
     const bool m_is_kernel_process;
     bool m_dead { false };
@@ -693,12 +698,9 @@ inline void Process::for_each_child(Callback callback)
 }
 
 template<typename Callback>
-inline void Process::for_each_thread(Callback callback) const
+inline IterationDecision Process::for_each_thread(Callback callback) const
 {
-    InterruptDisabler disabler;
-    ProcessID my_pid = pid();
-
-    if (my_pid == 0) {
+    if (pid() == 0) {
         // NOTE: Special case the colonel process, since its main thread is not in the global thread table.
         Processor::for_each(
             [&](Processor& proc) -> IterationDecision {
@@ -707,15 +709,15 @@ inline void Process::for_each_thread(Callback callback) const
                     return callback(*idle_thread);
                 return IterationDecision::Continue;
             });
-        return;
+    } else {
+        ScopedSpinLock thread_list_lock(m_thread_list_lock);
+        for (auto& thread : m_thread_list) {
+            IterationDecision decision = callback(thread);
+            if (decision != IterationDecision::Continue)
+                return decision;
+        }
     }
-
-    Thread::for_each([callback, my_pid](Thread& thread) -> IterationDecision {
-        if (thread.pid() == my_pid)
-            return callback(thread);
-        return IterationDecision::Continue;
-    });
+    return IterationDecision::Continue;
 }
 
 template<typename Callback>
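Since for_each_thread() now walks the per-process list instead of filtering the global thread table, and reports the final IterationDecision, callers can stop early and learn whether they did. A hypothetical caller (find_thread_by_tid is made up for illustration):

```cpp
// Hypothetical helper: scan this process's threads for a given tid,
// bailing out of the iteration as soon as a match is found.
Thread* find_thread_by_tid(Process& process, ThreadID tid)
{
    Thread* found = nullptr;
    process.for_each_thread([&](Thread& thread) -> IterationDecision {
        if (thread.tid() == tid) {
            found = &thread;
            return IterationDecision::Break; // stops the walk immediately
        }
        return IterationDecision::Continue;
    });
    return found;
}
```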

Kernel/Thread.cpp

@@ -49,7 +49,7 @@ Thread::Thread(NonnullRefPtr<Process> process)
     : m_process(move(process))
     , m_name(m_process->name())
 {
-    bool is_first_thread = m_process->m_thread_count.fetch_add(1, AK::MemoryOrder::memory_order_relaxed) == 0;
+    bool is_first_thread = m_process->add_thread(*this);
     ArmedScopeGuard guard([&]() {
         drop_thread_count(is_first_thread);
     });
@@ -130,6 +130,7 @@ Thread::~Thread()
         // block conditions would access m_process, which would be in
         // the middle of being destroyed.
        ScopedSpinLock lock(g_scheduler_lock);
+        ASSERT(!m_process_thread_list_node.is_in_list());
        g_scheduler_data->thread_list_for_state(m_state).remove(*this);
 
        // We shouldn't be queued
@@ -388,10 +389,9 @@ void Thread::finalize()
 
 void Thread::drop_thread_count(bool initializing_first_thread)
 {
-    auto thread_cnt_before = m_process->m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
-    ASSERT(thread_cnt_before != 0);
-    if (!initializing_first_thread && thread_cnt_before == 1)
+    bool is_last = process().remove_thread(*this);
+    if (!initializing_first_thread && is_last)
         process().finalize();
 }
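
The constructor's ArmedScopeGuard (visible in the first hunk above) is what keeps add_thread()/remove_thread() balanced on failure paths: if construction bails out after the thread was counted, the guard calls drop_thread_count() on unwind, and on success the guard is disarmed. A generic sketch of such a guard, assuming roughly AK-like semantics:

```cpp
#include <utility>

// Sketch of an armed scope guard: runs the callback on scope exit
// unless disarm() was called first.
template<typename Callback>
class ArmedScopeGuard {
public:
    explicit ArmedScopeGuard(Callback callback)
        : m_callback(std::move(callback))
    {
    }
    ~ArmedScopeGuard()
    {
        if (m_armed)
            m_callback(); // cleanup only runs on the failure path
    }
    void disarm() { m_armed = false; }

private:
    Callback m_callback;
    bool m_armed { true };
};
```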

Kernel/Thread.h

@@ -50,6 +50,8 @@
 
 namespace Kernel {
 
+extern RecursiveSpinLock s_mm_lock;
+
 enum class DispatchSignalResult {
     Deferred = 0,
     Yield,
@@ -818,6 +820,7 @@ public:
         ASSERT(!Processor::current().in_irq());
         ASSERT(this == Thread::current());
         ScopedCritical critical;
+        ASSERT(!s_mm_lock.own_lock());
         ScopedSpinLock scheduler_lock(g_scheduler_lock);
         ScopedSpinLock block_lock(m_block_lock);
         // We need to hold m_block_lock so that nobody can unblock a blocker as soon
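
The new ASSERT(!s_mm_lock.own_lock()) catches a class of deadlock: a thread must not go to sleep in block() while it still holds the MM lock. own_lock() only needs to answer "does the current execution context hold this lock?", which a recursive spinlock can track alongside its owner field. A stripped-down sketch of that idea (std::atomic and std::thread in place of the kernel's primitives; not the kernel's implementation):

```cpp
#include <atomic>
#include <thread>

// Minimal recursive-spinlock sketch that remembers its owner so
// own_lock() can be asserted on.
class RecursiveSpinLock {
public:
    void lock()
    {
        auto me = std::this_thread::get_id();
        if (m_owner.load(std::memory_order_acquire) == me) {
            ++m_recursions; // already ours: just count the re-entry
            return;
        }
        std::thread::id expected {};
        // Spin until we swap ourselves in as the owner.
        while (!m_owner.compare_exchange_weak(expected, me, std::memory_order_acquire))
            expected = {};
        m_recursions = 1;
    }

    void unlock()
    {
        if (--m_recursions == 0)
            m_owner.store({}, std::memory_order_release);
    }

    bool own_lock() const
    {
        return m_owner.load(std::memory_order_acquire) == std::this_thread::get_id();
    }

private:
    std::atomic<std::thread::id> m_owner {};
    unsigned m_recursions { 0 }; // only touched by the owner
};
```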
@@ -1061,18 +1064,12 @@ public:
         m_ipv4_socket_write_bytes += bytes;
     }
 
-    void set_active(bool active)
-    {
-        m_is_active.store(active, AK::memory_order_release);
-    }
+    void set_active(bool active) { m_is_active = active; }
 
     u32 saved_critical() const { return m_saved_critical; }
     void save_critical(u32 critical) { m_saved_critical = critical; }
 
-    [[nodiscard]] bool is_active() const
-    {
-        return m_is_active.load(AK::MemoryOrder::memory_order_acquire);
-    }
+    [[nodiscard]] bool is_active() const { return m_is_active; }
 
     [[nodiscard]] bool is_finalizable() const
     {
@@ -1170,10 +1167,10 @@ public:
     void set_handling_page_fault(bool b) { m_handling_page_fault = b; }
 
 private:
+    IntrusiveListNode m_process_thread_list_node;
     IntrusiveListNode m_runnable_list_node;
     int m_runnable_priority { -1 };
 
-private:
     friend struct SchedulerData;
     friend class WaitQueue;
@@ -1274,7 +1271,7 @@ private:
 #endif
 
     JoinBlockCondition m_join_condition;
-    Atomic<bool> m_is_active { false };
+    Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
    bool m_is_joinable { true };
    bool m_handling_page_fault { false };
    PreviousMode m_previous_mode { PreviousMode::UserMode };
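
The Atomic<bool, AK::MemoryOrder::memory_order_relaxed> change bakes the ordering into the type, which is why the set_active()/is_active() accessors above shrink to plain assignment and conversion while still compiling down to relaxed atomic operations. A sketch of how such a wrapper can be built on std::atomic (assumed to approximate AK::Atomic's behavior):

```cpp
#include <atomic>

// Sketch: an atomic whose default memory order is a template parameter,
// so `flag = true` and `bool b = flag` use that order implicitly.
template<typename T, std::memory_order DefaultOrder = std::memory_order_seq_cst>
class Atomic {
public:
    constexpr Atomic(T value)
        : m_value(value)
    {
    }

    Atomic& operator=(T desired)
    {
        m_value.store(desired, DefaultOrder);
        return *this;
    }

    operator T() const { return m_value.load(DefaultOrder); }

private:
    std::atomic<T> m_value;
};

// m_is_active-style usage: relaxed is fine for a flag that is only
// sampled, never used to publish other data.
Atomic<bool, std::memory_order_relaxed> is_active { false };
```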