Kernel: Allow Lock to block from BlockCondition

This enables the Lock class to block a thread even while the thread is
working on a BlockCondition. A thread can still only be blocked by either
a Lock or a BlockCondition at any given time.

This also establishes a linked list of threads that are blocked by a
Lock, so that unlocking can hand the lock over to the blocked threads
and wake them directly.
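
The handoff can be pictured with a plain userspace analogue. The sketch below is not SerenityOS code: it uses std::mutex and std::condition_variable as stand-ins for the kernel's SpinLock and scheduler, and all names in it are hypothetical.

#include <condition_variable>
#include <list>
#include <mutex>

// Illustrative userspace analogue, not the kernel API: each waiter parks on
// its own condition flag, and unlock() hands ownership directly to the first
// waiter instead of letting everyone race to re-acquire.
class HandoffLock {
public:
    void lock()
    {
        std::unique_lock guard(m_state_lock);
        if (!m_held) {
            m_held = true; // uncontended fast path
            return;
        }
        Waiter self;
        m_waiters.push_back(&self); // counterpart of Lock's blocked-threads list
        self.cv.wait(guard, [&] { return self.granted; }); // park until handed the lock
    }

    void unlock()
    {
        std::unique_lock guard(m_state_lock);
        if (m_waiters.empty()) {
            m_held = false; // nobody to hand off to; the lock becomes free
            return;
        }
        // Direct handoff: m_held stays true, so the lock is never observably
        // free in between, and the woken thread does not have to re-race.
        Waiter* next = m_waiters.front();
        m_waiters.pop_front();
        next->granted = true;
        next->cv.notify_one();
    }

private:
    struct Waiter {
        std::condition_variable cv;
        bool granted { false };
    };

    std::mutex m_state_lock; // plays the role of the Lock's internal SpinLock
    bool m_held { false };
    std::list<Waiter*> m_waiters; // the kernel uses an intrusive list instead
};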
Tom 2021-07-10 10:23:16 -06:00 committed by Andreas Kling
parent d9fb93c5ce
commit 026ffa343d
8 changed files with 442 additions and 267 deletions

Kernel/Lock.cpp

@ -8,6 +8,7 @@
#include <Kernel/Debug.h>
#include <Kernel/KSyms.h>
#include <Kernel/Lock.h>
#include <Kernel/SpinLock.h>
#include <Kernel/Thread.h>
namespace Kernel {
@ -23,71 +24,83 @@ void Lock::lock(Mode mode)
    VERIFY(!Processor::current().in_irq());
    VERIFY(mode != Mode::Unlocked);
    auto current_thread = Thread::current();

    ScopedCritical critical; // in case we're not in a critical section already
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) != false) {
            // I don't know *who* is using "m_lock", so just yield.
            Scheduler::yield_from_critical();
            continue;
        }
        // FIXME: Do not add new readers if writers are queued.
        Mode current_mode = m_mode;
        switch (current_mode) {
        case Mode::Unlocked: {
            dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ ({}) {}: acquire {}, currently unlocked", this, m_name, mode_to_string(mode));
            m_mode = mode;
            VERIFY(!m_holder);
            VERIFY(m_shared_holders.is_empty());
            if (mode == Mode::Exclusive) {
                m_holder = current_thread;
            } else {
                VERIFY(mode == Mode::Shared);
                m_shared_holders.set(current_thread, 1);
            }
            VERIFY(m_times_locked == 0);
            m_times_locked++;
#if LOCK_DEBUG
            if (current_thread) {
                current_thread->holding_lock(*this, 1, location);
            }
#endif
            m_queue.should_block(true);
            m_lock.store(false, AK::memory_order_release);
            return;
        }
        case Mode::Exclusive: {
            VERIFY(m_holder);
            if (m_holder != current_thread)
                break;
            VERIFY(m_shared_holders.is_empty());
            if constexpr (LOCK_TRACE_DEBUG) {
                if (mode == Mode::Exclusive)
                    dbgln("Lock::lock @ {} ({}): acquire {}, currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
                else
                    dbgln("Lock::lock @ {} ({}): acquire exclusive (requested {}), currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
            }
            VERIFY(mode == Mode::Exclusive || mode == Mode::Shared);
            VERIFY(m_times_locked > 0);
            m_times_locked++;
#if LOCK_DEBUG
            current_thread->holding_lock(*this, 1, location);
#endif
            m_lock.store(false, AK::memory_order_release);
            return;
        }
        case Mode::Shared: {
            VERIFY(!m_holder);
            if (mode != Mode::Shared)
                break;
            dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}): acquire {}, currently shared, locks held {}", this, m_name, mode_to_string(mode), m_times_locked);
            VERIFY(m_times_locked > 0);
            m_times_locked++;
            VERIFY(!m_shared_holders.is_empty());
            auto it = m_shared_holders.find(current_thread);

    ScopedSpinLock lock(m_lock);
    bool did_block = false;
    Mode current_mode = m_mode;
    switch (current_mode) {
    case Mode::Unlocked: {
        dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ ({}) {}: acquire {}, currently unlocked", this, m_name, mode_to_string(mode));
        m_mode = mode;
        VERIFY(!m_holder);
        VERIFY(m_shared_holders.is_empty());
        if (mode == Mode::Exclusive) {
            m_holder = current_thread;
        } else {
            VERIFY(mode == Mode::Shared);
            m_shared_holders.set(current_thread, 1);
        }
        VERIFY(m_times_locked == 0);
        m_times_locked++;
#if LOCK_DEBUG
        if (current_thread) {
            current_thread->holding_lock(*this, 1, location);
        }
#endif
        return;
    }
    case Mode::Exclusive: {
        VERIFY(m_holder);
        if (m_holder != current_thread) {
            block(*current_thread, mode, lock, 1);
            did_block = true;
        }
        VERIFY(m_shared_holders.is_empty());
        if constexpr (LOCK_TRACE_DEBUG) {
            if (mode == Mode::Exclusive)
                dbgln("Lock::lock @ {} ({}): acquire {}, currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
            else
                dbgln("Lock::lock @ {} ({}): acquire exclusive (requested {}), currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
        }
        VERIFY(mode == Mode::Exclusive || mode == Mode::Shared);
        VERIFY(m_times_locked > 0);
        if (!did_block)
            m_times_locked++;
#if LOCK_DEBUG
        current_thread->holding_lock(*this, 1, location);
#endif
        return;
    }
    case Mode::Shared: {
        VERIFY(!m_holder);
        if (mode == Mode::Exclusive && m_shared_holders.size() == 1) {
            auto it = m_shared_holders.begin();
            if (it->key == current_thread) {
                it->value++;
                m_times_locked++;
                m_mode = Mode::Exclusive;
                m_holder = current_thread;
                m_shared_holders.clear();
                dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}): acquire {}, converted shared to exclusive lock, locks held {}", this, m_name, mode_to_string(mode), m_times_locked);
                return;
            }
        }
        if (mode != Mode::Shared) {
            block(*current_thread, mode, lock, 1);
            did_block = true;
        }
        dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}): acquire {}, currently shared, locks held {}", this, m_name, mode_to_string(mode), m_times_locked);
        VERIFY(m_times_locked > 0);
        if (did_block) {
            VERIFY(m_shared_holders.contains(current_thread));
        } else {
            m_times_locked++;
            VERIFY(!m_shared_holders.is_empty());
            auto it = m_shared_holders.find(current_thread);
@ -95,20 +108,15 @@ void Lock::lock(Mode mode)
                it->value++;
            else
                m_shared_holders.set(current_thread, 1);
#if LOCK_DEBUG
            current_thread->holding_lock(*this, 1, location);
#endif
            m_lock.store(false, AK::memory_order_release);
            return;
        }
        default:
            VERIFY_NOT_REACHED();
        }
        m_lock.store(false, AK::memory_order_release);
        dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}) waiting...", this, m_name);
        m_queue.wait_forever(m_name);
        dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}) waited", this, m_name);
    }

                it->value++;
            else
                m_shared_holders.set(current_thread, 1);
        }
#if LOCK_DEBUG
        current_thread->holding_lock(*this, 1, location);
#endif
        return;
    }
    default:
        VERIFY_NOT_REACHED();
    }
}
@ -118,66 +126,107 @@ void Lock::unlock()
    // and also from within critical sections!
    VERIFY(!Processor::current().in_irq());
    auto current_thread = Thread::current();

    ScopedCritical critical; // in case we're not in a critical section already
    for (;;) {
        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
            Mode current_mode = m_mode;
            if constexpr (LOCK_TRACE_DEBUG) {
                if (current_mode == Mode::Shared)
                    dbgln("Lock::unlock @ {} ({}): release {}, locks held: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
                else
                    dbgln("Lock::unlock @ {} ({}): release {}, holding: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
            }
            VERIFY(current_mode != Mode::Unlocked);
            VERIFY(m_times_locked > 0);
            m_times_locked--;
            switch (current_mode) {
            case Mode::Exclusive:
                VERIFY(m_holder == current_thread);
                VERIFY(m_shared_holders.is_empty());
                if (m_times_locked == 0)
                    m_holder = nullptr;
                break;
            case Mode::Shared: {
                VERIFY(!m_holder);
                auto it = m_shared_holders.find(current_thread);
                VERIFY(it != m_shared_holders.end());
                if (it->value > 1) {
                    it->value--;
                } else {
                    VERIFY(it->value > 0);
                    m_shared_holders.remove(it);
                }
                break;
            }
            default:
                VERIFY_NOT_REACHED();
            }
            bool unlocked_last = (m_times_locked == 0);
            if (unlocked_last) {
                VERIFY(current_mode == Mode::Exclusive ? !m_holder : m_shared_holders.is_empty());
                m_mode = Mode::Unlocked;
                m_queue.should_block(false);
            }
#if LOCK_DEBUG
            if (current_thread) {
                current_thread->holding_lock(*this, -1, {});
            }
#endif
            m_lock.store(false, AK::memory_order_release);
            if (unlocked_last) {
                u32 did_wake = m_queue.wake_one();
                dbgln_if(LOCK_TRACE_DEBUG, "Lock::unlock @ {} ({}) wake one ({})", this, m_name, did_wake);
            }
            return;
        }
        // I don't know *who* is using "m_lock", so just yield.
        Scheduler::yield_from_critical();
    }

    ScopedSpinLock lock(m_lock);
    Mode current_mode = m_mode;
    if constexpr (LOCK_TRACE_DEBUG) {
        if (current_mode == Mode::Shared)
            dbgln("Lock::unlock @ {} ({}): release {}, locks held: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
        else
            dbgln("Lock::unlock @ {} ({}): release {}, holding: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
    }
    VERIFY(current_mode != Mode::Unlocked);
    VERIFY(m_times_locked > 0);
    m_times_locked--;
    switch (current_mode) {
    case Mode::Exclusive:
        VERIFY(m_holder == current_thread);
        VERIFY(m_shared_holders.is_empty());
        if (m_times_locked == 0)
            m_holder = nullptr;
        break;
    case Mode::Shared: {
        VERIFY(!m_holder);
        auto it = m_shared_holders.find(current_thread);
        VERIFY(it != m_shared_holders.end());
        if (it->value > 1) {
            it->value--;
        } else {
            VERIFY(it->value > 0);
            m_shared_holders.remove(it);
        }
        break;
    }
    default:
        VERIFY_NOT_REACHED();
    }
#if LOCK_DEBUG
    if (current_thread) {
        current_thread->holding_lock(*this, -1, {});
    }
#endif
    if (m_times_locked == 0) {
        VERIFY(current_mode == Mode::Exclusive ? !m_holder : m_shared_holders.is_empty());
        m_mode = Mode::Unlocked;
        unblock_waiters(current_mode);
    }
}
void Lock::block(Thread& current_thread, Mode mode, ScopedSpinLock<SpinLock<u8>>& lock, u32 requested_locks)
{
auto& blocked_thread_list = thread_list_for_mode(mode);
VERIFY(!blocked_thread_list.contains(current_thread));
blocked_thread_list.append(current_thread);
dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}) waiting...", this, m_name);
current_thread.block(*this, lock, requested_locks);
dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}) waited", this, m_name);
VERIFY(blocked_thread_list.contains(current_thread));
blocked_thread_list.remove(current_thread);
}
void Lock::unblock_waiters(Mode previous_mode)
{
VERIFY(m_times_locked == 0);
VERIFY(m_mode == Mode::Unlocked);
if (m_blocked_threads_list_exclusive.is_empty() && m_blocked_threads_list_shared.is_empty())
return;
auto unblock_shared = [&]() {
if (m_blocked_threads_list_shared.is_empty())
return false;
m_mode = Mode::Shared;
for (auto& thread : m_blocked_threads_list_shared) {
auto requested_locks = thread.unblock_from_lock(*this);
auto set_result = m_shared_holders.set(&thread, requested_locks);
VERIFY(set_result == AK::HashSetResult::InsertedNewEntry);
m_times_locked += requested_locks;
}
return true;
};
auto unblock_exclusive = [&]() {
if (auto* next_exclusive_thread = m_blocked_threads_list_exclusive.first()) {
m_mode = Mode::Exclusive;
m_times_locked = next_exclusive_thread->unblock_from_lock(*this);
m_holder = next_exclusive_thread;
return true;
}
return false;
};
if (previous_mode == Mode::Exclusive) {
if (!unblock_shared())
unblock_exclusive();
} else {
if (!unblock_exclusive())
unblock_shared();
}
}
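
unblock_waiters() above also encodes a wake policy: after an exclusive holder releases, shared waiters are preferred (and all of them are woken at once); after the last shared holder releases, a single exclusive waiter is preferred. A condensed sketch of just that policy decision, with assumed standard-library types in place of the kernel's intrusive thread lists:

#include <deque>
#include <string>

enum class Mode { Unlocked, Shared, Exclusive };

struct WaitLists {
    std::deque<std::string> shared;    // threads waiting for shared access
    std::deque<std::string> exclusive; // threads waiting for exclusive access
};

// Returns the mode granted next, preferring the opposite of the mode that
// just released so that neither class of waiter starves the other.
Mode pick_next(WaitLists& lists, Mode previous_mode)
{
    auto grant_shared = [&] {
        if (lists.shared.empty())
            return false;
        lists.shared.clear(); // wake *all* shared waiters at once
        return true;
    };
    auto grant_exclusive = [&] {
        if (lists.exclusive.empty())
            return false;
        lists.exclusive.pop_front(); // wake exactly one exclusive waiter
        return true;
    };
    if (previous_mode == Mode::Exclusive)
        return grant_shared() ? Mode::Shared : (grant_exclusive() ? Mode::Exclusive : Mode::Unlocked);
    return grant_exclusive() ? Mode::Exclusive : (grant_shared() ? Mode::Shared : Mode::Unlocked);
}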
@ -187,77 +236,61 @@ auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
// and also from within critical sections!
VERIFY(!Processor::current().in_irq());
auto current_thread = Thread::current();
ScopedCritical critical; // in case we're not in a critical section already
for (;;) {
if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
Mode previous_mode;
auto current_mode = m_mode.load(AK::MemoryOrder::memory_order_relaxed);
switch (current_mode) {
case Mode::Exclusive: {
if (m_holder != current_thread) {
m_lock.store(false, AK::MemoryOrder::memory_order_release);
lock_count_to_restore = 0;
return Mode::Unlocked;
}
dbgln_if(LOCK_RESTORE_DEBUG, "Lock::force_unlock_if_locked @ {}: unlocking exclusive with lock count: {}", this, m_times_locked);
#if LOCK_DEBUG
m_holder->holding_lock(*this, -(int)m_times_locked, {});
#endif
m_holder = nullptr;
VERIFY(m_times_locked > 0);
lock_count_to_restore = m_times_locked;
m_times_locked = 0;
m_mode = Mode::Unlocked;
m_queue.should_block(false);
m_lock.store(false, AK::memory_order_release);
previous_mode = Mode::Exclusive;
break;
}
case Mode::Shared: {
VERIFY(!m_holder);
auto it = m_shared_holders.find(current_thread);
if (it == m_shared_holders.end()) {
m_lock.store(false, AK::MemoryOrder::memory_order_release);
lock_count_to_restore = 0;
return Mode::Unlocked;
}
dbgln_if(LOCK_RESTORE_DEBUG, "Lock::force_unlock_if_locked @ {}: unlocking exclusive with lock count: {}, total locks: {}",
this, it->value, m_times_locked);
VERIFY(it->value > 0);
lock_count_to_restore = it->value;
VERIFY(lock_count_to_restore > 0);
#if LOCK_DEBUG
m_holder->holding_lock(*this, -(int)lock_count_to_restore, {});
#endif
m_shared_holders.remove(it);
VERIFY(m_times_locked >= lock_count_to_restore);
m_times_locked -= lock_count_to_restore;
if (m_times_locked == 0) {
m_mode = Mode::Unlocked;
m_queue.should_block(false);
}
m_lock.store(false, AK::memory_order_release);
previous_mode = Mode::Shared;
break;
}
case Mode::Unlocked: {
m_lock.store(false, AK::memory_order_relaxed);
lock_count_to_restore = 0;
previous_mode = Mode::Unlocked;
break;
}
default:
VERIFY_NOT_REACHED();
}
m_queue.wake_one();
return previous_mode;
}
// I don't know *who* is using "m_lock", so just yield.
Scheduler::yield_from_critical();
}
ScopedSpinLock lock(m_lock);
auto current_mode = m_mode;
switch (current_mode) {
case Mode::Exclusive: {
if (m_holder != current_thread) {
lock_count_to_restore = 0;
return Mode::Unlocked;
}
dbgln_if(LOCK_RESTORE_DEBUG, "Lock::force_unlock_if_locked @ {}: unlocking exclusive with lock count: {}", this, m_times_locked);
#if LOCK_DEBUG
m_holder->holding_lock(*this, -(int)m_times_locked, {});
#endif
m_holder = nullptr;
VERIFY(m_times_locked > 0);
lock_count_to_restore = m_times_locked;
m_times_locked = 0;
m_mode = Mode::Unlocked;
unblock_waiters(Mode::Exclusive);
break;
}
case Mode::Shared: {
VERIFY(!m_holder);
auto it = m_shared_holders.find(current_thread);
if (it == m_shared_holders.end()) {
lock_count_to_restore = 0;
return Mode::Unlocked;
}
dbgln_if(LOCK_RESTORE_DEBUG, "Lock::force_unlock_if_locked @ {}: unlocking exclusive with lock count: {}, total locks: {}",
this, it->value, m_times_locked);
VERIFY(it->value > 0);
lock_count_to_restore = it->value;
VERIFY(lock_count_to_restore > 0);
#if LOCK_DEBUG
m_holder->holding_lock(*this, -(int)lock_count_to_restore, {});
#endif
m_shared_holders.remove(it);
VERIFY(m_times_locked >= lock_count_to_restore);
m_times_locked -= lock_count_to_restore;
if (m_times_locked == 0) {
m_mode = Mode::Unlocked;
unblock_waiters(Mode::Shared);
}
break;
}
case Mode::Unlocked: {
lock_count_to_restore = 0;
break;
}
default:
VERIFY_NOT_REACHED();
}
return current_mode;
}
#if LOCK_DEBUG
@ -270,68 +303,66 @@ void Lock::restore_lock(Mode mode, u32 lock_count)
VERIFY(lock_count > 0);
VERIFY(!Processor::current().in_irq());
auto current_thread = Thread::current();
ScopedCritical critical; // in case we're not in a critical section already
for (;;) {
if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
switch (mode) {
case Mode::Exclusive: {
auto expected_mode = Mode::Unlocked;
if (!m_mode.compare_exchange_strong(expected_mode, Mode::Exclusive))
break;
dbgln_if(LOCK_RESTORE_DEBUG, "Lock::restore_lock @ {}: restoring {} with lock count {}, was unlocked", this, mode_to_string(mode), lock_count);
VERIFY(m_times_locked == 0);
m_times_locked = lock_count;
VERIFY(!m_holder);
VERIFY(m_shared_holders.is_empty());
m_holder = current_thread;
m_queue.should_block(true);
m_lock.store(false, AK::memory_order_release);
#if LOCK_DEBUG
m_holder->holding_lock(*this, (int)lock_count, location);
#endif
return;
}
case Mode::Shared: {
auto expected_mode = Mode::Unlocked;
if (!m_mode.compare_exchange_strong(expected_mode, Mode::Shared) && expected_mode != Mode::Shared)
break;
dbgln_if(LOCK_RESTORE_DEBUG, "Lock::restore_lock @ {}: restoring {} with lock count {}, was {}",
this, mode_to_string(mode), lock_count, mode_to_string(expected_mode));
VERIFY(expected_mode == Mode::Shared || m_times_locked == 0);
m_times_locked += lock_count;
VERIFY(!m_holder);
VERIFY((expected_mode == Mode::Unlocked) == m_shared_holders.is_empty());
auto set_result = m_shared_holders.set(current_thread, lock_count);
// There may be other shared lock holders already, but we should not have an entry yet
VERIFY(set_result == AK::HashSetResult::InsertedNewEntry);
m_queue.should_block(true);
m_lock.store(false, AK::memory_order_release);
#if LOCK_DEBUG
m_holder->holding_lock(*this, (int)lock_count, location);
#endif
return;
}
default:
VERIFY_NOT_REACHED();
}
m_lock.store(false, AK::memory_order_relaxed);
}
// I don't know *who* is using "m_lock", so just yield.
Scheduler::yield_from_critical();
}
bool did_block = false;
ScopedSpinLock lock(m_lock);
switch (mode) {
case Mode::Exclusive: {
if (m_mode != Mode::Unlocked) {
block(*current_thread, Mode::Exclusive, lock, lock_count);
did_block = true;
}
dbgln_if(LOCK_RESTORE_DEBUG, "Lock::restore_lock @ {}: restoring {} with lock count {}, was unlocked", this, mode_to_string(mode), lock_count);
VERIFY(m_shared_holders.is_empty());
if (did_block) {
VERIFY(m_mode == Mode::Exclusive);
VERIFY(m_times_locked > 0);
VERIFY(m_holder == current_thread);
} else {
m_mode = Mode::Exclusive;
VERIFY(m_times_locked == 0);
m_times_locked = lock_count;
VERIFY(!m_holder);
m_holder = current_thread;
}
#if LOCK_DEBUG
m_holder->holding_lock(*this, (int)lock_count, location);
#endif
return;
}
case Mode::Shared: {
auto previous_mode = m_mode;
if (m_mode != Mode::Unlocked && m_mode != Mode::Shared) {
block(*current_thread, Mode::Shared, lock, lock_count);
did_block = true;
}
dbgln_if(LOCK_RESTORE_DEBUG, "Lock::restore_lock @ {}: restoring {} with lock count {}, was {}",
this, mode_to_string(mode), lock_count, mode_to_string(previous_mode));
VERIFY(!m_holder);
if (did_block) {
VERIFY(m_mode == Mode::Shared);
VERIFY(m_times_locked > 0);
VERIFY(m_shared_holders.contains(current_thread));
} else {
m_mode = Mode::Shared;
m_times_locked += lock_count;
auto set_result = m_shared_holders.set(current_thread, lock_count);
// There may be other shared lock holders already, but we should not have an entry yet
VERIFY(set_result == AK::HashSetResult::InsertedNewEntry);
}
#if LOCK_DEBUG
m_holder->holding_lock(*this, (int)lock_count, location);
#endif
return;
}
default:
VERIFY_NOT_REACHED();
}
}
void Lock::clear_waiters()
{
VERIFY(m_mode != Mode::Shared);
m_queue.wake_all();
}
}
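
force_unlock_if_locked() and restore_lock() work as a pair: a thread that is about to sleep for an unrelated reason gives up all of its recursive acquisitions at once and later reacquires the same count, as yield_without_holding_big_lock() does with the process big lock. A minimal sketch of that bookkeeping, assuming a simple counter-based recursive lock (illustrative names only, none of this is the kernel API):

#include <cassert>

struct RecursiveLock {
    unsigned depth { 0 };

    void lock() { ++depth; }
    void unlock()
    {
        assert(depth > 0);
        --depth;
    }

    // Drop every recursive acquisition at once, reporting how many were held,
    // so the caller can block without still owning the lock.
    unsigned force_unlock_if_locked()
    {
        unsigned count = depth;
        depth = 0;
        return count;
    }

    // Reacquire to exactly the depth reported earlier.
    void restore_lock(unsigned count) { depth += count; }
};

int main()
{
    RecursiveLock big_lock;
    big_lock.lock();
    big_lock.lock(); // re-entered, depth == 2
    auto count = big_lock.force_unlock_if_locked(); // depth -> 0, count == 2
    // ... the thread would block here without holding the lock ...
    big_lock.restore_lock(count); // depth back to 2
    assert(big_lock.depth == 2);
}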

Kernel/Lock.h

@ -17,6 +17,8 @@
namespace Kernel {
class Lock {
friend class Thread;
AK_MAKE_NONCOPYABLE(Lock);
AK_MAKE_NONMOVABLE(Lock);
@ -40,7 +42,6 @@ public:
void unlock();
[[nodiscard]] Mode force_unlock_if_locked(u32&);
[[nodiscard]] bool is_locked() const { return m_mode != Mode::Unlocked; }
void clear_waiters();
[[nodiscard]] const char* name() const { return m_name; }
@ -59,10 +60,19 @@ public:
}
private:
Atomic<bool> m_lock { false };
typedef IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_blocked_threads_list_node> BlockedThreadList;
ALWAYS_INLINE BlockedThreadList& thread_list_for_mode(Mode mode)
{
VERIFY(mode == Mode::Exclusive || mode == Mode::Shared);
return mode == Mode::Exclusive ? m_blocked_threads_list_exclusive : m_blocked_threads_list_shared;
}
void block(Thread&, Mode, ScopedSpinLock<SpinLock<u8>>&, u32);
void unblock_waiters(Mode);
const char* m_name { nullptr };
WaitQueue m_queue;
Atomic<Mode, AK::MemoryOrder::memory_order_relaxed> m_mode { Mode::Unlocked };
Mode m_mode { Mode::Unlocked };
// When locked exclusively, only the thread already holding the lock can
// lock it again. When locked in shared mode, any thread can do that.
@ -75,6 +85,11 @@ private:
// lock.
RefPtr<Thread> m_holder;
HashMap<Thread*, u32> m_shared_holders;
BlockedThreadList m_blocked_threads_list_exclusive;
BlockedThreadList m_blocked_threads_list_shared;
SpinLock<u8> m_lock;
};
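
The BlockedThreadList above is an intrusive list: the link node lives inside the Thread itself, so a blocked thread can be queued and dequeued in O(1) without any allocation, which is what makes the list safe to manipulate while holding the internal spinlock. A simplified illustration of the idea (not AK's IntrusiveList implementation; names are made up):

#include <cassert>

struct Thread; // element type, declared up front

struct IntrusiveNode {
    Thread* prev { nullptr };
    Thread* next { nullptr };
};

struct Thread {
    int id { 0 };
    IntrusiveNode blocked_list_node; // counterpart of m_blocked_threads_list_node
};

struct BlockedThreadList {
    Thread* head { nullptr };
    Thread* tail { nullptr };

    void append(Thread& t)
    {
        t.blocked_list_node.prev = tail;
        t.blocked_list_node.next = nullptr;
        (tail ? tail->blocked_list_node.next : head) = &t;
        tail = &t;
    }

    void remove(Thread& t)
    {
        auto& node = t.blocked_list_node;
        (node.prev ? node.prev->blocked_list_node.next : head) = node.next;
        (node.next ? node.next->blocked_list_node.prev : tail) = node.prev;
        node = {};
    }
};

int main()
{
    Thread a { 1 }, b { 2 };
    BlockedThreadList list;
    list.append(a);
    list.append(b);
    list.remove(a); // O(1), no allocation, safe under a spinlock
    assert(list.head == &b && list.tail == &b);
}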
class Locker {

Kernel/LockMode.h

@ -8,7 +8,7 @@
namespace Kernel {
enum class LockMode {
enum class LockMode : u8 {
Unlocked,
Shared,
Exclusive
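
Giving LockMode an explicit u8 underlying type keeps the mode to a single byte, presumably so that Lock can now hold a plain Mode member beside its SpinLock<u8> instead of an Atomic. Quick illustrative checks in standard C++ (unsigned char standing in for AK's u8):

#include <type_traits>

enum class LockMode : unsigned char {
    Unlocked,
    Shared,
    Exclusive
};

static_assert(sizeof(LockMode) == 1);
static_assert(std::is_same_v<std::underlying_type_t<LockMode>, unsigned char>);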

Kernel/Process.cpp

@ -117,7 +117,9 @@ void Process::kill_threads_except_self()
thread.set_should_die();
});
big_lock().clear_waiters();
u32 dropped_lock_count = 0;
if (big_lock().force_unlock_if_locked(dropped_lock_count) != LockMode::Unlocked)
dbgln("Process {} big lock had {} locks", *this, dropped_lock_count);
}
void Process::kill_all_threads()

Kernel/Thread.cpp

@ -175,6 +175,100 @@ Thread::~Thread()
}
}
void Thread::block(Kernel::Lock& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock, u32 lock_count)
{
VERIFY(!Processor::current().in_irq());
VERIFY(this == Thread::current());
ScopedCritical critical;
VERIFY(!s_mm_lock.own_lock());
ScopedSpinLock block_lock(m_block_lock);
VERIFY(!m_in_block);
m_in_block = true;
ScopedSpinLock scheduler_lock(g_scheduler_lock);
switch (state()) {
case Thread::Stopped:
// It's possible that we were requested to be stopped!
break;
case Thread::Running:
VERIFY(m_blocker == nullptr);
break;
default:
VERIFY_NOT_REACHED();
}
VERIFY(!m_blocking_lock);
m_blocking_lock = &lock;
m_lock_requested_count = lock_count;
set_state(Thread::Blocked);
scheduler_lock.unlock();
block_lock.unlock();
lock_lock.unlock();
dbgln_if(THREAD_DEBUG, "Thread {} blocking on Lock {}", *this, &lock);
for (;;) {
// Yield to the scheduler, and wait for us to resume unblocked.
VERIFY(!g_scheduler_lock.own_lock());
VERIFY(Processor::current().in_critical());
yield_while_not_holding_big_lock(); // We might hold the big lock though!
VERIFY(Processor::current().in_critical());
ScopedSpinLock block_lock2(m_block_lock);
if (should_be_stopped() || state() == Stopped) {
dbgln("Thread should be stopped, current state: {}", state_string());
set_state(Thread::Blocked);
continue;
}
VERIFY(!m_blocking_lock);
VERIFY(m_in_block);
m_in_block = false;
break;
}
lock_lock.lock();
}
u32 Thread::unblock_from_lock(Kernel::Lock& lock)
{
ScopedSpinLock block_lock(m_block_lock);
VERIFY(m_blocking_lock == &lock);
auto requested_count = m_lock_requested_count;
block_lock.unlock();
auto do_unblock = [&]() {
ScopedSpinLock scheduler_lock(g_scheduler_lock);
ScopedSpinLock block_lock(m_block_lock);
VERIFY(m_blocking_lock == &lock);
VERIFY(!Processor::current().in_irq());
VERIFY(g_scheduler_lock.own_lock());
VERIFY(m_block_lock.own_lock());
VERIFY(m_blocking_lock == &lock);
dbgln_if(THREAD_DEBUG, "Thread {} unblocked from Lock {}", *this, &lock);
m_blocking_lock = nullptr;
if (Thread::current() == this) {
set_state(Thread::Running);
return;
}
VERIFY(m_state != Thread::Runnable && m_state != Thread::Running);
set_state(Thread::Runnable);
};
if (Processor::current().in_irq()) {
Processor::current().deferred_call_queue([do_unblock = move(do_unblock), self = make_weak_ptr()]() {
if (auto this_thread = self.strong_ref())
do_unblock();
});
} else {
do_unblock();
}
return requested_count;
}
void Thread::unblock_from_blocker(Blocker& blocker)
{
auto do_unblock = [&]() {
@ -202,6 +296,8 @@ void Thread::unblock(u8 signal)
VERIFY(m_block_lock.own_lock());
if (m_state != Thread::Blocked)
return;
if (m_blocking_lock)
return;
VERIFY(m_blocker);
if (signal != 0) {
if (is_handling_page_fault()) {
@ -314,9 +410,11 @@ void Thread::exit(void* exit_value)
void Thread::yield_while_not_holding_big_lock()
{
VERIFY(!g_scheduler_lock.own_lock());
// Disable interrupts here. This ensures we don't accidentally switch contexts twice
InterruptDisabler disable;
Scheduler::yield(); // flag a switch
u32 prev_flags;
u32 prev_crit = Processor::current().clear_critical(prev_flags, true);
Scheduler::yield();
// NOTE: We may be on a different CPU now!
Processor::current().restore_critical(prev_crit, prev_flags);
}
@ -324,12 +422,14 @@ void Thread::yield_while_not_holding_big_lock()
void Thread::yield_without_holding_big_lock()
{
VERIFY(!g_scheduler_lock.own_lock());
// Disable interrupts here. This ensures we don't accidentally switch contexts twice
InterruptDisabler disable;
Scheduler::yield(); // flag a switch
u32 lock_count_to_restore = 0;
auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
// NOTE: Even though we call Scheduler::yield here, unless we happen
// to be outside of a critical section, the yield will be postponed
// until leaving it in relock_process.
Scheduler::yield();
relock_process(previous_locked, lock_count_to_restore);
}
@ -387,8 +487,11 @@ const char* Thread::state_string() const
return "Stopped";
case Thread::Blocked: {
ScopedSpinLock block_lock(m_block_lock);
VERIFY(m_blocker != nullptr);
return m_blocker->state_string();
if (m_blocking_lock)
return "Lock";
if (m_blocker)
return m_blocker->state_string();
VERIFY_NOT_REACHED();
}
}
PANIC("Thread::state_string(): Invalid state: {}", (int)state());
@ -705,7 +808,7 @@ void Thread::resume_from_stopped()
VERIFY(g_scheduler_lock.own_lock());
if (m_stop_state == Blocked) {
ScopedSpinLock block_lock(m_block_lock);
if (m_blocker) {
if (m_blocker || m_blocking_lock) {
// Hasn't been unblocked yet
set_state(Blocked, 0);
} else {
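
unblock_from_lock() above may be reached from an interrupt handler, where it is not safe to take the scheduler lock directly, so the actual state change is queued as a deferred call that holds only a weak reference to the thread. A userspace analogue of that pattern (the queue and flag below are assumed scaffolding, standing in for Processor::deferred_call_queue and IRQ context, not kernel API):

#include <functional>
#include <iostream>
#include <memory>
#include <queue>

struct Thread {
    void unblock() { std::cout << "thread unblocked\n"; }
};

std::queue<std::function<void()>> g_deferred_calls;
bool g_in_irq = false; // pretend flag for "are we in an interrupt handler?"

void unblock_from_lock(std::shared_ptr<Thread> const& thread)
{
    auto do_unblock = [thread_weak = std::weak_ptr(thread)] {
        // The thread may die between queueing and running the deferred call;
        // holding only a weak reference makes that window safe.
        if (auto strong_thread = thread_weak.lock())
            strong_thread->unblock();
    };
    if (g_in_irq)
        g_deferred_calls.push(do_unblock); // run later, outside interrupt context
    else
        do_unblock();
}

int main()
{
    auto thread = std::make_shared<Thread>();
    g_in_irq = true;
    unblock_from_lock(thread); // only queued for later
    g_in_irq = false;
    while (!g_deferred_calls.empty()) {
        g_deferred_calls.front()();
        g_deferred_calls.pop();
    }
}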

Kernel/Thread.h

@ -116,6 +116,7 @@ class Thread
AK_MAKE_NONCOPYABLE(Thread);
AK_MAKE_NONMOVABLE(Thread);
friend class Lock;
friend class Process;
friend class ProtectedProcessBase;
friend class Scheduler;
@ -823,6 +824,8 @@ public:
}
}
void block(Kernel::Lock&, ScopedSpinLock<SpinLock<u8>>&, u32);
template<typename BlockerType, class... Args>
[[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
{
@ -954,6 +957,7 @@ public:
return result;
}
u32 unblock_from_lock(Kernel::Lock&);
void unblock_from_blocker(Blocker&);
void unblock(u8 signal = 0);
@ -1280,6 +1284,9 @@ private:
Optional<Range> m_thread_specific_range;
Array<SignalActionData, NSIG> m_signal_action_data;
Blocker* m_blocker { nullptr };
Kernel::Lock* m_blocking_lock { nullptr };
u32 m_lock_requested_count { 0 };
IntrusiveListNode<Thread> m_blocked_threads_list_node;
bool m_may_die_immediately { true };
#if LOCK_DEBUG

Kernel/VM/Region.cpp

@ -436,7 +436,7 @@ PageFaultResponse Region::handle_fault(const PageFault& fault, ScopedSpinLock<Re
remap_vmobject_page(translate_to_vmobject_page(page_index_in_region));
return PageFaultResponse::Continue;
}
return handle_zero_fault(page_index_in_region);
return handle_zero_fault(page_index_in_region, mm_lock);
#else
dbgln("BUG! Unexpected NP fault at {}", fault.vaddr());
return PageFaultResponse::ShouldCrash;
@ -448,7 +448,7 @@ PageFaultResponse Region::handle_fault(const PageFault& fault, ScopedSpinLock<Re
auto* phys_page = physical_page(page_index_in_region);
if (phys_page->is_shared_zero_page() || phys_page->is_lazy_committed_page()) {
dbgln_if(PAGE_FAULT_DEBUG, "NP(zero) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
return handle_zero_fault(page_index_in_region);
return handle_zero_fault(page_index_in_region, mm_lock);
}
return handle_cow_fault(page_index_in_region);
}
@ -456,12 +456,29 @@ PageFaultResponse Region::handle_fault(const PageFault& fault, ScopedSpinLock<Re
return PageFaultResponse::ShouldCrash;
}
PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region, ScopedSpinLock<RecursiveSpinLock>& mm_lock)
{
VERIFY_INTERRUPTS_DISABLED();
VERIFY(vmobject().is_anonymous());
Locker locker(vmobject().m_paging_lock);
bool can_lock = Thread::current() && !g_scheduler_lock.own_lock();
if (can_lock) {
// TODO: This seems rather weird. If we don't have a current thread
// then we're in the Kernel in early initialization still. So we
// can't actually wait on the paging lock. And if we currently
// own the scheduler lock and we trigger zero faults, we also
// can't really wait. But do we actually need to wait here?
mm_lock.unlock();
VERIFY(!s_mm_lock.own_lock());
vmobject().m_paging_lock.lock();
mm_lock.lock();
}
ScopeGuard guard([&]() {
if (can_lock)
vmobject().m_paging_lock.unlock();
});
auto& page_slot = physical_page_slot(page_index_in_region);
auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
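
The dance in handle_zero_fault() follows from lock ordering: mm_lock is a spinlock and must not be held while acquiring the paging lock, which can block. A minimal sketch of the same pattern with standard mutexes (assumed names, not the kernel's types):

#include <mutex>

std::mutex g_mm_lock;     // stand-in for the MM spinlock
std::mutex g_paging_lock; // stand-in for the VMObject's blocking paging lock

void handle_zero_fault_locked(std::unique_lock<std::mutex>& mm_lock)
{
    // Blocking while holding mm_lock could deadlock or stall other CPUs,
    // so temporarily give it up before taking the lock that may sleep.
    mm_lock.unlock();
    std::unique_lock paging_lock(g_paging_lock); // may block here
    mm_lock.lock();
    // ... commit a zero page with both locks held, as the real code does ...
}

int main()
{
    std::unique_lock mm_lock(g_mm_lock);
    handle_zero_fault_locked(mm_lock);
}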

Kernel/VM/Region.h

@ -243,7 +243,7 @@ private:
PageFaultResponse handle_cow_fault(size_t page_index);
PageFaultResponse handle_inode_fault(size_t page_index, ScopedSpinLock<RecursiveSpinLock>&);
PageFaultResponse handle_zero_fault(size_t page_index);
PageFaultResponse handle_zero_fault(size_t page_index, ScopedSpinLock<RecursiveSpinLock>&);
bool map_individual_page_impl(size_t page_index);