diff --git a/CMakeLists.txt b/CMakeLists.txt index 925382276b9..4babb9f3f8c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -62,7 +62,7 @@ if (ALL_THE_DEBUG_MACROS) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DICMP_DEBUG -DICO_DEBUG -DImage_DEBUG -DIMAGE_DECODER_CLIENT_DEBUG -DIMAGE_DECODER_DEBUG -DIMAGE_LOADER_DEBUG -DINTERRUPT_DEBUG -DIOAPIC_DEBUG -DIPC_DEBUG -DIPV4_DEBUG -DIPV4_SOCKET_DEBUG -DIRC_DEBUG -DIRQ_DEBUG") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DJOB_DEBUG -DJPG_DEBUG") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DKEYBOARD_DEBUG -DKEYBOARD_SHORTCUTS_DEBUG -DKMALLOC_DEBUG_LARGE_ALLOCATIONS") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DLEXER_DEBUG -DLoader_DEBUG -DLOCK_DEBUG -DLOOKUPSERVER_DEBUG") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DLEXER_DEBUG -DLoader_DEBUG -DLOCK_DEBUG -DLOCK_TRACE_DEBUG -DLOCK_RESTORE_DEBUG -DLOOKUPSERVER_DEBUG") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMALLOC_DEBUG -DMASTERPTY_DEBUG -DMBR_DEBUG -DMEMORY_DEBUG -DMENU_DEBUG -DMINIMIZE_ANIMATION_DEBUG -DMM_DEBUG -DMOVE_DEBUG -DMULTIPROCESSOR_DEBUG") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DNETWORK_TASK_DEBUG -DNT_DEBUG") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DOBJECT_DEBUG -DOCCLUSIONS_DEBUG -DOFFD_DEBUG") diff --git a/Kernel/Lock.cpp b/Kernel/Lock.cpp index 7ef642cce62..416edee21f8 100644 --- a/Kernel/Lock.cpp +++ b/Kernel/Lock.cpp @@ -27,6 +27,10 @@ #include #include #include +#include + +//#define LOCK_TRACE_DEBUG +//#define LOCK_RESTORE_DEBUG namespace Kernel { @@ -46,38 +50,78 @@ void Lock::lock(Mode mode) ASSERT(!Processor::current().in_irq()); ASSERT(mode != Mode::Unlocked); auto current_thread = Thread::current(); - ScopedCritical critical; + ScopedCritical critical; // in case we're not in a critical section already for (;;) { if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) { do { // FIXME: Do not add new readers if writers are queued. - bool can_lock; - switch (m_mode) { - case Lock::Mode::Unlocked: - can_lock = true; - break; - case Lock::Mode::Shared: - can_lock = (mode == Lock::Mode::Shared); - break; - case Lock::Mode::Exclusive: - can_lock = (m_holder == current_thread); - break; - } - if (can_lock) { - // We got the lock! 
- if (m_mode == Lock::Mode::Unlocked) { - m_mode = mode; - ASSERT(m_times_locked == 0); - if (mode == Mode::Exclusive) - m_holder = current_thread; - } -#ifdef LOCK_DEBUG - current_thread->holding_lock(*this, true, file, line); + auto current_mode = m_mode.load(AK::MemoryOrder::memory_order_relaxed); + switch (current_mode) { + case Mode::Unlocked: { +#ifdef LOCK_TRACE_DEBUG + dbg() << "Lock::lock @ " << this << ": acquire " << mode_to_string(mode) << ", currently unlocked"; #endif + m_mode.store(mode, AK::MemoryOrder::memory_order_relaxed); + ASSERT(!m_holder); + ASSERT(m_shared_holders.is_empty()); + if (mode == Mode::Exclusive) { + m_holder = current_thread; + } else { + ASSERT(mode == Mode::Shared); + m_shared_holders.set(current_thread, 1); + } + ASSERT(m_times_locked == 0); m_times_locked++; +#ifdef LOCK_DEBUG + current_thread->holding_lock(*this, 1, file, line); +#endif m_lock.store(false, AK::memory_order_release); return; } + case Mode::Exclusive: { + ASSERT(m_holder); + if (m_holder != current_thread) + break; + ASSERT(m_shared_holders.is_empty()); +#ifdef LOCK_TRACE_DEBUG + if (mode == Mode::Exclusive) + dbg() << "Lock::lock @ " << this << ": acquire " << mode_to_string(mode) << ", currently exclusive, holding: " << m_times_locked; + else + dbg() << "Lock::lock @ " << this << ": acquire exclusive (requested " << mode_to_string(mode) << "), currently exclusive, holding " << m_times_locked; +#endif + ASSERT(mode == Mode::Exclusive || mode == Mode::Shared); + ASSERT(m_times_locked > 0); + m_times_locked++; +#ifdef LOCK_DEBUG + current_thread->holding_lock(*this, 1, file, line); +#endif + m_lock.store(false, AK::memory_order_release); + return; + } + case Mode::Shared: { + ASSERT(!m_holder); + if (mode != Mode::Shared) + break; +#ifdef LOCK_TRACE_DEBUG + dbg() << "Lock::lock @ " << this << ": acquire " << mode_to_string(mode) << ", currently shared, locks held: " << m_times_locked; +#endif + ASSERT(m_times_locked > 0); + m_times_locked++; + ASSERT(!m_shared_holders.is_empty()); + auto it = m_shared_holders.find(current_thread); + if (it != m_shared_holders.end()) + it->value++; + else + m_shared_holders.set(current_thread, 1); +#ifdef LOCK_DEBUG + current_thread->holding_lock(*this, 1, file, line); +#endif + m_lock.store(false, AK::memory_order_release); + return; + } + default: + ASSERT_NOT_REACHED(); + } m_lock.store(false, AK::memory_order_release); } while (m_queue.wait_on(nullptr, m_name) == Thread::BlockResult::NotBlocked); } else { @@ -93,28 +137,53 @@ void Lock::unlock() // and also from within critical sections! 
ASSERT(!Processor::current().in_irq()); auto current_thread = Thread::current(); - ScopedCritical critical; + ScopedCritical critical; // in case we're not in a critical section already for (;;) { if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) { - ASSERT(m_times_locked); - --m_times_locked; + auto current_mode = m_mode.load(AK::MemoryOrder::memory_order_relaxed); +#ifdef LOCK_TRACE_DEBUG + if (current_mode == Mode::Shared) + dbg() << "Lock::unlock @ " << this << ": release " << mode_to_string(current_mode) << ", locks held: " << m_times_locked; + else + dbg() << "Lock::unlock @ " << this << ": release " << mode_to_string(current_mode) << ", holding: " << m_times_locked; +#endif + ASSERT(current_mode != Mode::Unlocked); - ASSERT(m_mode != Mode::Unlocked); + ASSERT(m_times_locked > 0); + m_times_locked--; - if (m_mode == Mode::Exclusive) { + switch (current_mode) { + case Mode::Exclusive: ASSERT(m_holder == current_thread); + ASSERT(m_shared_holders.is_empty()); if (m_times_locked == 0) m_holder = nullptr; + break; + case Mode::Shared: { + ASSERT(!m_holder); + auto it = m_shared_holders.find(current_thread); + ASSERT(it != m_shared_holders.end()); + if (it->value > 1) { + it->value--; + } else { + ASSERT(it->value > 0); + m_shared_holders.remove(it); + } + break; } + default: + ASSERT_NOT_REACHED(); + } + + if (m_times_locked == 0) { + ASSERT(current_mode == Mode::Exclusive ? !m_holder : m_shared_holders.is_empty()); + m_mode.store(Mode::Unlocked, AK::MemoryOrder::memory_order_relaxed); + } + #ifdef LOCK_DEBUG - current_thread->holding_lock(*this, false); + current_thread->holding_lock(*this, -1); #endif - if (m_times_locked > 0) { - m_lock.store(false, AK::memory_order_release); - return; - } - m_mode = Mode::Unlocked; m_lock.store(false, AK::memory_order_release); m_queue.wake_one(); return; @@ -124,39 +193,153 @@ void Lock::unlock() } } -bool Lock::force_unlock_if_locked() +auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode { // NOTE: This may be called from an interrupt handler (not an IRQ handler) // and also from within critical sections! 
 ASSERT(!Processor::current().in_irq());
-    ScopedCritical critical;
+    auto current_thread = Thread::current();
+    ScopedCritical critical; // in case we're not in a critical section already
     for (;;) {
         if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
-            if (m_holder != Thread::current()) {
-                m_lock.store(false, AK::MemoryOrder::memory_order_release);
-                return false;
-            }
-            ASSERT(m_mode != Mode::Shared);
-            ASSERT(m_times_locked == 1);
-#ifdef LOCK_DEBUG
-            m_holder->holding_lock(*this, false);
+            Mode previous_mode;
+            auto current_mode = m_mode.load(AK::MemoryOrder::memory_order_relaxed);
+            switch (current_mode) {
+            case Mode::Exclusive: {
+                if (m_holder != current_thread) {
+                    m_lock.store(false, AK::MemoryOrder::memory_order_release);
+                    lock_count_to_restore = 0;
+                    return Mode::Unlocked;
+                }
+#ifdef LOCK_RESTORE_DEBUG
+                dbg() << "Lock::force_unlock_if_locked @ " << this << ": unlocking exclusive with lock count: " << m_times_locked;
 #endif
-            m_holder = nullptr;
-            m_mode = Mode::Unlocked;
-            m_times_locked = 0;
-            m_lock.store(false, AK::memory_order_release);
+                m_holder = nullptr;
+                ASSERT(m_times_locked > 0);
+                lock_count_to_restore = m_times_locked;
+                m_times_locked = 0;
+                m_mode.store(Mode::Unlocked, AK::MemoryOrder::memory_order_relaxed);
+                m_lock.store(false, AK::memory_order_release);
+#ifdef LOCK_DEBUG
+                current_thread->holding_lock(*this, -(int)lock_count_to_restore);
+#endif
+                previous_mode = Mode::Exclusive;
+                break;
+            }
+            case Mode::Shared: {
+                ASSERT(!m_holder);
+                auto it = m_shared_holders.find(current_thread);
+                if (it == m_shared_holders.end()) {
+                    m_lock.store(false, AK::MemoryOrder::memory_order_release);
+                    lock_count_to_restore = 0;
+                    return Mode::Unlocked;
+                }
+#ifdef LOCK_RESTORE_DEBUG
+                dbg() << "Lock::force_unlock_if_locked @ " << this << ": unlocking shared with lock count: " << it->value << ", total locks: " << m_times_locked;
+#endif
+                ASSERT(it->value > 0);
+                lock_count_to_restore = it->value;
+                ASSERT(lock_count_to_restore > 0);
+#ifdef LOCK_DEBUG
+                current_thread->holding_lock(*this, -(int)lock_count_to_restore);
+#endif
+                m_shared_holders.remove(it);
+                ASSERT(m_times_locked >= lock_count_to_restore);
+                m_times_locked -= lock_count_to_restore;
+                if (m_times_locked == 0)
+                    m_mode.store(Mode::Unlocked, AK::MemoryOrder::memory_order_relaxed);
+                m_lock.store(false, AK::memory_order_release);
+                previous_mode = Mode::Shared;
+                break;
+            }
+            case Mode::Unlocked: {
+                m_lock.store(false, AK::memory_order_relaxed);
+                lock_count_to_restore = 0;
+                previous_mode = Mode::Unlocked;
+                break;
+            }
+            default:
+                ASSERT_NOT_REACHED();
+            }
             m_queue.wake_one();
-            break;
+            return previous_mode;
+        }
+        // I don't know *who* is using "m_lock", so just yield.
+        Scheduler::yield_from_critical();
+    }
+}
+
+#ifdef LOCK_DEBUG
+void Lock::restore_lock(Mode mode, u32 lock_count)
+{
+    return restore_lock("unknown", 0, mode, lock_count);
+}
+
+void Lock::restore_lock(const char* file, int line, Mode mode, u32 lock_count)
+#else
+void Lock::restore_lock(Mode mode, u32 lock_count)
+#endif
+{
+    ASSERT(mode != Mode::Unlocked);
+    ASSERT(lock_count > 0);
+    ASSERT(!Processor::current().in_irq());
+    auto current_thread = Thread::current();
+    ScopedCritical critical; // in case we're not in a critical section already
+    for (;;) {
+        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
+            switch (mode) {
+            case Mode::Exclusive: {
+                auto expected_mode = Mode::Unlocked;
+                if (!m_mode.compare_exchange_strong(expected_mode, Mode::Exclusive, AK::MemoryOrder::memory_order_relaxed))
+                    break;
+#ifdef LOCK_RESTORE_DEBUG
+                dbg() << "Lock::restore_lock @ " << this << ": restoring " << mode_to_string(mode) << " with lock count " << lock_count << ", was unlocked";
+#endif
+                ASSERT(m_times_locked == 0);
+                m_times_locked = lock_count;
+                ASSERT(!m_holder);
+                ASSERT(m_shared_holders.is_empty());
+                m_holder = current_thread;
+                m_lock.store(false, AK::memory_order_release);
+#ifdef LOCK_DEBUG
+                m_holder->holding_lock(*this, (int)lock_count, file, line);
+#endif
+                return;
+            }
+            case Mode::Shared: {
+                auto expected_mode = Mode::Unlocked;
+                if (!m_mode.compare_exchange_strong(expected_mode, Mode::Shared, AK::MemoryOrder::memory_order_relaxed) && expected_mode != Mode::Shared)
+                    break;
+#ifdef LOCK_RESTORE_DEBUG
+                dbg() << "Lock::restore_lock @ " << this << ": restoring " << mode_to_string(mode) << " with lock count " << lock_count << ", was " << mode_to_string(expected_mode);
+#endif
+                ASSERT(expected_mode == Mode::Shared || m_times_locked == 0);
+                m_times_locked += lock_count;
+                ASSERT(!m_holder);
+                ASSERT((expected_mode == Mode::Unlocked) == m_shared_holders.is_empty());
+                auto set_result = m_shared_holders.set(current_thread, lock_count);
+                // There may be other shared lock holders already, but we should not have an entry yet
+                ASSERT(set_result == AK::HashSetResult::InsertedNewEntry);
+                m_lock.store(false, AK::memory_order_release);
+#ifdef LOCK_DEBUG
+                current_thread->holding_lock(*this, (int)lock_count, file, line);
+#endif
+                return;
+            }
+            default:
+                ASSERT_NOT_REACHED();
+            }
+
+            m_lock.store(false, AK::memory_order_relaxed);
         }
         // I don't know *who* is using "m_lock", so just yield.
         Scheduler::yield_from_critical();
     }
-    return true;
 }
 
 void Lock::clear_waiters()
 {
-    ASSERT(m_mode != Mode::Shared);
+    ASSERT(m_mode.load(AK::MemoryOrder::memory_order_relaxed) != Mode::Shared);
     m_queue.wake_all();
 }
diff --git a/Kernel/Lock.h b/Kernel/Lock.h
index 81971551beb..85a0ad08b41 100644
--- a/Kernel/Lock.h
+++ b/Kernel/Lock.h
@@ -28,44 +28,60 @@
 #include
 #include
+#include <AK/HashMap.h>
 #include
 #include
 #include
-#include
+#include <Kernel/LockMode.h>
 #include
 
 namespace Kernel {
 
 class Lock {
+    AK_MAKE_NONCOPYABLE(Lock);
+    AK_MAKE_NONMOVABLE(Lock);
+
 public:
+    using Mode = LockMode;
+
     Lock(const char* name = nullptr)
         : m_name(name)
     {
     }
     ~Lock() { }
 
-    enum class Mode {
-        Unlocked,
-        Shared,
-        Exclusive
-    };
-
     void lock(Mode = Mode::Exclusive);
 #ifdef LOCK_DEBUG
     void lock(const char* file, int line, Mode mode = Mode::Exclusive);
+    void restore_lock(const char* file, int line, Mode, u32);
 #endif
     void unlock();
-    bool force_unlock_if_locked();
-    bool is_locked() const { return m_holder; }
+    [[nodiscard]] Mode force_unlock_if_locked(u32&);
+    void restore_lock(Mode, u32);
+    bool is_locked() const { return m_mode.load(AK::MemoryOrder::memory_order_relaxed) != Mode::Unlocked; }
     void clear_waiters();
 
     const char* name() const { return m_name; }
 
+    static const char* mode_to_string(Mode mode)
+    {
+        switch (mode) {
+        case Mode::Unlocked:
+            return "unlocked";
+        case Mode::Exclusive:
+            return "exclusive";
+        case Mode::Shared:
+            return "shared";
+        default:
+            return "invalid";
+        }
+    }
+
 private:
     Atomic<bool> m_lock { false };
     const char* m_name { nullptr };
     WaitQueue m_queue;
-    Mode m_mode { Mode::Unlocked };
+    Atomic<Mode> m_mode { Mode::Unlocked };
 
     // When locked exclusively, only the thread already holding the lock can
    // lock it again. When locked in shared mode, any thread can do that.
@@ -77,6 +93,7 @@ private:
     // When locked exclusively, this is always the one thread that holds the
     // lock.
     RefPtr<Thread> m_holder;
+    HashMap<Thread*, u32> m_shared_holders;
 };
 
 class Locker {
@@ -103,8 +120,10 @@ private:
 
 #ifdef LOCK_DEBUG
 #    define LOCKER(...) Locker locker(__FILE__, __LINE__, __VA_ARGS__)
+#    define RESTORE_LOCK(lock, ...) (lock).restore_lock(__FILE__, __LINE__, __VA_ARGS__)
 #else
 #    define LOCKER(...) Locker locker(__VA_ARGS__)
+#    define RESTORE_LOCK(lock, ...) (lock).restore_lock(__VA_ARGS__)
 #endif
 
 template<typename T>
diff --git a/Kernel/LockMode.h b/Kernel/LockMode.h
new file mode 100644
index 00000000000..d08222b6710
--- /dev/null
+++ b/Kernel/LockMode.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020, The SerenityOS developers.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ *    list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace Kernel {
+
+enum class LockMode {
+    Unlocked,
+    Shared,
+    Exclusive
+};
+
+}
diff --git a/Kernel/Syscalls/execve.cpp b/Kernel/Syscalls/execve.cpp
index 9fbd7c1a975..6b037b3f1dd 100644
--- a/Kernel/Syscalls/execve.cpp
+++ b/Kernel/Syscalls/execve.cpp
@@ -348,7 +348,8 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
         ScopedSpinLock lock(g_scheduler_lock);
         new_main_thread->set_state(Thread::State::Runnable);
     }
-    big_lock().force_unlock_if_locked();
+    u32 lock_count_to_restore;
+    (void)big_lock().force_unlock_if_locked(lock_count_to_restore);
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(Processor::current().in_critical());
     return 0;
diff --git a/Kernel/Syscalls/sched.cpp b/Kernel/Syscalls/sched.cpp
index 8882a0c627d..f497a26ca0f 100644
--- a/Kernel/Syscalls/sched.cpp
+++ b/Kernel/Syscalls/sched.cpp
@@ -41,14 +41,11 @@ int Process::sys$donate(pid_t tid)
     if (tid < 0)
         return -EINVAL;
 
-    // We don't strictly need to grab the scheduler lock here, but it
-    // will close a race where we can find the thread but it disappears
-    // before we call Scheduler::donate_to.
-    ScopedSpinLock lock(g_scheduler_lock);
+    ScopedCritical critical;
     auto thread = Thread::from_tid(tid);
     if (!thread || thread->pid() != pid())
         return -ESRCH;
-    Scheduler::donate_to(thread, "sys$donate");
+    Thread::current()->donate_without_holding_big_lock(thread, "sys$donate");
     return 0;
 }
 
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index 0a006213c24..18482823de6 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -212,7 +212,8 @@ void Thread::die_if_needed()
     if (!m_should_die)
         return;
 
-    unlock_process_if_locked();
+    u32 unlock_count;
+    (void)unlock_process_if_locked(unlock_count);
 
     ScopedCritical critical;
     set_should_die();
@@ -238,7 +239,8 @@ void Thread::exit(void* exit_value)
 {
     ASSERT(Thread::current() == this);
     m_join_condition.thread_did_exit(exit_value);
     set_should_die();
-    unlock_process_if_locked();
+    u32 unlock_count;
+    (void)unlock_process_if_locked(unlock_count);
     die_if_needed();
 }
 
@@ -255,25 +257,33 @@ void Thread::yield_while_not_holding_big_lock()
 
 void Thread::yield_without_holding_big_lock()
 {
     ASSERT(!g_scheduler_lock.own_lock());
-    bool did_unlock = unlock_process_if_locked();
+    u32 lock_count_to_restore = 0;
+    auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
     // NOTE: Even though we call Scheduler::yield here, unless we happen
     // to be outside of a critical section, the yield will be postponed
     // until leaving it in relock_process.
     Scheduler::yield();
-    relock_process(did_unlock);
+    relock_process(previous_locked, lock_count_to_restore);
 }
 
-bool Thread::unlock_process_if_locked()
+void Thread::donate_without_holding_big_lock(RefPtr<Thread>& thread, const char* reason)
 {
-    return process().big_lock().force_unlock_if_locked();
+    ASSERT(!g_scheduler_lock.own_lock());
+    u32 lock_count_to_restore = 0;
+    auto previous_locked = unlock_process_if_locked(lock_count_to_restore);
+    // NOTE: Even though we call Scheduler::donate_to here, unless we happen
+    // to be outside of a critical section, the donation will be postponed
+    // until leaving it in relock_process.
+    Scheduler::donate_to(thread, reason);
+    relock_process(previous_locked, lock_count_to_restore);
 }
 
-void Thread::lock_process()
+LockMode Thread::unlock_process_if_locked(u32& lock_count_to_restore)
 {
-    process().big_lock().lock();
+    return process().big_lock().force_unlock_if_locked(lock_count_to_restore);
 }
 
-void Thread::relock_process(bool did_unlock)
+void Thread::relock_process(LockMode previous_locked, u32 lock_count_to_restore)
 {
     // Clearing the critical section may trigger the context switch
     // flagged by calling Scheduler::donate_to or Scheduler::yield
@@ -282,13 +292,15 @@ void Thread::relock_process(bool did_unlock)
     u32 prev_flags;
     u32 prev_crit = Processor::current().clear_critical(prev_flags, true);
 
-    if (did_unlock) {
-        // We've unblocked, relock the process if needed and carry on.
-        process().big_lock().lock();
-    }
+    // CONTEXT SWITCH HAPPENS HERE!
 
     // NOTE: We may be on a different CPU now!
     Processor::current().restore_critical(prev_crit, prev_flags);
+
+    if (previous_locked != LockMode::Unlocked) {
+        // We've unblocked, relock the process if needed and carry on.
+        RESTORE_LOCK(process().big_lock(), previous_locked, lock_count_to_restore);
+    }
 }
 
 auto Thread::sleep(clockid_t clock_id, const timespec& duration, timespec* remaining_time) -> BlockResult
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 498afeebe8b..5a1c51702cb 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -38,6 +38,7 @@
 #include
 #include
 #include
+#include <Kernel/LockMode.h>
 #include
 #include
 #include
@@ -781,6 +782,7 @@ public:
     [[nodiscard]] BlockResult block(const BlockTimeout& timeout, Args&&... args)
     {
         ASSERT(!Processor::current().in_irq());
+        ASSERT(this == Thread::current());
         ScopedCritical critical;
         ScopedSpinLock scheduler_lock(g_scheduler_lock);
         ScopedSpinLock block_lock(m_block_lock);
@@ -844,12 +846,14 @@
         block_lock.unlock();
 
         bool did_timeout = false;
-        bool did_unlock = false;
+        auto previous_locked = LockMode::Unlocked;
+        u32 lock_count_to_restore = 0;
         for (;;) {
             scheduler_lock.unlock();
 
             // Yield to the scheduler, and wait for us to resume unblocked.
-            did_unlock |= unlock_process_if_locked();
+            if (previous_locked == LockMode::Unlocked)
+                previous_locked = unlock_process_if_locked(lock_count_to_restore);
 
             ASSERT(!g_scheduler_lock.own_lock());
             ASSERT(Processor::current().in_critical());
@@ -896,10 +900,10 @@
             // (e.g. if it's on another processor)
             TimerQueue::the().cancel_timer(timer.release_nonnull());
         }
-        if (did_unlock) {
+        if (previous_locked != LockMode::Unlocked) {
             // NOTE: this may trigger another call to Thread::block(), so
             // we need to do this after we're all done and restored m_in_block!
-            lock_process();
+            relock_process(previous_locked, lock_count_to_restore);
         }
         return result;
     }
 
@@ -907,6 +911,13 @@ public:
     void unblock_from_blocker(Blocker&);
     void unblock(u8 signal = 0);
 
+    template<typename... Args>
+    Thread::BlockResult wait_on(WaitQueue& wait_queue, const Thread::BlockTimeout& timeout, Args&&... args)
+    {
+        ASSERT(this == Thread::current());
+        return block<Thread::QueueBlocker>(timeout, wait_queue, forward<Args>(args)...);
+    }
+
     BlockResult sleep(clockid_t, const timespec&, timespec* = nullptr);
     BlockResult sleep(const timespec& duration, timespec* remaining_time = nullptr)
     {
@@ -1062,29 +1073,32 @@ public:
     RecursiveSpinLock& get_lock() const { return m_lock; }
 
 #ifdef LOCK_DEBUG
-    void holding_lock(Lock& lock, bool holding, const char* file = nullptr, int line = 0)
+    void holding_lock(Lock& lock, int refs_delta, const char* file = nullptr, int line = 0)
     {
-        m_holding_locks.fetch_add(holding ? 1 : -1, AK::MemoryOrder::memory_order_relaxed);
+        ASSERT(refs_delta != 0);
+        m_holding_locks.fetch_add(refs_delta, AK::MemoryOrder::memory_order_relaxed);
         ScopedSpinLock list_lock(m_holding_locks_lock);
-        if (holding) {
+        if (refs_delta > 0) {
             bool have_existing = false;
             for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                 auto& info = m_holding_locks_list[i];
                 if (info.lock == &lock) {
                     have_existing = true;
-                    info.count++;
+                    info.count += refs_delta;
                     break;
                 }
             }
             if (!have_existing)
-                m_holding_locks_list.append({ &lock, file ? file : "unknown", line, 1 });
+                m_holding_locks_list.append({ &lock, file ? file : "unknown", line, (unsigned)refs_delta });
         } else {
+            ASSERT(refs_delta < 0);
             bool found = false;
             for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
                 auto& info = m_holding_locks_list[i];
                 if (info.lock == &lock) {
-                    ASSERT(info.count > 0);
-                    if (--info.count == 0)
+                    ASSERT(info.count >= (unsigned)-refs_delta);
+                    info.count -= (unsigned)-refs_delta;
+                    if (info.count == 0)
                         m_holding_locks_list.remove(i);
                     found = true;
                     break;
@@ -1162,9 +1176,8 @@
         bool m_thread_did_exit { false };
     };
 
-    bool unlock_process_if_locked();
-    void lock_process();
-    void relock_process(bool did_unlock);
+    LockMode unlock_process_if_locked(u32&);
+    void relock_process(LockMode, u32);
     String backtrace_impl();
     void reset_fpu_state();
@@ -1234,6 +1247,7 @@
     Atomic<bool> m_have_any_unmasked_pending_signals { false };
 
     void yield_without_holding_big_lock();
+    void donate_without_holding_big_lock(RefPtr<Thread>&, const char*);
     void yield_while_not_holding_big_lock();
     void update_state_for_thread(Thread::State previous_state);
 };
diff --git a/Kernel/WaitQueue.cpp b/Kernel/WaitQueue.cpp
index 47daebdf73b..d9afc20fbef 100644
--- a/Kernel/WaitQueue.cpp
+++ b/Kernel/WaitQueue.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2020, The SerenityOS developers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
diff --git a/Kernel/WaitQueue.h b/Kernel/WaitQueue.h
index dd2b45a3130..d0273b6e3b6 100644
--- a/Kernel/WaitQueue.h
+++ b/Kernel/WaitQueue.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2020, The SerenityOS developers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
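
Usage note: the heart of this patch is the force_unlock_if_locked()/restore_lock() pair, which lets a thread fully release a Lock (shared or exclusive, at any recursion depth) before blocking or yielding, and later reacquire it in exactly the same state; relock_process() above is the canonical caller. Below is a minimal sketch of that pattern. The function name example_drop_and_restore and the do_blocking_work() call are illustrative stand-ins, not part of this patch:

    #include <Kernel/Lock.h>

    namespace Kernel {

    extern void do_blocking_work(); // hypothetical: anything that must not hold the lock

    void example_drop_and_restore(Lock& big_lock)
    {
        // Fully release the lock, remembering the mode and recursion count it
        // was held with. If this thread did not hold it at all, we get back
        // Mode::Unlocked and a count of 0, and nothing is changed.
        u32 lock_count_to_restore = 0;
        auto previous_mode = big_lock.force_unlock_if_locked(lock_count_to_restore);

        do_blocking_work();

        // Reacquire the lock in its previous mode and at its previous recursion
        // depth. The RESTORE_LOCK macro forwards __FILE__/__LINE__ to
        // restore_lock() when LOCK_DEBUG is enabled.
        if (previous_mode != LockMode::Unlocked)
            RESTORE_LOCK(big_lock, previous_mode, lock_count_to_restore);
    }

    }

Note the design choice this enables: because force_unlock_if_locked() reports both the mode and the count, a recursively held big lock no longer collapses to a single acquisition across a block, which is what the old bool-returning force_unlock_if_locked()/lock_process() pair silently did.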