diff --git a/CMakeLists.txt b/CMakeLists.txt
index c2df09f98d6..5468cdc687c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -62,7 +62,7 @@ if (ALL_THE_DEBUG_MACROS)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DICMP_DEBUG -DICO_DEBUG -DImage_DEBUG -DIMAGE_DECODER_CLIENT_DEBUG -DIMAGE_DECODER_DEBUG -DIMAGE_LOADER_DEBUG -DINTERPRETER_DEBUG -DINTERRUPT_DEBUG -DIOAPIC_DEBUG -DIPC_DEBUG -DIPV4_DEBUG -DIPV4_SOCKET_DEBUG -DIRC_DEBUG -DIRQ_DEBUG")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DJOB_DEBUG")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DKEYBOARD_DEBUG -DKEYBOARD_SHORTCUTS_DEBUG -DKMALLOC_DEBUG_LARGE_ALLOCATIONS")
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DLEXER_DEBUG -DLoader_DEBUG")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DLEXER_DEBUG -DLoader_DEBUG -DLOCK_DEBUG")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMALLOC_DEBUG -DMASTERPTY_DEBUG -DMBR_DEBUG -DMEMORY_DEBUG -DMENU_DEBUG -DMINIMIZE_ANIMATION_DEBUG -DMM_DEBUG -DMOVE_DEBUG -DMULTIPROCESSOR_DEBUG")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DNETWORK_TASK_DEBUG -DNT_DEBUG")
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DOBJECT_DEBUG -DOCCLUSIONS_DEBUG -DOFFD_DEBUG")
diff --git a/Kernel/Arch/i386/CPU.h b/Kernel/Arch/i386/CPU.h
index 9ae0f3941b3..9f6a6b9d1c5 100644
--- a/Kernel/Arch/i386/CPU.h
+++ b/Kernel/Arch/i386/CPU.h
@@ -1052,14 +1052,6 @@ public:
         return *this;
     }
 
-    void set_interrupt_flag_on_destruction(bool flag)
-    {
-        if (flag)
-            m_prev_flags |= 0x200;
-        else
-            m_prev_flags &= ~0x200;
-    }
-
     void leave()
     {
         ASSERT(m_valid);
diff --git a/Kernel/Forward.h b/Kernel/Forward.h
index 8711757201e..8dc4241e0d9 100644
--- a/Kernel/Forward.h
+++ b/Kernel/Forward.h
@@ -44,6 +44,7 @@ class InodeWatcher;
 class KBuffer;
 class KResult;
 class LocalSocket;
+class Lock;
 class MappedROM;
 class MasterPTY;
 class PageDirectory;
diff --git a/Kernel/Lock.cpp b/Kernel/Lock.cpp
index 870fe9b86d5..e7c523d5718 100644
--- a/Kernel/Lock.cpp
+++ b/Kernel/Lock.cpp
@@ -27,78 +27,87 @@
 #include
 #include
 #include
-#include
 
 namespace Kernel {
 
-static bool modes_conflict(Lock::Mode mode1, Lock::Mode mode2)
-{
-    if (mode1 == Lock::Mode::Unlocked || mode2 == Lock::Mode::Unlocked)
-        return false;
-
-    if (mode1 == Lock::Mode::Shared && mode2 == Lock::Mode::Shared)
-        return false;
-
-    return true;
-}
-
+#ifdef LOCK_DEBUG
 void Lock::lock(Mode mode)
 {
+    lock("unknown", 0, mode);
+}
+
+void Lock::lock(const char* file, int line, Mode mode)
+#else
+void Lock::lock(Mode mode)
+#endif
+{
+    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
+    // and also from within critical sections!
+    ASSERT(!Processor::current().in_irq());
     ASSERT(mode != Mode::Unlocked);
-    if (!are_interrupts_enabled()) {
-        klog() << "Interrupts disabled when trying to take Lock{" << m_name << "}";
-        dump_backtrace();
-        Processor::halt();
-    }
     auto current_thread = Thread::current();
+    ScopedCritical critical;
     for (;;) {
         if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
             do {
                 // FIXME: Do not add new readers if writers are queued.
-                bool modes_dont_conflict = !modes_conflict(m_mode, mode);
-                bool already_hold_exclusive_lock = m_mode == Mode::Exclusive && m_holder == current_thread;
-                if (modes_dont_conflict || already_hold_exclusive_lock) {
+                bool can_lock;
+                switch (m_mode) {
+                case Lock::Mode::Unlocked:
+                    can_lock = true;
+                    break;
+                case Lock::Mode::Shared:
+                    can_lock = (mode == Lock::Mode::Shared);
+                    break;
+                case Lock::Mode::Exclusive:
+                    can_lock = (m_holder == current_thread);
+                    break;
+                }
+                if (can_lock) {
                     // We got the lock!
-                    if (!already_hold_exclusive_lock)
+                    if (m_mode == Lock::Mode::Unlocked) {
                         m_mode = mode;
-                    m_holder = current_thread;
+                        ASSERT(m_times_locked == 0);
+                        if (mode == Mode::Exclusive)
+                            m_holder = current_thread;
+                    }
+#ifdef LOCK_DEBUG
+                    current_thread->holding_lock(*this, true, file, line);
+#endif
                     m_times_locked++;
                     m_lock.store(false, AK::memory_order_release);
                     return;
                 }
             } while (current_thread->wait_on(m_queue, m_name, nullptr, &m_lock, m_holder) == Thread::BlockResult::NotBlocked);
-        } else if (Processor::current().in_critical()) {
-            // If we're in a critical section and trying to lock, no context
-            // switch will happen, so yield.
-            // The assumption is that if we call this from a critical section
-            // that we DO want to temporarily leave it
-            u32 prev_flags;
-            u32 prev_crit = Processor::current().clear_critical(prev_flags, !Processor::current().in_irq());
-
-            Scheduler::yield();
-
-            // Note, we may now be on a different CPU!
-            Processor::current().restore_critical(prev_crit, prev_flags);
         } else {
-            // We need to process e.g. smp messages
-            Processor::wait_check();
+            // I don't know *who* is using "m_lock", so just yield.
+            Scheduler::yield_from_critical();
         }
     }
 }
 
 void Lock::unlock()
 {
+    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
+    // and also from within critical sections!
+    ASSERT(!Processor::current().in_irq());
     auto current_thread = Thread::current();
+    ScopedCritical critical;
     for (;;) {
         if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
             ASSERT(m_times_locked);
             --m_times_locked;
             ASSERT(m_mode != Mode::Unlocked);
-            if (m_mode == Mode::Exclusive)
+
+            if (m_mode == Mode::Exclusive) {
                 ASSERT(m_holder == current_thread);
-            if (m_holder == current_thread && (m_mode == Mode::Shared || m_times_locked == 0))
-                m_holder = nullptr;
+                if (m_times_locked == 0)
+                    m_holder = nullptr;
+            }
+#ifdef LOCK_DEBUG
+            current_thread->holding_lock(*this, false);
+#endif
 
             if (m_times_locked > 0) {
                 m_lock.store(false, AK::memory_order_release);
@@ -109,29 +118,36 @@ void Lock::unlock()
             return;
         }
         // I don't know *who* is using "m_lock", so just yield.
-        // The assumption is that if we call this from a critical section
-        // that we DO want to temporarily leave it
-        u32 prev_flags;
-        u32 prev_crit = Processor::current().clear_critical(prev_flags, false);
-
-        Scheduler::yield();
-
-        // Note, we may now be on a different CPU!
-        Processor::current().restore_critical(prev_crit, prev_flags);
+        Scheduler::yield_from_critical();
     }
 }
 
 bool Lock::force_unlock_if_locked()
 {
-    ASSERT(m_mode != Mode::Shared);
+    // NOTE: This may be called from an interrupt handler (not an IRQ handler)
+    // and also from within critical sections!
+    ASSERT(!Processor::current().in_irq());
     ScopedCritical critical;
-    if (m_holder != Thread::current())
-        return false;
-    ASSERT(m_times_locked == 1);
-    m_holder = nullptr;
-    m_mode = Mode::Unlocked;
-    m_times_locked = 0;
-    m_queue.wake_one();
+    for (;;) {
+        if (m_lock.exchange(true, AK::memory_order_acq_rel) == false) {
+            if (m_holder != Thread::current()) {
+                m_lock.store(false, AK::MemoryOrder::memory_order_release);
+                return false;
+            }
+            ASSERT(m_mode != Mode::Shared);
+            ASSERT(m_times_locked == 1);
+#ifdef LOCK_DEBUG
+            m_holder->holding_lock(*this, false);
+#endif
+            m_holder = nullptr;
+            m_mode = Mode::Unlocked;
+            m_times_locked = 0;
+            m_queue.wake_one(&m_lock);
+            break;
+        }
+        // I don't know *who* is using "m_lock", so just yield.
+        Scheduler::yield_from_critical();
+    }
     return true;
 }
diff --git a/Kernel/Lock.h b/Kernel/Lock.h
index 5d27ff936fc..81971551beb 100644
--- a/Kernel/Lock.h
+++ b/Kernel/Lock.h
@@ -31,6 +31,7 @@
 #include
 #include
 #include
+#include
 #include
 
 namespace Kernel {
@@ -50,6 +51,9 @@ public:
     };
 
     void lock(Mode = Mode::Exclusive);
+#ifdef LOCK_DEBUG
+    void lock(const char* file, int line, Mode mode = Mode::Exclusive);
+#endif
     void unlock();
     bool force_unlock_if_locked();
     bool is_locked() const { return m_holder; }
@@ -77,6 +81,13 @@ private:
 
 class Locker {
 public:
+#ifdef LOCK_DEBUG
+    ALWAYS_INLINE explicit Locker(const char* file, int line, Lock& l, Lock::Mode mode = Lock::Mode::Exclusive)
+        : m_lock(l)
+    {
+        m_lock.lock(file, line, mode);
+    }
+#endif
     ALWAYS_INLINE explicit Locker(Lock& l, Lock::Mode mode = Lock::Mode::Exclusive)
         : m_lock(l)
     {
@@ -90,7 +101,11 @@ private:
     Lock& m_lock;
 };
 
-#define LOCKER(...) Locker locker(__VA_ARGS__)
+#ifdef LOCK_DEBUG
+#    define LOCKER(...) Locker locker(__FILE__, __LINE__, __VA_ARGS__)
+#else
+#    define LOCKER(...) Locker locker(__VA_ARGS__)
+#endif
 
 template
 class Lockable {
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 20ef1e6810c..759732b8736 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -511,6 +511,21 @@ void Scheduler::invoke_async()
     pick_next();
 }
 
+void Scheduler::yield_from_critical()
+{
+    auto& proc = Processor::current();
+    ASSERT(proc.in_critical());
+    ASSERT(!proc.in_irq());
+
+    yield(); // Flag a context switch
+
+    u32 prev_flags;
+    u32 prev_crit = Processor::current().clear_critical(prev_flags, false);
+
+    // Note, we may now be on a different CPU!
+    Processor::current().restore_critical(prev_crit, prev_flags);
+}
+
 void Scheduler::notify_finalizer()
 {
     if (g_finalizer_has_work.exchange(true, AK::MemoryOrder::memory_order_acq_rel) == false)
diff --git a/Kernel/Scheduler.h b/Kernel/Scheduler.h
index c26fe3ac7e6..55a05ddc6c4 100644
--- a/Kernel/Scheduler.h
+++ b/Kernel/Scheduler.h
@@ -57,6 +57,7 @@ public:
     [[noreturn]] static void start();
     static bool pick_next();
     static bool yield();
+    static void yield_from_critical();
     static bool donate_to_and_switch(Thread*, const char* reason);
     static bool donate_to(RefPtr&, const char* reason);
     static bool context_switch(Thread*);
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index e5e2db25e63..512bb4c71ea 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -316,7 +316,16 @@ void Thread::finalize()
     ASSERT(Thread::current() == g_finalizer);
     ASSERT(Thread::current() != this);
 
+#ifdef LOCK_DEBUG
     ASSERT(!m_lock.own_lock());
+    if (lock_count() > 0) {
+        dbg() << "Thread " << *this << " leaking " << lock_count() << " Locks!";
+        ScopedSpinLock list_lock(m_holding_locks_lock);
+        for (auto& info : m_holding_locks_list)
+            dbg() << " - " << info.lock->name() << " @ " << info.lock << " locked at " << info.file << ":" << info.line << " count: " << info.count;
+        ASSERT_NOT_REACHED();
+    }
+#endif
 
     {
         ScopedSpinLock lock(g_scheduler_lock);
@@ -1080,10 +1089,9 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, const
             }
         });
         if (!timer) {
+            if (lock)
+                *lock = false;
             // We timed out already, don't block
-            // The API contract guarantees we return with interrupts enabled,
-            // regardless of how we got called
-            critical.set_interrupt_flag_on_destruction(true);
             return BlockResult::InterruptedByTimeout;
         }
     }
@@ -1094,16 +1102,13 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, const
             // The WaitQueue was already requested to wake someone when
            // nobody was waiting. So return right away as we shouldn't
             // be waiting
-
-            // The API contract guarantees we return with interrupts enabled,
-            // regardless of how we got called
-            critical.set_interrupt_flag_on_destruction(true);
+            // NOTE: Do not set lock to false in this case!
             return BlockResult::NotBlocked;
         }
 
-        did_unlock = unlock_process_if_locked();
         if (lock)
             *lock = false;
+        did_unlock = unlock_process_if_locked();
 
         set_state(State::Queued);
         m_wait_reason = reason;
@@ -1158,9 +1163,6 @@ Thread::BlockResult Thread::wait_on(WaitQueue& queue, const char* reason, const
         TimerQueue::the().cancel_timer(timer.release_nonnull());
     }
 
-    // The API contract guarantees we return with interrupts enabled,
-    // regardless of how we got called
-    sti();
     return result;
 }
 
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index bb40634f182..6dea7b95ec3 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -45,6 +45,8 @@
 #include
 #include
 
+//#define LOCK_DEBUG
+
 namespace Kernel {
 
 enum class DispatchSignalResult {
@@ -961,6 +963,44 @@ public:
     RecursiveSpinLock& get_lock() const { return m_lock; }
 
+#ifdef LOCK_DEBUG
+    void holding_lock(Lock& lock, bool holding, const char* file = nullptr, int line = 0)
+    {
+        m_holding_locks.fetch_add(holding ? 1 : -1, AK::MemoryOrder::memory_order_relaxed);
+        ScopedSpinLock list_lock(m_holding_locks_lock);
+        if (holding) {
+            bool have_existing = false;
+            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
+                auto& info = m_holding_locks_list[i];
+                if (info.lock == &lock) {
+                    have_existing = true;
+                    info.count++;
+                    break;
+                }
+            }
+            if (!have_existing)
+                m_holding_locks_list.append({ &lock, file ? file : "unknown", line, 1 });
+        } else {
+            bool found = false;
+            for (size_t i = 0; i < m_holding_locks_list.size(); i++) {
+                auto& info = m_holding_locks_list[i];
+                if (info.lock == &lock) {
+                    ASSERT(info.count > 0);
+                    if (--info.count == 0)
+                        m_holding_locks_list.remove(i);
+                    found = true;
+                    break;
+                }
+            }
+            ASSERT(found);
+        }
+    }
+    u32 lock_count() const
+    {
+        return m_holding_locks.load(AK::MemoryOrder::memory_order_relaxed);
+    }
+#endif
+
 private:
     IntrusiveListNode m_runnable_list_node;
     IntrusiveListNode m_wait_queue_node;
@@ -1004,9 +1044,11 @@
             auto& blocker = static_cast(b);
 
             // NOTE: m_lock is held already!
-            if (m_thread_did_exit)
+            if (m_thread_did_exit) {
                 blocker.unblock(exit_value(), true);
-            return m_thread_did_exit;
+                return false;
+            }
+            return true;
         }
 
     private:
@@ -1050,6 +1092,18 @@ private:
     const char* m_wait_reason { nullptr };
     WaitQueue* m_queue { nullptr };
 
+#ifdef LOCK_DEBUG
+    struct HoldingLockInfo {
+        Lock* lock;
+        const char* file;
+        int line;
+        unsigned count;
+    };
+    Atomic m_holding_locks { 0 };
+    SpinLock m_holding_locks_lock;
+    Vector m_holding_locks_list;
+#endif
+
     JoinBlockCondition m_join_condition;
     Atomic m_is_active { false };
     bool m_is_joinable { true };
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 9552264eb17..4182dee77cc 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -474,10 +474,9 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(vmobject().is_inode());
 
-    sti();
     LOCKER(vmobject().m_paging_lock);
-    cli();
+    ASSERT_INTERRUPTS_DISABLED();
 
     auto& inode_vmobject = static_cast(vmobject());
     auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[first_page_index() + page_index_in_region];
@@ -501,7 +500,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
 #ifdef MM_DEBUG
     dbg() << "MM: page_in_from_inode ready to read from inode";
 #endif
-    sti();
+
     u8 page_buffer[PAGE_SIZE];
     auto& inode = inode_vmobject.inode();
     auto buffer = UserOrKernelBuffer::for_kernel_buffer(page_buffer);
@@ -514,7 +513,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
         // If we read less than a page, zero out the rest to avoid leaking uninitialized data.
         memset(page_buffer + nread, 0, PAGE_SIZE - nread);
     }
-    cli();
+
     vmobject_physical_page_entry = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
     if (vmobject_physical_page_entry.is_null()) {
         klog() << "MM: handle_inode_fault was unable to allocate a physical page";
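
For readers unfamiliar with the technique used above: the LOCK_DEBUG machinery boils down to a macro variant that smuggles __FILE__/__LINE__ into the lock guard, plus a per-thread list of held locks that Thread::finalize() can dump when something leaks. A minimal, standalone user-space sketch of the same pattern follows. The names (DebugMutex, Guard, GUARD, DEBUG_GUARDS) are hypothetical stand-ins, std::mutex replaces Kernel::Lock, and repeated acquisitions are not counted the way Thread::holding_lock() counts them; this is an illustration of the pattern, not the kernel code.

    // Standalone sketch: a guard macro that records the call site while the
    // debug switch is on, and a per-thread list used to report leaked locks.
    #include <cstddef>
    #include <cstdio>
    #include <mutex>
    #include <vector>

    #define DEBUG_GUARDS 1 // analogous to LOCK_DEBUG

    struct HoldInfo {
        const void* lock;
        const char* file;
        int line;
    };

    // Per-thread list of currently held locks (roughly m_holding_locks_list).
    static thread_local std::vector<HoldInfo> g_held;

    class DebugMutex {
    public:
        void lock(const char* file = "unknown", int line = 0)
        {
            m_mutex.lock();
            g_held.push_back({ this, file, line });
        }
        void unlock()
        {
            // Mirror of holding_lock(*this, false): drop this lock's entry.
            for (std::size_t i = 0; i < g_held.size(); ++i) {
                if (g_held[i].lock == this) {
                    g_held.erase(g_held.begin() + i);
                    break;
                }
            }
            m_mutex.unlock();
        }

    private:
        std::mutex m_mutex;
    };

    class Guard {
    public:
        Guard(const char* file, int line, DebugMutex& m)
            : m_mutex(m)
        {
            m_mutex.lock(file, line);
        }
        explicit Guard(DebugMutex& m)
            : m_mutex(m)
        {
            m_mutex.lock();
        }
        ~Guard() { m_mutex.unlock(); }

    private:
        DebugMutex& m_mutex;
    };

    #if DEBUG_GUARDS
    // Same trick as the LOCK_DEBUG LOCKER: prepend the call site to the arguments.
    #    define GUARD(...) Guard guard(__FILE__, __LINE__, __VA_ARGS__)
    #else
    #    define GUARD(...) Guard guard(__VA_ARGS__)
    #endif

    static void report_leaks()
    {
        // Analogous to the leak check added to Thread::finalize().
        for (auto& info : g_held)
            std::printf("leaked lock acquired at %s:%d\n", info.file, info.line);
    }

    int main()
    {
        DebugMutex mutex;
        {
            GUARD(mutex); // records __FILE__:__LINE__ while held
        }
        report_leaks(); // prints nothing; the guard released the lock
    }

The reason for capturing the call site in the macro rather than inside the guard itself is that call sites do not change and, with the debug switch off, the macro expands to exactly the code that existed before the instrumentation was added, which is the same property the #else branch of the LOCKER change preserves.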