ladybird/Kernel/FutexQueue.cpp
Andreas Kling 5d180d1f99 Everywhere: Rename ASSERT => VERIFY
(...and ASSERT_NOT_REACHED => VERIFY_NOT_REACHED)

Since all of these checks are done in release builds as well,
let's rename them to VERIFY to prevent confusion, as everyone is
used to assertions being compiled out in release.

We can introduce a new ASSERT macro that is specifically for debug
checks, but I'm doing this wholesale conversion first since we've
accumulated thousands of these already, and it's not immediately
obvious which ones are suitable for ASSERT.
2021-02-23 20:56:54 +01:00
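
A minimal sketch of the distinction the commit describes, using illustrative macro names (MY_VERIFY / MY_ASSERT) rather than the kernel's actual definitions: the VERIFY-style check stays active in release builds, while a debug-only ASSERT compiles away when NDEBUG is defined.

#include <cstdio>
#include <cstdlib>

// Always-on check: still compiled into release builds (illustrative, not the kernel's real macro).
#define MY_VERIFY(expr)                                                                      \
    do {                                                                                     \
        if (!(expr)) {                                                                       \
            std::fprintf(stderr, "VERIFY failed: %s at %s:%d\n", #expr, __FILE__, __LINE__); \
            std::abort();                                                                    \
        }                                                                                    \
    } while (0)

// Debug-only check: compiles to nothing when NDEBUG is defined, like the classic assert().
#ifdef NDEBUG
#    define MY_ASSERT(expr) ((void)0)
#else
#    define MY_ASSERT(expr) MY_VERIFY(expr)
#endif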

/*
 * Copyright (c) 2020, The SerenityOS developers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <Kernel/Debug.h>
#include <Kernel/FutexQueue.h>
#include <Kernel/Thread.h>

namespace Kernel {

bool FutexQueue::should_add_blocker(Thread::Blocker& b, void* data)
{
    VERIFY(data != nullptr); // Thread that is requesting to be blocked
    VERIFY(m_lock.is_locked());
    VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);
    dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: should block thread {}", this, *static_cast<Thread*>(data));
    return true;
}
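
// Wakes up to `wake_count` threads blocked on this queue and, if a target queue can be
// obtained via `get_target_queue`, moves up to `requeue_count` of the remaining blockers
// onto it. Returns the number of threads woken plus the number requeued.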
u32 FutexQueue::wake_n_requeue(u32 wake_count, const Function<FutexQueue*()>& get_target_queue, u32 requeue_count, bool& is_empty, bool& is_empty_target)
{
    is_empty_target = false;
    ScopedSpinLock lock(m_lock);
    dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue({}, {})", this, wake_count, requeue_count);
    u32 did_wake = 0, did_requeue = 0;
    do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) {
        VERIFY(data);
        VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);
        auto& blocker = static_cast<Thread::FutexBlocker&>(b);
        dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue unblocking {}", this, *static_cast<Thread*>(data));
        VERIFY(did_wake < wake_count);
        if (blocker.unblock()) {
            if (++did_wake >= wake_count)
                stop_iterating = true;
            return true;
        }
        return false;
    });
    is_empty = is_empty_locked();
    if (requeue_count > 0) {
        auto blockers_to_requeue = do_take_blockers(requeue_count);
        if (!blockers_to_requeue.is_empty()) {
            if (auto* target_futex_queue = get_target_queue()) {
                dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue requeueing {} blockers to {}", this, blockers_to_requeue.size(), target_futex_queue);

                // While still holding m_lock, notify each blocker
                for (auto& info : blockers_to_requeue) {
                    VERIFY(info.blocker->blocker_type() == Thread::Blocker::Type::Futex);
                    auto& blocker = *static_cast<Thread::FutexBlocker*>(info.blocker);
                    blocker.begin_requeue();
                }
                lock.unlock();
                did_requeue = blockers_to_requeue.size();

                ScopedSpinLock target_lock(target_futex_queue->m_lock);
                // Now that we have the lock of the target, append the blockers
                // and notify them that they completed the move
                for (auto& info : blockers_to_requeue) {
                    VERIFY(info.blocker->blocker_type() == Thread::Blocker::Type::Futex);
                    auto& blocker = *static_cast<Thread::FutexBlocker*>(info.blocker);
                    blocker.finish_requeue(*target_futex_queue);
                }
                target_futex_queue->do_append_blockers(move(blockers_to_requeue));
                is_empty_target = target_futex_queue->is_empty_locked();
            } else {
                dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue could not get target queue to requeue {} blockers", this, blockers_to_requeue.size());
                do_append_blockers(move(blockers_to_requeue));
            }
        }
    }
    return did_wake + did_requeue;
}
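
// Wakes up to `wake_count` threads blocked on this queue. If `bitset` has a value, only
// blockers whose bitset matches are woken. Returns the number of threads woken.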
u32 FutexQueue::wake_n(u32 wake_count, const Optional<u32>& bitset, bool& is_empty)
{
    if (wake_count == 0)
        return 0; // should we assert instead?
    ScopedSpinLock lock(m_lock);
    dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n({})", this, wake_count);
    u32 did_wake = 0;
    do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) {
        VERIFY(data);
        VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);
        auto& blocker = static_cast<Thread::FutexBlocker&>(b);
        dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n unblocking {}", this, *static_cast<Thread*>(data));
        VERIFY(did_wake < wake_count);
        if (bitset.has_value() ? blocker.unblock_bitset(bitset.value()) : blocker.unblock()) {
            if (++did_wake >= wake_count)
                stop_iterating = true;
            return true;
        }
        return false;
    });
    is_empty = is_empty_locked();
    return did_wake;
}
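
// Wakes every thread currently blocked on this queue and returns how many were woken.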
u32 FutexQueue::wake_all(bool& is_empty)
{
    ScopedSpinLock lock(m_lock);
    dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_all", this);
    u32 did_wake = 0;
    do_unblock([&](Thread::Blocker& b, void* data, bool&) {
        VERIFY(data);
        VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);
        auto& blocker = static_cast<Thread::FutexBlocker&>(b);
        dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_all unblocking {}", this, *static_cast<Thread*>(data));
        if (blocker.unblock(true)) {
            did_wake++;
            return true;
        }
        return false;
    });
    is_empty = is_empty_locked();
    return did_wake;
}

}