Mirror of https://github.com/LadybirdBrowser/ladybird.git, synced 2025-01-04 09:14:21 +03:00
a6a439243f
This step would ideally not have been necessary (it increases the amount of refactoring and templates required, which in turn increases build times), but it gives us a couple of nice properties:

- SpinlockProtected inside Singleton (a very common combination) can now obtain any lock rank just via the template parameter (see the sketch below). It was not previously possible to do this with SingletonInstanceCreator magic.
- SpinlockProtected's lock rank is now mandatory; this is the majority of cases and allows us to see where we're still missing proper ranks.
- The type already informs us what lock rank a lock has, which aids code readability and (possibly, if gdb cooperates) lock mismatch debugging.
- The rank of a lock can no longer be dynamic, which is not something we wanted in the first place (or made use of). Locks randomly changing their rank sounds like a disaster waiting to happen.
- In some places, we might be able to statically check that locks are taken in the right order (with the right lock rank checking implementation), as rank information is fully statically known.

This refactoring further exposes the fact that Mutex has no lock rank capabilities, which is not fixed here.
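For illustration, here is a minimal, compile-checkable sketch of the shape this change enables; the LockRank values, the SpinlockProtected internals, and the names with(), s_counter, and bump_counter below are simplified stand-ins rather than the actual kernel types:

// Simplified stand-ins for illustration; the real types live in the
// kernel tree (Kernel/Locking/) and differ in detail.
enum class LockRank {
    None,
    Process,
};

// The rank is a mandatory template parameter, so every instantiation
// states its rank in the type itself, and the rank can never change
// at runtime.
template<typename T, LockRank Rank>
class SpinlockProtected {
public:
    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        // A real implementation would acquire a Spinlock<Rank> here and
        // release it when done; since Rank is a constant expression, lock
        // ordering could in principle be checked statically.
        return callback(m_value);
    }

private:
    T m_value {};
};

// The common Singleton + SpinlockProtected combination now picks up any
// rank purely via the template parameter, with no SingletonInstanceCreator
// magic involved:
static SpinlockProtected<int, LockRank::Process> s_counter;

int bump_counter()
{
    return s_counter.with([](int& value) { return ++value; });
}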
158 lines
4.4 KiB
C++
/*
 * Copyright (c) 2020, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/IntrusiveList.h>
#include <Kernel/Library/NonnullLockRefPtr.h>
#include <Kernel/Memory/ScopedAddressSpaceSwitcher.h>
#include <Kernel/Process.h>
#include <Kernel/Thread.h>
#include <Kernel/UserOrKernelBuffer.h>
#include <Kernel/WaitQueue.h>

namespace Kernel {

class Device;

extern WorkQueue* g_io_work;

class AsyncDeviceRequest : public AtomicRefCounted<AsyncDeviceRequest> {
    AK_MAKE_NONCOPYABLE(AsyncDeviceRequest);
    AK_MAKE_NONMOVABLE(AsyncDeviceRequest);

public:
    enum [[nodiscard]] RequestResult {
        Pending = 0,
        Started,
        Success,
        Failure,
        MemoryFault,
        OutOfMemory,
        Cancelled
    };

    class RequestWaitResult {
        friend class AsyncDeviceRequest;

    public:
        RequestResult request_result() const { return m_request_result; }
        Thread::BlockResult wait_result() const { return m_wait_result; }

    private:
        RequestWaitResult(RequestResult request_result, Thread::BlockResult wait_result)
            : m_request_result(request_result)
            , m_wait_result(wait_result)
        {
        }

        RequestResult m_request_result;
        Thread::BlockResult m_wait_result;
    };

    virtual ~AsyncDeviceRequest();

    virtual StringView name() const = 0;
    virtual void start() = 0;

    void add_sub_request(NonnullLockRefPtr<AsyncDeviceRequest>);

    [[nodiscard]] RequestWaitResult wait(Time* = nullptr);

    // Bail if the request already completed (e.g. it was cancelled before
    // starting); otherwise mark it Started and release the requests lock
    // before invoking start().
    void do_start(SpinlockLocker<Spinlock<LockRank::None>>&& requests_lock)
    {
        if (is_completed_result(m_result))
            return;
        m_result = Started;
        requests_lock.unlock();

        start();
    }

    void complete(RequestResult result);

    void set_private(void* priv)
    {
        // Private data may only be set when unset (and vice versa),
        // never silently overwritten.
        VERIFY(!m_private || !priv);
        m_private = priv;
    }
    void* get_private() const { return m_private; }

    template<typename... Args>
    ErrorOr<void> write_to_buffer(UserOrKernelBuffer& buffer, Args... args)
    {
        if (in_target_context(buffer))
            return buffer.write(forward<Args>(args)...);
        // Switch to the target process's address space for the duration
        // of the access; the switcher restores the old one on scope exit.
        ScopedAddressSpaceSwitcher switcher(m_process);
        return buffer.write(forward<Args>(args)...);
    }

    template<size_t BUFFER_BYTES, typename... Args>
    ErrorOr<size_t> write_to_buffer_buffered(UserOrKernelBuffer& buffer, Args... args)
    {
        if (in_target_context(buffer))
            return buffer.write_buffered<BUFFER_BYTES>(forward<Args>(args)...);
        ScopedAddressSpaceSwitcher switcher(m_process);
        return buffer.write_buffered<BUFFER_BYTES>(forward<Args>(args)...);
    }

    template<typename... Args>
    ErrorOr<void> read_from_buffer(UserOrKernelBuffer const& buffer, Args... args)
    {
        if (in_target_context(buffer))
            return buffer.read(forward<Args>(args)...);
        ScopedAddressSpaceSwitcher switcher(m_process);
        return buffer.read(forward<Args>(args)...);
    }

    template<size_t BUFFER_BYTES, typename... Args>
    ErrorOr<size_t> read_from_buffer_buffered(UserOrKernelBuffer const& buffer, Args... args)
    {
        if (in_target_context(buffer))
            return buffer.read_buffered<BUFFER_BYTES>(forward<Args>(args)...);
        ScopedAddressSpaceSwitcher switcher(m_process);
        return buffer.read_buffered<BUFFER_BYTES>(forward<Args>(args)...);
    }

protected:
    AsyncDeviceRequest(Device&);

    RequestResult get_request_result() const;

private:
    void sub_request_finished(AsyncDeviceRequest&);
    void request_finished();

    // A buffer is accessible without an address space switch if it is a
    // kernel buffer, or if the requesting process is the current one.
    [[nodiscard]] bool in_target_context(UserOrKernelBuffer const& buffer) const
    {
        if (buffer.is_kernel_buffer())
            return true;
        return m_process == &Process::current();
    }

    // Every result past Started (Success, Failure, MemoryFault,
    // OutOfMemory, Cancelled) counts as completed.
    [[nodiscard]] static bool is_completed_result(RequestResult result)
    {
        return result > Started;
    }

    Device& m_device;

    AsyncDeviceRequest* m_parent_request { nullptr };
    RequestResult m_result { Pending };
    IntrusiveListNode<AsyncDeviceRequest, LockRefPtr<AsyncDeviceRequest>> m_list_node;

    using AsyncDeviceSubRequestList = IntrusiveList<&AsyncDeviceRequest::m_list_node>;

    AsyncDeviceSubRequestList m_sub_requests_pending;
    AsyncDeviceSubRequestList m_sub_requests_complete;
    WaitQueue m_queue;
    NonnullLockRefPtr<Process> m_process;
    void* m_private { nullptr };
    mutable Spinlock<LockRank::None> m_lock {};
};

}
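For orientation, a driver-side request might subclass this interface roughly as follows. This is a hypothetical sketch rather than in-tree code, and it assumes the kernel context above; a real example to study is AsyncBlockDeviceRequest.

// Hypothetical subclass for illustration only. A driver derives from
// AsyncDeviceRequest, implements name() and start(), and eventually calls
// complete() (typically from an IRQ handler or a work queue item).
class ExampleDeviceRequest final : public AsyncDeviceRequest {
public:
    explicit ExampleDeviceRequest(Device& device)
        : AsyncDeviceRequest(device)
    {
    }

    virtual StringView name() const override { return "ExampleDeviceRequest"sv; }

    virtual void start() override
    {
        // Kick off the hardware operation here; on completion, call
        // complete(Success) or complete(Failure).
    }
};

A caller would then typically block on the request with wait() and inspect request_result() on the returned RequestWaitResult to distinguish Success from the various failure states.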