Kernel: Move Spinlock functions back to arch-independent Locking folder

Now that the Spinlock code no longer depends on architecture-specific
code, we can move it back to the Locking folder. This also means that
this Spinlock implementation is now used by the aarch64 kernel.
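With the lock living in Kernel/Locking, every architecture shares one implementation behind one interface. A minimal usage sketch, assuming the Spinlock and SpinlockLocker declarations that appear in the diff below (the lock name and the function here are illustrative, not part of this commit):

#include <Kernel/Locking/Spinlock.h>

namespace Kernel {

// Illustrative: a lock guarding some piece of shared kernel state.
static Spinlock s_state_lock { LockRank::None };

void touch_shared_state()
{
    // SpinlockLocker stores the InterruptsState returned by lock() and
    // hands it back to unlock() when the scope ends.
    SpinlockLocker locker(s_state_lock);
    // ... mutate the state guarded by s_state_lock ...
}

}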
Timon Kruiper 2022-08-23 22:14:07 +02:00 committed by Andreas Kling
parent c9118de5a6
commit 026f37b031
6 changed files with 75 additions and 116 deletions

Kernel/Arch/Spinlock.h (deleted)

@@ -1,80 +0,0 @@
/*
 * Copyright (c) 2020-2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <Kernel/Arch/Processor.h>
#include <Kernel/Locking/LockRank.h>

namespace Kernel {

class Spinlock {
    AK_MAKE_NONCOPYABLE(Spinlock);
    AK_MAKE_NONMOVABLE(Spinlock);

public:
    Spinlock(LockRank rank)
        : m_rank(rank)
    {
    }

    InterruptsState lock();
    void unlock(InterruptsState);

    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
    {
        // FIXME: Implement Spinlock on aarch64
#if ARCH(AARCH64)
        return true;
#endif
        return m_lock.load(AK::memory_order_relaxed) != 0;
    }

    ALWAYS_INLINE void initialize()
    {
        m_lock.store(0, AK::memory_order_relaxed);
    }

private:
    Atomic<u8> m_lock { 0 };
    const LockRank m_rank;
};

class RecursiveSpinlock {
    AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
    AK_MAKE_NONMOVABLE(RecursiveSpinlock);

public:
    RecursiveSpinlock(LockRank rank)
        : m_rank(rank)
    {
    }

    InterruptsState lock();
    void unlock(InterruptsState);

    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
    {
        return m_lock.load(AK::memory_order_relaxed) != 0;
    }

    [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
    {
        return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
    }

    ALWAYS_INLINE void initialize()
    {
        m_lock.store(0, AK::memory_order_relaxed);
    }

private:
    Atomic<FlatPtr> m_lock { 0 };
    u32 m_recursions { 0 };
    const LockRank m_rank;
};

}

Kernel/Arch/aarch64/Dummy.cpp

@@ -50,6 +50,14 @@ void Mutex::unlock()
 }
+
+// LockRank
+namespace Kernel {
+
+void track_lock_acquire(LockRank) { }
+void track_lock_release(LockRank) { }
+
+}
 
 // Inode
 namespace Kernel {
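The empty track_lock_acquire/track_lock_release bodies added above keep the now-shared Spinlock code linking on aarch64 while rank checking stays unimplemented there. Conceptually, rank tracking asserts that each processor acquires locks in non-descending rank order, which rules out lock-order inversions. A hypothetical sketch of that idea only (the per-processor helpers are invented names, not SerenityOS API):

namespace Kernel {

void track_lock_acquire(LockRank rank)
{
    if (rank == LockRank::None)
        return;
    // Hypothetical helper: the highest rank currently held on this CPU.
    auto current_rank = Processor::current_held_rank();
    // Acquiring a lower-ranked lock after a higher-ranked one could
    // deadlock against a CPU taking them in the documented order.
    VERIFY(static_cast<int>(rank) >= static_cast<int>(current_rank));
    Processor::push_held_rank(rank); // hypothetical
}

void track_lock_release(LockRank rank)
{
    if (rank == LockRank::None)
        return;
    Processor::pop_held_rank(rank); // hypothetical
}

}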

Kernel/Arch/aarch64/Spinlock.cpp (deleted)

@@ -1,32 +0,0 @@
/*
 * Copyright (c) 2022, Timon Kruiper <timonkruiper@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/Arch/Spinlock.h>

// FIXME: Actually implement the correct logic once the aarch64 build can
// do interrupts and/or has support for multiple processors.
namespace Kernel {

InterruptsState Spinlock::lock()
{
    return InterruptsState::Disabled;
}

void Spinlock::unlock(InterruptsState)
{
}

InterruptsState RecursiveSpinlock::lock()
{
    return InterruptsState::Disabled;
}

void RecursiveSpinlock::unlock(InterruptsState)
{
}

}
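For contrast with the stub above, a conventional acquire/release for the Atomic<u8>-based Spinlock declared in the deleted header looks roughly like this. This is a sketch of the usual test-and-set technique, not the verbatim code that now lives in Kernel/Locking/Spinlock.cpp, and the Processor interrupt helpers are named illustratively:

namespace Kernel {

InterruptsState Spinlock::lock()
{
    // Mask interrupts so the holder cannot be interrupted and re-enter
    // code that takes the same lock on this CPU. (Helper names are
    // assumptions for this sketch.)
    InterruptsState previous_state = Processor::save_and_disable_interrupts();
    // Spin until the lock byte atomically flips from 0 to 1; acquire
    // ordering makes the critical section observe the previous holder's writes.
    while (m_lock.exchange(1, AK::memory_order_acquire) != 0)
        Processor::pause(); // hint to the CPU that we are busy-waiting
    track_lock_acquire(m_rank);
    return previous_state;
}

void Spinlock::unlock(InterruptsState previous_state)
{
    VERIFY(is_locked());
    track_lock_release(m_rank);
    // Publish the critical section's writes, then restore interrupts.
    m_lock.store(0, AK::memory_order_release);
    Processor::restore_interrupts(previous_state); // assumed helper
}

}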

Kernel/CMakeLists.txt

@@ -202,6 +202,7 @@ set(KERNEL_SOURCES
     MiniStdLib.cpp
     Locking/LockRank.cpp
     Locking/Mutex.cpp
+    Locking/Spinlock.cpp
     Net/Intel/E1000ENetworkAdapter.cpp
     Net/Intel/E1000NetworkAdapter.cpp
     Net/NE2000/NetworkAdapter.cpp
@@ -333,7 +334,6 @@ if ("${SERENITY_ARCH}" STREQUAL "i686" OR "${SERENITY_ARCH}" STREQUAL "x86_64")
         Arch/x86/common/ScopedCritical.cpp
         Arch/x86/common/SmapDisabler.cpp
-        Arch/x86/common/Spinlock.cpp
     )
     set(KERNEL_SOURCES
@@ -464,7 +464,6 @@ else()
         Arch/aarch64/SafeMem.cpp
         Arch/aarch64/ScopedCritical.cpp
         Arch/aarch64/SmapDisabler.cpp
-        Arch/aarch64/Spinlock.cpp
         Arch/aarch64/vector_table.S
         # Files from base Kernel
@@ -475,6 +474,8 @@ else()
         Graphics/Console/BootFramebufferConsole.cpp
         Graphics/Console/GenericFramebufferConsole.cpp
+        Locking/Spinlock.cpp
         Memory/AddressSpace.cpp
         Memory/AnonymousVMObject.cpp
         Memory/InodeVMObject.cpp

Kernel/Locking/Spinlock.cpp

@@ -4,7 +4,7 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
-#include <Kernel/Arch/Spinlock.h>
+#include <Kernel/Locking/Spinlock.h>
 
 namespace Kernel {

Kernel/Locking/Spinlock.h

@@ -8,11 +8,73 @@
 #include <AK/Atomic.h>
 #include <AK/Types.h>
-#include <Kernel/Arch/Spinlock.h>
+#include <Kernel/Arch/Processor.h>
+#include <Kernel/Locking/LockRank.h>
 
 namespace Kernel {
 
+class Spinlock {
+    AK_MAKE_NONCOPYABLE(Spinlock);
+    AK_MAKE_NONMOVABLE(Spinlock);
+
+public:
+    Spinlock(LockRank rank)
+        : m_rank(rank)
+    {
+    }
+
+    InterruptsState lock();
+    void unlock(InterruptsState);
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
+    {
+        return m_lock.load(AK::memory_order_relaxed) != 0;
+    }
+
+    ALWAYS_INLINE void initialize()
+    {
+        m_lock.store(0, AK::memory_order_relaxed);
+    }
+
+private:
+    Atomic<u8> m_lock { 0 };
+    const LockRank m_rank;
+};
+
+class RecursiveSpinlock {
+    AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
+    AK_MAKE_NONMOVABLE(RecursiveSpinlock);
+
+public:
+    RecursiveSpinlock(LockRank rank)
+        : m_rank(rank)
+    {
+    }
+
+    InterruptsState lock();
+    void unlock(InterruptsState);
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked() const
+    {
+        return m_lock.load(AK::memory_order_relaxed) != 0;
+    }
+
+    [[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
+    {
+        return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
+    }
+
+    ALWAYS_INLINE void initialize()
+    {
+        m_lock.store(0, AK::memory_order_relaxed);
+    }
+
+private:
+    Atomic<FlatPtr> m_lock { 0 };
+    u32 m_recursions { 0 };
+    const LockRank m_rank;
+};
+
 template<typename LockType>
 class [[nodiscard]] SpinlockLocker {
     AK_MAKE_NONCOPYABLE(SpinlockLocker);