Kernel: Rename Locker => MutexLocker

Author: Andreas Kling 2021-07-18 01:13:34 +02:00
parent ab50a1480f
commit 9457d83986
40 changed files with 230 additions and 230 deletions
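
Every hunk below follows the same mechanical pattern: the old RAII guard Locker is renamed to MutexLocker. The guard acquires the given Mutex in its constructor (exclusively by default, or shared when Mutex::Mode::Shared is passed) and releases it in its destructor. A minimal usage sketch of the post-rename API, assuming the kernel's Mutex and MutexLocker types are in scope; the Counter class and its members are illustrative only and not part of this commit:

class Counter {
public:
    void increment()
    {
        // Exclusive lock for a write; released automatically when locker goes out of scope.
        MutexLocker locker(m_lock);
        ++m_value;
    }

    int value() const
    {
        // Shared lock for a read-only path, mirroring the Mutex::Mode::Shared call sites below.
        MutexLocker locker(m_lock, Mutex::Mode::Shared);
        return m_value;
    }

private:
    mutable Mutex m_lock;
    int m_value { 0 };
};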


@ -45,7 +45,7 @@ KResultOr<size_t> DoubleBuffer::write(const UserOrKernelBuffer& data, size_t siz
{
if (!size || m_storage.is_null())
return 0;
Locker locker(m_lock);
MutexLocker locker(m_lock);
size_t bytes_to_write = min(size, m_space_for_writing);
u8* write_ptr = m_write_buffer->data + m_write_buffer->size;
if (!data.read(write_ptr, bytes_to_write))
@ -61,7 +61,7 @@ KResultOr<size_t> DoubleBuffer::read(UserOrKernelBuffer& data, size_t size)
{
if (!size || m_storage.is_null())
return 0;
Locker locker(m_lock);
MutexLocker locker(m_lock);
if (m_read_buffer_index >= m_read_buffer->size && m_write_buffer->size != 0)
flip();
if (m_read_buffer_index >= m_read_buffer->size)
@ -80,7 +80,7 @@ KResultOr<size_t> DoubleBuffer::peek(UserOrKernelBuffer& data, size_t size)
{
if (!size || m_storage.is_null())
return 0;
Locker locker(m_lock);
MutexLocker locker(m_lock);
if (m_read_buffer_index >= m_read_buffer->size && m_write_buffer->size != 0) {
flip();
}


@ -120,7 +120,7 @@ KResult BlockBasedFileSystem::write_block(BlockIndex index, const UserOrKernelBu
VERIFY(offset + count <= block_size());
dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::write_block {}, size={}", index, count);
Locker locker(m_cache_lock);
MutexLocker locker(m_cache_lock);
if (!allow_cache) {
flush_specific_block_if_needed(index);
@ -205,7 +205,7 @@ KResult BlockBasedFileSystem::read_block(BlockIndex index, UserOrKernelBuffer* b
VERIFY(offset + count <= block_size());
dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::read_block {}", index);
Locker locker(m_cache_lock);
MutexLocker locker(m_cache_lock);
if (!allow_cache) {
const_cast<BlockBasedFileSystem*>(this)->flush_specific_block_if_needed(index);
@ -252,7 +252,7 @@ KResult BlockBasedFileSystem::read_blocks(BlockIndex index, unsigned count, User
void BlockBasedFileSystem::flush_specific_block_if_needed(BlockIndex index)
{
Locker locker(m_cache_lock);
MutexLocker locker(m_cache_lock);
if (!cache().is_dirty())
return;
Vector<CacheEntry*, 32> cleaned_entries;
@ -272,7 +272,7 @@ void BlockBasedFileSystem::flush_specific_block_if_needed(BlockIndex index)
void BlockBasedFileSystem::flush_writes_impl()
{
Locker locker(m_cache_lock);
MutexLocker locker(m_cache_lock);
if (!cache().is_dirty())
return;
u32 count = 0;


@ -19,7 +19,7 @@ NonnullRefPtr<DevFS> DevFS::create()
DevFS::DevFS()
: m_root_inode(adopt_ref(*new DevFSRootDirectoryInode(*this)))
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
Device::for_each([&](Device& device) {
// FIXME: Find a better way to not add MasterPTYs or SlavePTYs!
if (device.is_master_pty() || (device.is_character_device() && device.major() == 201))
@ -33,7 +33,7 @@ void DevFS::notify_new_device(Device& device)
auto name = KString::try_create(device.device_name());
VERIFY(name);
Locker locker(m_lock);
MutexLocker locker(m_lock);
auto new_device_inode = adopt_ref(*new DevFSDeviceInode(*this, device, name.release_nonnull()));
m_nodes.append(new_device_inode);
m_root_inode->m_devices.append(new_device_inode);
@ -41,7 +41,7 @@ void DevFS::notify_new_device(Device& device)
size_t DevFS::allocate_inode_index()
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
m_next_inode_index = m_next_inode_index.value() + 1;
VERIFY(m_next_inode_index > 0);
return 1 + m_next_inode_index.value();
@ -68,7 +68,7 @@ NonnullRefPtr<Inode> DevFS::root_inode() const
RefPtr<Inode> DevFS::get_inode(InodeIdentifier inode_id) const
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
if (inode_id.index() == 1)
return m_root_inode;
for (auto& node : m_nodes) {
@ -154,7 +154,7 @@ DevFSLinkInode::DevFSLinkInode(DevFS& fs, NonnullOwnPtr<KString> name)
KResultOr<size_t> DevFSLinkInode::read_bytes(off_t offset, size_t, UserOrKernelBuffer& buffer, FileDescription*) const
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
VERIFY(offset == 0);
VERIFY(m_link);
if (!buffer.write(m_link->characters() + offset, m_link->length()))
@ -180,7 +180,7 @@ KResultOr<size_t> DevFSLinkInode::write_bytes(off_t offset, size_t count, UserOr
if (kstring_or_error.is_error())
return kstring_or_error.error();
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
VERIFY(offset == 0);
VERIFY(buffer.is_kernel_buffer());
m_link = kstring_or_error.release_value();
@ -223,7 +223,7 @@ DevFSRootDirectoryInode::DevFSRootDirectoryInode(DevFS& fs)
}
KResult DevFSRootDirectoryInode::traverse_as_directory(Function<bool(FileSystem::DirectoryEntryView const&)> callback) const
{
Locker locker(m_parent_fs.m_lock);
MutexLocker locker(m_parent_fs.m_lock);
callback({ ".", identifier(), 0 });
callback({ "..", identifier(), 0 });
@ -244,7 +244,7 @@ KResult DevFSRootDirectoryInode::traverse_as_directory(Function<bool(FileSystem:
}
RefPtr<Inode> DevFSRootDirectoryInode::lookup(StringView name)
{
Locker locker(m_parent_fs.m_lock);
MutexLocker locker(m_parent_fs.m_lock);
for (auto& subdirectory : m_subdirectories) {
if (subdirectory.name() == name)
return subdirectory;
@ -263,7 +263,7 @@ RefPtr<Inode> DevFSRootDirectoryInode::lookup(StringView name)
}
KResultOr<NonnullRefPtr<Inode>> DevFSRootDirectoryInode::create_child(StringView name, mode_t mode, dev_t, uid_t, gid_t)
{
Locker locker(m_parent_fs.m_lock);
MutexLocker locker(m_parent_fs.m_lock);
InodeMetadata metadata;
metadata.mode = mode;
@ -335,7 +335,7 @@ DevFSDeviceInode::~DevFSDeviceInode()
KResult DevFSDeviceInode::chown(uid_t uid, gid_t gid)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
m_uid = uid;
m_gid = gid;
return KSuccess;
@ -348,7 +348,7 @@ StringView DevFSDeviceInode::name() const
KResultOr<size_t> DevFSDeviceInode::read_bytes(off_t offset, size_t count, UserOrKernelBuffer& buffer, FileDescription* description) const
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
VERIFY(!!description);
if (!m_attached_device->can_read(*description, offset))
return 0;
@ -360,7 +360,7 @@ KResultOr<size_t> DevFSDeviceInode::read_bytes(off_t offset, size_t count, UserO
InodeMetadata DevFSDeviceInode::metadata() const
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
InodeMetadata metadata;
metadata.inode = { fsid(), index() };
metadata.mode = (m_attached_device->is_block_device() ? S_IFBLK : S_IFCHR) | m_attached_device->required_mode();
@ -374,7 +374,7 @@ InodeMetadata DevFSDeviceInode::metadata() const
}
KResultOr<size_t> DevFSDeviceInode::write_bytes(off_t offset, size_t count, const UserOrKernelBuffer& buffer, FileDescription* description)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
VERIFY(!!description);
if (!m_attached_device->can_write(*description, offset))
return 0;
@ -390,7 +390,7 @@ DevFSPtsDirectoryInode::DevFSPtsDirectoryInode(DevFS& fs)
}
KResult DevFSPtsDirectoryInode::traverse_as_directory(Function<bool(FileSystem::DirectoryEntryView const&)> callback) const
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
callback({ ".", identifier(), 0 });
callback({ "..", identifier(), 0 });
return KSuccess;


@ -70,7 +70,7 @@ Ext2FS::~Ext2FS()
bool Ext2FS::flush_super_block()
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
VERIFY((sizeof(ext2_super_block) % logical_block_size()) == 0);
auto super_block_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&m_super_block);
bool success = raw_write_blocks(2, (sizeof(ext2_super_block) / logical_block_size()), super_block_buffer);
@ -88,7 +88,7 @@ const ext2_group_desc& Ext2FS::group_descriptor(GroupIndex group_index) const
bool Ext2FS::initialize()
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
VERIFY((sizeof(ext2_super_block) % logical_block_size()) == 0);
auto super_block_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&m_super_block);
bool success = raw_read_blocks(2, (sizeof(ext2_super_block) / logical_block_size()), super_block_buffer);
@ -384,7 +384,7 @@ KResult Ext2FSInode::shrink_triply_indirect_block(BlockBasedFileSystem::BlockInd
KResult Ext2FSInode::flush_block_list()
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (m_block_list.is_empty()) {
m_raw_inode.i_blocks = 0;
@ -639,7 +639,7 @@ Vector<Ext2FS::BlockIndex> Ext2FSInode::compute_block_list_impl_internal(const e
void Ext2FS::free_inode(Ext2FSInode& inode)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
VERIFY(inode.m_raw_inode.i_links_count == 0);
dbgln_if(EXT2_DEBUG, "Ext2FS[{}]::free_inode(): Inode {} has no more links, time to delete!", fsid(), inode.index());
@ -673,7 +673,7 @@ void Ext2FS::free_inode(Ext2FSInode& inode)
void Ext2FS::flush_block_group_descriptor_table()
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
auto blocks_to_write = ceil_div(m_block_group_count * sizeof(ext2_group_desc), block_size());
auto first_block_of_bgdt = block_size() == 1024 ? 2 : 1;
auto buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)block_group_descriptors());
@ -684,7 +684,7 @@ void Ext2FS::flush_block_group_descriptor_table()
void Ext2FS::flush_writes()
{
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
if (m_super_block_dirty) {
flush_super_block();
m_super_block_dirty = false;
@ -752,7 +752,7 @@ u64 Ext2FSInode::size() const
InodeMetadata Ext2FSInode::metadata() const
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
InodeMetadata metadata;
metadata.inode = identifier();
metadata.size = size();
@ -779,7 +779,7 @@ InodeMetadata Ext2FSInode::metadata() const
void Ext2FSInode::flush_metadata()
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]::flush_metadata(): Flushing inode", identifier());
fs().write_ext2_inode(index(), m_raw_inode);
if (is_directory()) {
@ -794,7 +794,7 @@ void Ext2FSInode::flush_metadata()
RefPtr<Inode> Ext2FS::get_inode(InodeIdentifier inode) const
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
VERIFY(inode.fsid() == fsid());
{
@ -829,7 +829,7 @@ RefPtr<Inode> Ext2FS::get_inode(InodeIdentifier inode) const
KResultOr<size_t> Ext2FSInode::read_bytes(off_t offset, size_t count, UserOrKernelBuffer& buffer, FileDescription* description) const
{
Locker inode_locker(m_inode_lock);
MutexLocker inode_locker(m_inode_lock);
VERIFY(offset >= 0);
if (m_raw_inode.i_size == 0)
return 0;
@ -979,7 +979,7 @@ KResultOr<size_t> Ext2FSInode::write_bytes(off_t offset, size_t count, const Use
if (count == 0)
return 0;
Locker inode_locker(m_inode_lock);
MutexLocker inode_locker(m_inode_lock);
if (auto result = prepare_to_write_data(); result.is_error())
return result;
@ -1106,7 +1106,7 @@ KResult Ext2FSInode::traverse_as_directory(Function<bool(FileSystem::DirectoryEn
KResult Ext2FSInode::write_directory(Vector<Ext2FSDirectoryEntry>& entries)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
auto block_size = fs().block_size();
// Calculate directory size and record length of entries so that
@ -1173,7 +1173,7 @@ KResultOr<NonnullRefPtr<Inode>> Ext2FSInode::create_child(StringView name, mode_
KResult Ext2FSInode::add_child(Inode& child, const StringView& name, mode_t mode)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
VERIFY(is_directory());
if (name.length() > EXT2_NAME_LEN)
@ -1219,7 +1219,7 @@ KResult Ext2FSInode::add_child(Inode& child, const StringView& name, mode_t mode
KResult Ext2FSInode::remove_child(const StringView& name)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]::remove_child(): Removing '{}'", identifier(), name);
VERIFY(is_directory());
@ -1288,7 +1288,7 @@ bool Ext2FS::write_ext2_inode(InodeIndex inode, const ext2_inode& e2inode)
auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) -> KResultOr<Vector<BlockIndex>>
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
dbgln_if(EXT2_DEBUG, "Ext2FS: allocate_blocks(preferred group: {}, count {})", preferred_group_index, count);
if (count == 0)
return Vector<BlockIndex> {};
@ -1353,7 +1353,7 @@ auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) ->
KResultOr<InodeIndex> Ext2FS::allocate_inode(GroupIndex preferred_group)
{
dbgln_if(EXT2_DEBUG, "Ext2FS: allocate_inode(preferred_group: {})", preferred_group);
Locker locker(m_lock);
MutexLocker locker(m_lock);
// FIXME: We shouldn't refuse to allocate an inode if there is no group that can house the whole thing.
// In those cases we should just spread it across multiple groups.
@ -1429,7 +1429,7 @@ auto Ext2FS::group_index_from_inode(InodeIndex inode) const -> GroupIndex
KResultOr<bool> Ext2FS::get_inode_allocation_state(InodeIndex index) const
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
if (index == 0)
return EINVAL;
auto group_index = group_index_from_inode(index);
@ -1472,7 +1472,7 @@ KResult Ext2FS::update_bitmap_block(BlockIndex bitmap_block, size_t bit_index, b
KResult Ext2FS::set_inode_allocation_state(InodeIndex inode_index, bool new_state)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
auto group_index = group_index_from_inode(inode_index);
unsigned index_in_group = inode_index.value() - ((group_index.value() - 1) * inodes_per_group());
unsigned bit_index = (index_in_group - 1) % inodes_per_group();
@ -1511,7 +1511,7 @@ KResultOr<Ext2FS::CachedBitmap*> Ext2FS::get_bitmap_block(BlockIndex bitmap_bloc
KResult Ext2FS::set_block_allocation_state(BlockIndex block_index, bool new_state)
{
VERIFY(block_index != 0);
Locker locker(m_lock);
MutexLocker locker(m_lock);
auto group_index = group_index_from_block_index(block_index);
unsigned index_in_group = (block_index.value() - first_block_index().value()) - ((group_index.value() - 1) * blocks_per_group());
@ -1524,7 +1524,7 @@ KResult Ext2FS::set_block_allocation_state(BlockIndex block_index, bool new_stat
KResult Ext2FS::create_directory(Ext2FSInode& parent_inode, const String& name, mode_t mode, uid_t uid, gid_t gid)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
VERIFY(is_directory(mode));
auto inode_or_error = create_inode(parent_inode, name, mode, 0, uid, gid);
@ -1599,7 +1599,7 @@ KResultOr<NonnullRefPtr<Inode>> Ext2FS::create_inode(Ext2FSInode& parent_inode,
KResult Ext2FSInode::populate_lookup_cache() const
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (!m_lookup_cache.is_empty())
return KSuccess;
HashMap<String, InodeIndex> children;
@ -1626,7 +1626,7 @@ RefPtr<Inode> Ext2FSInode::lookup(StringView name)
InodeIndex inode_index;
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
auto it = m_lookup_cache.find(name.hash(), [&](auto& entry) { return entry.key == name; });
if (it == m_lookup_cache.end()) {
dbgln_if(EXT2_DEBUG, "Ext2FSInode[{}]:lookup(): '{}' not found", identifier(), name);
@ -1644,7 +1644,7 @@ void Ext2FSInode::one_ref_left()
KResult Ext2FSInode::set_atime(time_t t)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (fs().is_readonly())
return EROFS;
m_raw_inode.i_atime = t;
@ -1654,7 +1654,7 @@ KResult Ext2FSInode::set_atime(time_t t)
KResult Ext2FSInode::set_ctime(time_t t)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (fs().is_readonly())
return EROFS;
m_raw_inode.i_ctime = t;
@ -1664,7 +1664,7 @@ KResult Ext2FSInode::set_ctime(time_t t)
KResult Ext2FSInode::set_mtime(time_t t)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (fs().is_readonly())
return EROFS;
m_raw_inode.i_mtime = t;
@ -1674,7 +1674,7 @@ KResult Ext2FSInode::set_mtime(time_t t)
KResult Ext2FSInode::increment_link_count()
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (fs().is_readonly())
return EROFS;
constexpr size_t max_link_count = 65535;
@ -1687,7 +1687,7 @@ KResult Ext2FSInode::increment_link_count()
KResult Ext2FSInode::decrement_link_count()
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (fs().is_readonly())
return EROFS;
VERIFY(m_raw_inode.i_links_count);
@ -1705,13 +1705,13 @@ KResult Ext2FSInode::decrement_link_count()
void Ext2FS::uncache_inode(InodeIndex index)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
m_inode_cache.remove(index);
}
KResult Ext2FSInode::chmod(mode_t mode)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (m_raw_inode.i_mode == mode)
return KSuccess;
m_raw_inode.i_mode = mode;
@ -1721,7 +1721,7 @@ KResult Ext2FSInode::chmod(mode_t mode)
KResult Ext2FSInode::chown(uid_t uid, gid_t gid)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (m_raw_inode.i_uid == uid && m_raw_inode.i_gid == gid)
return KSuccess;
m_raw_inode.i_uid = uid;
@ -1732,7 +1732,7 @@ KResult Ext2FSInode::chown(uid_t uid, gid_t gid)
KResult Ext2FSInode::truncate(u64 size)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (static_cast<u64>(m_raw_inode.i_size) == size)
return KSuccess;
if (auto result = resize(size); result.is_error())
@ -1743,7 +1743,7 @@ KResult Ext2FSInode::truncate(u64 size)
KResultOr<int> Ext2FSInode::get_block_address(int index)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (m_block_list.is_empty())
m_block_list = compute_block_list();
@ -1756,31 +1756,31 @@ KResultOr<int> Ext2FSInode::get_block_address(int index)
unsigned Ext2FS::total_block_count() const
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
return super_block().s_blocks_count;
}
unsigned Ext2FS::free_block_count() const
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
return super_block().s_free_blocks_count;
}
unsigned Ext2FS::total_inode_count() const
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
return super_block().s_inodes_count;
}
unsigned Ext2FS::free_inode_count() const
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
return super_block().s_free_inodes_count;
}
KResult Ext2FS::prepare_to_unmount() const
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
for (auto& it : m_inode_cache) {
if (it.value->ref_count() > 1)


@ -41,7 +41,7 @@ KResultOr<NonnullRefPtr<FileDescription>> FIFO::open_direction(FIFO::Direction d
KResultOr<NonnullRefPtr<FileDescription>> FIFO::open_direction_blocking(FIFO::Direction direction)
{
Locker locker(m_open_lock);
MutexLocker locker(m_open_lock);
auto description = open_direction(direction);
if (description.is_error())
@ -73,7 +73,7 @@ KResultOr<NonnullRefPtr<FileDescription>> FIFO::open_direction_blocking(FIFO::Di
FIFO::FIFO(uid_t uid)
: m_uid(uid)
{
Locker locker(all_fifos().lock());
MutexLocker locker(all_fifos().lock());
all_fifos().resource().set(this);
m_fifo_id = ++s_next_fifo_id;
@ -85,7 +85,7 @@ FIFO::FIFO(uid_t uid)
FIFO::~FIFO()
{
Locker locker(all_fifos().lock());
MutexLocker locker(all_fifos().lock());
all_fifos().resource().remove(this);
}


@ -109,7 +109,7 @@ Thread::FileBlocker::BlockFlags FileDescription::should_unblock(Thread::FileBloc
KResult FileDescription::stat(::stat& buffer)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
// FIXME: This is due to the Device class not overriding File::stat().
if (m_inode)
return m_inode->metadata().stat(buffer);
@ -118,7 +118,7 @@ KResult FileDescription::stat(::stat& buffer)
KResultOr<off_t> FileDescription::seek(off_t offset, int whence)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
if (!m_file->is_seekable())
return ESPIPE;
@ -173,7 +173,7 @@ KResultOr<size_t> FileDescription::write(u64 offset, UserOrKernelBuffer const& d
KResultOr<size_t> FileDescription::read(UserOrKernelBuffer& buffer, size_t count)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
if (Checked<off_t>::addition_would_overflow(m_current_offset, count))
return EOVERFLOW;
auto nread_or_error = m_file->read(*this, offset(), buffer, count);
@ -187,7 +187,7 @@ KResultOr<size_t> FileDescription::read(UserOrKernelBuffer& buffer, size_t count
KResultOr<size_t> FileDescription::write(const UserOrKernelBuffer& data, size_t size)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
if (Checked<off_t>::addition_would_overflow(m_current_offset, size))
return EOVERFLOW;
auto nwritten_or_error = m_file->write(*this, offset(), data, size);
@ -219,7 +219,7 @@ KResultOr<NonnullOwnPtr<KBuffer>> FileDescription::read_entire_file()
KResultOr<size_t> FileDescription::get_dir_entries(UserOrKernelBuffer& output_buffer, size_t size)
{
Locker locker(m_lock, Mutex::Mode::Shared);
MutexLocker locker(m_lock, Mutex::Mode::Shared);
if (!is_directory())
return ENOTDIR;
@ -379,13 +379,13 @@ InodeMetadata FileDescription::metadata() const
KResultOr<Region*> FileDescription::mmap(Process& process, const Range& range, u64 offset, int prot, bool shared)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
return m_file->mmap(process, *this, range, offset, prot, shared);
}
KResult FileDescription::truncate(u64 length)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
return m_file->truncate(length);
}
@ -422,7 +422,7 @@ const Socket* FileDescription::socket() const
void FileDescription::set_file_flags(u32 flags)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
m_is_blocking = !(flags & O_NONBLOCK);
m_should_append = flags & O_APPEND;
m_direct = flags & O_DIRECT;
@ -431,13 +431,13 @@ void FileDescription::set_file_flags(u32 flags)
KResult FileDescription::chmod(mode_t mode)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
return m_file->chmod(*this, mode);
}
KResult FileDescription::chown(uid_t uid, gid_t gid)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
return m_file->chown(*this, uid, gid);
}


@ -109,7 +109,7 @@ Inode::~Inode()
void Inode::will_be_destroyed()
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (m_metadata_dirty)
flush_metadata();
}
@ -141,13 +141,13 @@ KResult Inode::decrement_link_count()
void Inode::set_shared_vmobject(SharedInodeVMObject& vmobject)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
m_shared_vmobject = vmobject;
}
bool Inode::bind_socket(LocalSocket& socket)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (m_socket)
return false;
m_socket = socket;
@ -156,7 +156,7 @@ bool Inode::bind_socket(LocalSocket& socket)
bool Inode::unbind_socket()
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (!m_socket)
return false;
m_socket = nullptr;
@ -165,21 +165,21 @@ bool Inode::unbind_socket()
void Inode::register_watcher(Badge<InodeWatcher>, InodeWatcher& watcher)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
VERIFY(!m_watchers.contains(&watcher));
m_watchers.set(&watcher);
}
void Inode::unregister_watcher(Badge<InodeWatcher>, InodeWatcher& watcher)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
VERIFY(m_watchers.contains(&watcher));
m_watchers.remove(&watcher);
}
NonnullRefPtr<FIFO> Inode::fifo()
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
VERIFY(metadata().is_fifo());
// FIXME: Release m_fifo when it is closed by all readers and writers
@ -192,7 +192,7 @@ NonnullRefPtr<FIFO> Inode::fifo()
void Inode::set_metadata_dirty(bool metadata_dirty)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (metadata_dirty) {
// Sanity check.
@ -214,7 +214,7 @@ void Inode::set_metadata_dirty(bool metadata_dirty)
void Inode::did_add_child(InodeIdentifier const&, String const& name)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
for (auto& watcher : m_watchers) {
watcher->notify_inode_event({}, identifier(), InodeWatcherEvent::Type::ChildCreated, name);
@ -223,7 +223,7 @@ void Inode::did_add_child(InodeIdentifier const&, String const& name)
void Inode::did_remove_child(InodeIdentifier const&, String const& name)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (name == "." || name == "..") {
// These are just aliases and are not interesting to userspace.
@ -237,7 +237,7 @@ void Inode::did_remove_child(InodeIdentifier const&, String const& name)
void Inode::did_modify_contents()
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
for (auto& watcher : m_watchers) {
watcher->notify_inode_event({}, identifier(), InodeWatcherEvent::Type::ContentModified);
}
@ -245,7 +245,7 @@ void Inode::did_modify_contents()
void Inode::did_delete_self()
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
for (auto& watcher : m_watchers) {
watcher->notify_inode_event({}, identifier(), InodeWatcherEvent::Type::Deleted);
}
@ -255,7 +255,7 @@ KResult Inode::prepare_to_write_data()
{
// FIXME: It's a poor design that filesystems are expected to call this before writing out data.
// We should funnel everything through an interface at the VirtualFileSystem layer so this can happen from a single place.
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
if (fs().is_readonly())
return EROFS;
auto metadata = this->metadata();
@ -268,7 +268,7 @@ KResult Inode::prepare_to_write_data()
RefPtr<SharedInodeVMObject> Inode::shared_vmobject() const
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
return m_shared_vmobject.strong_ref();
}


@ -27,13 +27,13 @@ InodeWatcher::~InodeWatcher()
bool InodeWatcher::can_read(const FileDescription&, size_t) const
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
return !m_queue.is_empty();
}
KResultOr<size_t> InodeWatcher::read(FileDescription&, u64, UserOrKernelBuffer& buffer, size_t buffer_size)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
if (m_queue.is_empty())
// can_read will catch the blocking case.
return EAGAIN;
@ -72,7 +72,7 @@ KResultOr<size_t> InodeWatcher::read(FileDescription&, u64, UserOrKernelBuffer&
KResult InodeWatcher::close()
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
for (auto& entry : m_wd_to_watches) {
auto& inode = const_cast<Inode&>(entry.value->inode);
@ -91,7 +91,7 @@ String InodeWatcher::absolute_path(const FileDescription&) const
void InodeWatcher::notify_inode_event(Badge<Inode>, InodeIdentifier inode_id, InodeWatcherEvent::Type event_type, String const& name)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
auto it = m_inode_to_watches.find(inode_id);
if (it == m_inode_to_watches.end())
@ -107,7 +107,7 @@ void InodeWatcher::notify_inode_event(Badge<Inode>, InodeIdentifier inode_id, In
KResultOr<int> InodeWatcher::register_inode(Inode& inode, unsigned event_mask)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
if (m_inode_to_watches.find(inode.identifier()) != m_inode_to_watches.end())
return EEXIST;
@ -135,7 +135,7 @@ KResultOr<int> InodeWatcher::register_inode(Inode& inode, unsigned event_mask)
KResult InodeWatcher::unregister_by_wd(int wd)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
auto it = m_wd_to_watches.find(wd);
if (it == m_wd_to_watches.end())
@ -152,7 +152,7 @@ KResult InodeWatcher::unregister_by_wd(int wd)
void InodeWatcher::unregister_by_inode(Badge<Inode>, InodeIdentifier identifier)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
auto it = m_inode_to_watches.find(identifier);
if (it == m_inode_to_watches.end())


@ -475,7 +475,7 @@ void Plan9FS::Plan9FSBlockCondition::try_unblock(Plan9FS::Blocker& blocker)
bool Plan9FS::is_complete(const ReceiveCompletion& completion)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
if (m_completions.contains(completion.tag)) {
// If it's still in the map then it can't be complete
VERIFY(!completion.completed);
@ -495,12 +495,12 @@ KResult Plan9FS::post_message(Message& message, RefPtr<ReceiveCompletion> comple
size_t size = buffer.size();
auto& description = file_description();
Locker locker(m_send_lock);
MutexLocker locker(m_send_lock);
if (completion) {
// Save the completion record *before* we send the message. This
// ensures that it exists when the thread reads the response
Locker locker(m_lock);
MutexLocker locker(m_lock);
auto tag = completion->tag;
m_completions.set(tag, completion.release_nonnull());
// TODO: What if there is a collision? Do we need to wait until
@ -569,7 +569,7 @@ KResult Plan9FS::read_and_dispatch_one_message()
if (result.is_error())
return result;
Locker locker(m_lock);
MutexLocker locker(m_lock);
auto optional_completion = m_completions.get(header.tag);
if (optional_completion.has_value()) {
@ -647,7 +647,7 @@ void Plan9FS::thread_main()
auto result = read_and_dispatch_one_message();
if (result.is_error()) {
// If we fail to read, wake up everyone with an error.
Locker locker(m_lock);
MutexLocker locker(m_lock);
for (auto& it : m_completions) {
it.value->result = result;
@ -698,7 +698,7 @@ KResult Plan9FSInode::ensure_open_for_mode(int mode)
u8 p9_mode = 0;
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
// If it's already open in this mode, we're done.
if ((m_open_mode & mode) == mode)


@ -38,7 +38,7 @@ UNMAP_AFTER_INIT ProcFSComponentRegistry::ProcFSComponentRegistry()
void ProcFSComponentRegistry::register_new_process(Process& new_process)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
m_root_directory->m_process_directories.append(ProcFSProcessDirectory::create(new_process));
}
@ -127,7 +127,7 @@ RefPtr<Inode> ProcFSInode::lookup(StringView)
InodeMetadata ProcFSInode::metadata() const
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
InodeMetadata metadata;
metadata.inode = { fsid(), m_associated_component->component_index() };
metadata.mode = m_associated_component->required_mode();
@ -193,7 +193,7 @@ ProcFSDirectoryInode::~ProcFSDirectoryInode()
}
InodeMetadata ProcFSDirectoryInode::metadata() const
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
InodeMetadata metadata;
metadata.inode = { fsid(), m_associated_component->component_index() };
metadata.mode = S_IFDIR | m_associated_component->required_mode();
@ -205,13 +205,13 @@ InodeMetadata ProcFSDirectoryInode::metadata() const
}
KResult ProcFSDirectoryInode::traverse_as_directory(Function<bool(FileSystem::DirectoryEntryView const&)> callback) const
{
Locker locker(m_parent_fs.m_lock);
MutexLocker locker(m_parent_fs.m_lock);
return m_associated_component->traverse_as_directory(m_parent_fs.fsid(), move(callback));
}
RefPtr<Inode> ProcFSDirectoryInode::lookup(StringView name)
{
Locker locker(m_parent_fs.m_lock);
MutexLocker locker(m_parent_fs.m_lock);
auto component = m_associated_component->lookup(name);
if (!component)
return {};
@ -229,7 +229,7 @@ ProcFSLinkInode::ProcFSLinkInode(const ProcFS& fs, const ProcFSExposedComponent&
}
InodeMetadata ProcFSLinkInode::metadata() const
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
InodeMetadata metadata;
metadata.inode = { fsid(), m_associated_component->component_index() };
metadata.mode = S_IFLNK | m_associated_component->required_mode();


@ -31,7 +31,7 @@ UNMAP_AFTER_INIT SysFSComponentRegistry::SysFSComponentRegistry()
UNMAP_AFTER_INIT void SysFSComponentRegistry::register_new_component(SysFSComponent& component)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
m_root_directory->m_components.append(component);
}
@ -42,7 +42,7 @@ NonnullRefPtr<SysFSRootDirectory> SysFSRootDirectory::create()
KResult SysFSRootDirectory::traverse_as_directory(unsigned fsid, Function<bool(FileSystem::DirectoryEntryView const&)> callback) const
{
Locker locker(SysFSComponentRegistry::the().get_lock());
MutexLocker locker(SysFSComponentRegistry::the().get_lock());
callback({ ".", { fsid, component_index() }, 0 });
callback({ "..", { fsid, 0 }, 0 });
@ -113,7 +113,7 @@ RefPtr<Inode> SysFSInode::lookup(StringView)
InodeMetadata SysFSInode::metadata() const
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
InodeMetadata metadata;
metadata.inode = { fsid(), m_associated_component->component_index() };
metadata.mode = S_IFREG | S_IRUSR | S_IRGRP | S_IROTH;
@ -180,7 +180,7 @@ SysFSDirectoryInode::~SysFSDirectoryInode()
InodeMetadata SysFSDirectoryInode::metadata() const
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
InodeMetadata metadata;
metadata.inode = { fsid(), m_associated_component->component_index() };
metadata.mode = S_IFDIR | S_IRUSR | S_IRGRP | S_IROTH | S_IXOTH;
@ -192,13 +192,13 @@ InodeMetadata SysFSDirectoryInode::metadata() const
}
KResult SysFSDirectoryInode::traverse_as_directory(Function<bool(FileSystem::DirectoryEntryView const&)> callback) const
{
Locker locker(m_parent_fs.m_lock);
MutexLocker locker(m_parent_fs.m_lock);
return m_associated_component->traverse_as_directory(m_parent_fs.fsid(), move(callback));
}
RefPtr<Inode> SysFSDirectoryInode::lookup(StringView name)
{
Locker locker(m_parent_fs.m_lock);
MutexLocker locker(m_parent_fs.m_lock);
auto component = m_associated_component->lookup(name);
if (!component)
return {};


@ -28,7 +28,7 @@ SysFSComponent::SysFSComponent(StringView name)
KResult SysFSDirectory::traverse_as_directory(unsigned fsid, Function<bool(FileSystem::DirectoryEntryView const&)> callback) const
{
Locker locker(SysFSComponentRegistry::the().get_lock());
MutexLocker locker(SysFSComponentRegistry::the().get_lock());
VERIFY(m_parent_directory);
callback({ ".", { fsid, component_index() }, 0 });
callback({ "..", { fsid, m_parent_directory->component_index() }, 0 });


@ -39,7 +39,7 @@ NonnullRefPtr<Inode> TmpFS::root_inode() const
void TmpFS::register_inode(TmpFSInode& inode)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
VERIFY(inode.identifier().fsid() == fsid());
auto index = inode.identifier().index();
@ -48,7 +48,7 @@ void TmpFS::register_inode(TmpFSInode& inode)
void TmpFS::unregister_inode(InodeIdentifier identifier)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
VERIFY(identifier.fsid() == fsid());
m_inodes.remove(identifier.index());
@ -56,14 +56,14 @@ void TmpFS::unregister_inode(InodeIdentifier identifier)
unsigned TmpFS::next_inode_index()
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
return m_next_inode_index++;
}
RefPtr<Inode> TmpFS::get_inode(InodeIdentifier identifier) const
{
Locker locker(m_lock, Mutex::Mode::Shared);
MutexLocker locker(m_lock, Mutex::Mode::Shared);
VERIFY(identifier.fsid() == fsid());
auto it = m_inodes.find(identifier.index());
@ -105,14 +105,14 @@ RefPtr<TmpFSInode> TmpFSInode::create_root(TmpFS& fs)
InodeMetadata TmpFSInode::metadata() const
{
Locker locker(m_inode_lock, Mutex::Mode::Shared);
MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
return m_metadata;
}
KResult TmpFSInode::traverse_as_directory(Function<bool(FileSystem::DirectoryEntryView const&)> callback) const
{
Locker locker(m_inode_lock, Mutex::Mode::Shared);
MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
if (!is_directory())
return ENOTDIR;
@ -129,7 +129,7 @@ KResult TmpFSInode::traverse_as_directory(Function<bool(FileSystem::DirectoryEnt
KResultOr<size_t> TmpFSInode::read_bytes(off_t offset, size_t size, UserOrKernelBuffer& buffer, FileDescription*) const
{
Locker locker(m_inode_lock, Mutex::Mode::Shared);
MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
VERIFY(!is_directory());
VERIFY(offset >= 0);
@ -149,7 +149,7 @@ KResultOr<size_t> TmpFSInode::read_bytes(off_t offset, size_t size, UserOrKernel
KResultOr<size_t> TmpFSInode::write_bytes(off_t offset, size_t size, const UserOrKernelBuffer& buffer, FileDescription*)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
VERIFY(!is_directory());
VERIFY(offset >= 0);
@ -198,7 +198,7 @@ KResultOr<size_t> TmpFSInode::write_bytes(off_t offset, size_t size, const UserO
RefPtr<Inode> TmpFSInode::lookup(StringView name)
{
Locker locker(m_inode_lock, Mutex::Mode::Shared);
MutexLocker locker(m_inode_lock, Mutex::Mode::Shared);
VERIFY(is_directory());
if (name == ".")
@ -230,7 +230,7 @@ void TmpFSInode::flush_metadata()
KResult TmpFSInode::chmod(mode_t mode)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
m_metadata.mode = mode;
notify_watchers();
@ -239,7 +239,7 @@ KResult TmpFSInode::chmod(mode_t mode)
KResult TmpFSInode::chown(uid_t uid, gid_t gid)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
m_metadata.uid = uid;
m_metadata.gid = gid;
@ -249,7 +249,7 @@ KResult TmpFSInode::chown(uid_t uid, gid_t gid)
KResultOr<NonnullRefPtr<Inode>> TmpFSInode::create_child(StringView name, mode_t mode, dev_t dev, uid_t uid, gid_t gid)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
// TODO: Support creating devices on TmpFS.
if (dev != 0)
@ -276,7 +276,7 @@ KResultOr<NonnullRefPtr<Inode>> TmpFSInode::create_child(StringView name, mode_t
KResult TmpFSInode::add_child(Inode& child, const StringView& name, mode_t)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
VERIFY(is_directory());
VERIFY(child.fsid() == fsid());
@ -290,7 +290,7 @@ KResult TmpFSInode::add_child(Inode& child, const StringView& name, mode_t)
KResult TmpFSInode::remove_child(const StringView& name)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
VERIFY(is_directory());
if (name == "." || name == "..")
@ -308,7 +308,7 @@ KResult TmpFSInode::remove_child(const StringView& name)
KResult TmpFSInode::truncate(u64 size)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
VERIFY(!is_directory());
if (size == 0)
@ -338,7 +338,7 @@ KResult TmpFSInode::truncate(u64 size)
KResult TmpFSInode::set_atime(time_t time)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
m_metadata.atime = time;
set_metadata_dirty(true);
@ -348,7 +348,7 @@ KResult TmpFSInode::set_atime(time_t time)
KResult TmpFSInode::set_ctime(time_t time)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
m_metadata.ctime = time;
notify_watchers();
@ -357,7 +357,7 @@ KResult TmpFSInode::set_ctime(time_t time)
KResult TmpFSInode::set_mtime(time_t t)
{
Locker locker(m_inode_lock);
MutexLocker locker(m_inode_lock);
m_metadata.mtime = t;
notify_watchers();


@ -51,7 +51,7 @@ InodeIdentifier VirtualFileSystem::root_inode_id() const
KResult VirtualFileSystem::mount(FileSystem& fs, Custody& mount_point, int flags)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
auto& inode = mount_point.inode();
dbgln("VirtualFileSystem: Mounting {} at {} (inode: {}) with flags {}",
@ -67,7 +67,7 @@ KResult VirtualFileSystem::mount(FileSystem& fs, Custody& mount_point, int flags
KResult VirtualFileSystem::bind_mount(Custody& source, Custody& mount_point, int flags)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
dbgln("VirtualFileSystem: Bind-mounting {} at {}", source.try_create_absolute_path(), mount_point.try_create_absolute_path());
// FIXME: check that this is not already a mount point
@ -78,7 +78,7 @@ KResult VirtualFileSystem::bind_mount(Custody& source, Custody& mount_point, int
KResult VirtualFileSystem::remount(Custody& mount_point, int new_flags)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
dbgln("VirtualFileSystem: Remounting {}", mount_point.try_create_absolute_path());
@ -92,7 +92,7 @@ KResult VirtualFileSystem::remount(Custody& mount_point, int new_flags)
KResult VirtualFileSystem::unmount(Inode& guest_inode)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
dbgln("VirtualFileSystem: unmount called with inode {}", guest_inode.identifier());
for (size_t i = 0; i < m_mounts.size(); ++i) {


@ -74,7 +74,7 @@ private:
virtual bool output(KBufferBuilder& builder) override
{
JsonArraySerializer array { builder };
Locker locker(arp_table().lock(), Mutex::Mode::Shared);
MutexLocker locker(arp_table().lock(), Mutex::Mode::Shared);
for (auto& it : arp_table().resource()) {
auto obj = array.add_object();
obj.add("mac_address", it.value.to_string());
@ -236,12 +236,12 @@ public:
static NonnullRefPtr<ProcFSDumpKmallocStacks> must_create(const ProcFSSystemDirectory&);
virtual bool value() const override
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
return g_dump_kmalloc_stacks;
}
virtual void set_value(bool new_value) override
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
g_dump_kmalloc_stacks = new_value;
}
@ -255,12 +255,12 @@ public:
static NonnullRefPtr<ProcFSUBSanDeadly> must_create(const ProcFSSystemDirectory&);
virtual bool value() const override
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
return AK::UBSanitizer::g_ubsan_is_deadly;
}
virtual void set_value(bool new_value) override
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
AK::UBSanitizer::g_ubsan_is_deadly = new_value;
}
@ -274,12 +274,12 @@ public:
static NonnullRefPtr<ProcFSCapsLockRemap> must_create(const ProcFSSystemDirectory&);
virtual bool value() const override
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
return g_caps_lock_remapped_to_ctrl.load();
}
virtual void set_value(bool new_value) override
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
g_caps_lock_remapped_to_ctrl.exchange(new_value);
}
@ -852,7 +852,7 @@ UNMAP_AFTER_INIT NonnullRefPtr<ProcFSRootDirectory> ProcFSRootDirectory::must_cr
KResult ProcFSRootDirectory::traverse_as_directory(unsigned fsid, Function<bool(FileSystem::DirectoryEntryView const&)> callback) const
{
Locker locker(ProcFSComponentRegistry::the().get_lock());
MutexLocker locker(ProcFSComponentRegistry::the().get_lock());
callback({ ".", { fsid, component_index() }, 0 });
callback({ "..", { fsid, 0 }, 0 });


@ -42,7 +42,7 @@ void VirtIOFrameBufferDevice::create_framebuffer()
}
m_framebuffer_sink_vmobject = AnonymousVMObject::try_create_with_physical_pages(move(pages));
Locker locker(m_gpu.operation_lock());
MutexLocker locker(m_gpu.operation_lock());
m_current_buffer = &buffer_from_index(m_last_set_buffer_index.load());
create_buffer(m_main_buffer, 0, m_buffer_size);
create_buffer(m_back_buffer, m_buffer_size, m_buffer_size);
@ -108,7 +108,7 @@ bool VirtIOFrameBufferDevice::try_to_set_resolution(size_t width, size_t height)
auto& info = display_info();
Locker locker(m_gpu.operation_lock());
MutexLocker locker(m_gpu.operation_lock());
info.rect = {
.x = 0,
@ -123,7 +123,7 @@ bool VirtIOFrameBufferDevice::try_to_set_resolution(size_t width, size_t height)
void VirtIOFrameBufferDevice::set_buffer(int buffer_index)
{
auto& buffer = buffer_index == 0 ? m_main_buffer : m_back_buffer;
Locker locker(m_gpu.operation_lock());
MutexLocker locker(m_gpu.operation_lock());
if (&buffer == m_current_buffer)
return;
m_current_buffer = &buffer;
@ -183,7 +183,7 @@ int VirtIOFrameBufferDevice::ioctl(FileDescription&, unsigned request, FlatPtr a
return -EFAULT;
if (m_are_writes_active && user_flush_rects.count > 0) {
auto& buffer = buffer_from_index(user_flush_rects.buffer_index);
Locker locker(m_gpu.operation_lock());
MutexLocker locker(m_gpu.operation_lock());
for (unsigned i = 0; i < user_flush_rects.count; i++) {
FBRect user_dirty_rect;
if (!copy_from_user(&user_dirty_rect, &user_flush_rects.rects[i]))


@ -38,7 +38,7 @@ VirtIOGPU::VirtIOGPU(PCI::Address address)
}
VERIFY(success);
finish_init();
Locker locker(m_operation_lock);
MutexLocker locker(m_operation_lock);
// Get display information using VIRTIO_GPU_CMD_GET_DISPLAY_INFO
query_display_information();
} else {
@ -244,7 +244,7 @@ void VirtIOGPU::populate_virtio_gpu_request_header(VirtIOGPUCtrlHeader& header,
void VirtIOGPU::flush_dirty_window(VirtIOGPUScanoutID scanout, VirtIOGPURect const& dirty_rect, VirtIOGPUResourceID resource_id)
{
Locker locker(m_operation_lock);
MutexLocker locker(m_operation_lock);
transfer_framebuffer_data_to_host(scanout, dirty_rect, resource_id);
flush_displayed_image(dirty_rect, resource_id);
}


@ -105,12 +105,12 @@ private:
mutable SpinLock<u8> m_lock;
};
class Locker {
class MutexLocker {
public:
#if LOCK_DEBUG
ALWAYS_INLINE explicit Locker(Mutex& l, Mutex::Mode mode = Mutex::Mode::Exclusive, const SourceLocation& location = SourceLocation::current())
ALWAYS_INLINE explicit MutexLocker(Mutex& l, Mutex::Mode mode = Mutex::Mode::Exclusive, const SourceLocation& location = SourceLocation::current())
#else
ALWAYS_INLINE explicit Locker(Mutex& l, Mutex::Mode mode = Mutex::Mode::Exclusive)
ALWAYS_INLINE explicit MutexLocker(Mutex& l, Mutex::Mode mode = Mutex::Mode::Exclusive)
#endif
: m_lock(l)
{
@ -121,7 +121,7 @@ public:
#endif
}
ALWAYS_INLINE ~Locker()
ALWAYS_INLINE ~MutexLocker()
{
if (m_locked)
unlock();
@ -170,7 +170,7 @@ public:
[[nodiscard]] T lock_and_copy()
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
return m_resource;
}


@ -66,13 +66,13 @@ IPv4Socket::IPv4Socket(int type, int protocol)
if (m_buffer_mode == BufferMode::Bytes) {
m_scratch_buffer = KBuffer::create_with_size(65536);
}
Locker locker(all_sockets().lock());
MutexLocker locker(all_sockets().lock());
all_sockets().resource().set(this);
}
IPv4Socket::~IPv4Socket()
{
Locker locker(all_sockets().lock());
MutexLocker locker(all_sockets().lock());
all_sockets().resource().remove(this);
}
@ -121,7 +121,7 @@ KResult IPv4Socket::bind(Userspace<const sockaddr*> user_address, socklen_t addr
KResult IPv4Socket::listen(size_t backlog)
{
Locker locker(lock());
MutexLocker locker(lock());
auto result = allocate_local_port_if_needed();
if (result.error_or_port.is_error() && result.error_or_port.error() != -ENOPROTOOPT)
return result.error_or_port.error();
@ -176,7 +176,7 @@ bool IPv4Socket::can_write(const FileDescription&, size_t) const
PortAllocationResult IPv4Socket::allocate_local_port_if_needed()
{
Locker locker(lock());
MutexLocker locker(lock());
if (m_local_port)
return { m_local_port, false };
auto port_or_error = protocol_allocate_local_port();
@ -188,7 +188,7 @@ PortAllocationResult IPv4Socket::allocate_local_port_if_needed()
KResultOr<size_t> IPv4Socket::sendto(FileDescription&, const UserOrKernelBuffer& data, size_t data_length, [[maybe_unused]] int flags, Userspace<const sockaddr*> addr, socklen_t addr_length)
{
Locker locker(lock());
MutexLocker locker(lock());
if (addr && addr_length != sizeof(sockaddr_in))
return EINVAL;
@ -247,7 +247,7 @@ KResultOr<size_t> IPv4Socket::sendto(FileDescription&, const UserOrKernelBuffer&
KResultOr<size_t> IPv4Socket::receive_byte_buffered(FileDescription& description, UserOrKernelBuffer& buffer, size_t buffer_length, int flags, Userspace<sockaddr*>, Userspace<socklen_t*>)
{
Locker locker(lock());
MutexLocker locker(lock());
if (m_receive_buffer.is_empty()) {
if (protocol_is_disconnected())
return 0;
@ -283,7 +283,7 @@ KResultOr<size_t> IPv4Socket::receive_byte_buffered(FileDescription& description
KResultOr<size_t> IPv4Socket::receive_packet_buffered(FileDescription& description, UserOrKernelBuffer& buffer, size_t buffer_length, int flags, Userspace<sockaddr*> addr, Userspace<socklen_t*> addr_length, Time& packet_timestamp)
{
Locker locker(lock());
MutexLocker locker(lock());
ReceivedPacket packet;
{
if (m_receive_queue.is_empty()) {
@ -398,7 +398,7 @@ KResultOr<size_t> IPv4Socket::recvfrom(FileDescription& description, UserOrKerne
bool IPv4Socket::did_receive(const IPv4Address& source_address, u16 source_port, ReadonlyBytes packet, const Time& packet_timestamp)
{
Locker locker(lock());
MutexLocker locker(lock());
if (is_shut_down_for_reading())
return false;


@ -26,7 +26,7 @@ static Lockable<LocalSocket::List>& all_sockets()
void LocalSocket::for_each(Function<void(const LocalSocket&)> callback)
{
Locker locker(all_sockets().lock(), Mutex::Mode::Shared);
MutexLocker locker(all_sockets().lock(), Mutex::Mode::Shared);
for (auto& socket : all_sockets().resource())
callback(socket);
}
@ -70,7 +70,7 @@ LocalSocket::LocalSocket(int type)
: Socket(AF_LOCAL, type, 0)
{
{
Locker locker(all_sockets().lock());
MutexLocker locker(all_sockets().lock());
all_sockets().resource().append(*this);
}
@ -91,7 +91,7 @@ LocalSocket::LocalSocket(int type)
LocalSocket::~LocalSocket()
{
Locker locker(all_sockets().lock());
MutexLocker locker(all_sockets().lock());
all_sockets().resource().remove(*this);
}
@ -214,7 +214,7 @@ KResult LocalSocket::connect(FileDescription& description, Userspace<const socka
KResult LocalSocket::listen(size_t backlog)
{
Locker locker(lock());
MutexLocker locker(lock());
if (type() != SOCK_STREAM)
return EOPNOTSUPP;
set_backlog(backlog);
@ -465,7 +465,7 @@ NonnullRefPtrVector<FileDescription>& LocalSocket::sendfd_queue_for(const FileDe
KResult LocalSocket::sendfd(const FileDescription& socket_description, FileDescription& passing_description)
{
Locker locker(lock());
MutexLocker locker(lock());
auto role = this->role(socket_description);
if (role != Role::Connected && role != Role::Accepted)
return EINVAL;
@ -480,7 +480,7 @@ KResult LocalSocket::sendfd(const FileDescription& socket_description, FileDescr
KResultOr<NonnullRefPtr<FileDescription>> LocalSocket::recvfd(const FileDescription& socket_description)
{
Locker locker(lock());
MutexLocker locker(lock());
auto role = this->role(socket_description);
if (role != Role::Connected && role != Role::Accepted)
return EINVAL;


@ -224,7 +224,7 @@ void handle_icmp(const EthernetFrameHeader& eth, const IPv4Packet& ipv4_packet,
{
NonnullRefPtrVector<IPv4Socket> icmp_sockets;
{
Locker locker(IPv4Socket::all_sockets().lock(), Mutex::Mode::Shared);
MutexLocker locker(IPv4Socket::all_sockets().lock(), Mutex::Mode::Shared);
for (auto* socket : IPv4Socket::all_sockets().resource()) {
if (socket->protocol() != (unsigned)IPv4Protocol::ICMP)
continue;
@ -312,7 +312,7 @@ void flush_delayed_tcp_acks()
{
Vector<RefPtr<TCPSocket>, 32> remaining_sockets;
for (auto& socket : *delayed_ack_sockets) {
Locker locker(socket->lock());
MutexLocker locker(socket->lock());
if (socket->should_delay_next_ack()) {
remaining_sockets.append(socket);
continue;
@ -395,7 +395,7 @@ void handle_tcp(const IPv4Packet& ipv4_packet, const Time& packet_timestamp)
return;
}
Locker locker(socket->lock());
MutexLocker locker(socket->lock());
VERIFY(socket->type() == SOCK_STREAM);
VERIFY(socket->local_port() == tcp_packet.destination_port());
@ -426,7 +426,7 @@ void handle_tcp(const IPv4Packet& ipv4_packet, const Time& packet_timestamp)
dmesgln("handle_tcp: couldn't create client socket");
return;
}
Locker locker(client->lock());
MutexLocker locker(client->lock());
dbgln_if(TCP_DEBUG, "handle_tcp: created new client socket with tuple {}", client->tuple().to_string());
client->set_sequence_number(1000);
client->set_ack_number(tcp_packet.sequence_number() + payload_size + 1);
@ -624,13 +624,13 @@ void retransmit_tcp_packets()
// in case retransmit_packets() realizes that it wants to close the socket.
NonnullRefPtrVector<TCPSocket, 16> sockets;
{
Locker locker(TCPSocket::sockets_for_retransmit().lock(), LockMode::Shared);
MutexLocker locker(TCPSocket::sockets_for_retransmit().lock(), LockMode::Shared);
for (auto& socket : TCPSocket::sockets_for_retransmit().resource())
sockets.append(*socket);
}
for (auto& socket : sockets) {
Locker socket_locker(socket.lock());
MutexLocker socket_locker(socket.lock());
socket.retransmit_packets();
}
}


@ -43,14 +43,14 @@ NonnullRefPtr<NetworkAdapter> NetworkingManagement::loopback_adapter() const
void NetworkingManagement::for_each(Function<void(NetworkAdapter&)> callback)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
for (auto& it : m_adapters)
callback(it);
}
RefPtr<NetworkAdapter> NetworkingManagement::from_ipv4_address(const IPv4Address& address) const
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
for (auto& adapter : m_adapters) {
if (adapter.ipv4_address() == address || adapter.ipv4_broadcast() == address)
return adapter;
@ -63,7 +63,7 @@ RefPtr<NetworkAdapter> NetworkingManagement::from_ipv4_address(const IPv4Address
}
RefPtr<NetworkAdapter> NetworkingManagement::lookup_by_name(const StringView& name) const
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
RefPtr<NetworkAdapter> found_adapter;
for (auto& it : m_adapters) {
if (it.name() == name)


@ -106,7 +106,7 @@ Lockable<HashMap<IPv4Address, MACAddress>>& arp_table()
void update_arp_table(const IPv4Address& ip_addr, const MACAddress& addr)
{
Locker locker(arp_table().lock());
MutexLocker locker(arp_table().lock());
arp_table().resource().set(ip_addr, addr);
s_arp_table_block_condition->unblock(ip_addr, addr);
@ -221,7 +221,7 @@ RoutingDecision route_to(const IPv4Address& target, const IPv4Address& source, c
return { adapter, multicast_ethernet_address(target) };
{
Locker locker(arp_table().lock());
MutexLocker locker(arp_table().lock());
auto addr = arp_table().resource().get(next_hop_ip);
if (addr.has_value()) {
dbgln_if(ROUTING_DEBUG, "Routing: Using cached ARP entry for {} ({})", next_hop_ip, addr.value().to_string());


@ -51,7 +51,7 @@ void Socket::set_setup_state(SetupState new_setup_state)
RefPtr<Socket> Socket::accept()
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
if (m_pending.is_empty())
return nullptr;
dbgln_if(SOCKET_DEBUG, "Socket({}) de-queueing connection", this);
@ -69,7 +69,7 @@ RefPtr<Socket> Socket::accept()
KResult Socket::queue_connection_from(NonnullRefPtr<Socket> peer)
{
dbgln_if(SOCKET_DEBUG, "Socket({}) queueing connection", this);
Locker locker(m_lock);
MutexLocker locker(m_lock);
if (m_pending.size() >= m_backlog)
return ECONNREFUSED;
if (!m_pending.try_append(peer))
@ -241,7 +241,7 @@ KResultOr<size_t> Socket::write(FileDescription& description, u64, const UserOrK
KResult Socket::shutdown(int how)
{
Locker locker(lock());
MutexLocker locker(lock());
if (type() == SOCK_STREAM && !is_connected())
return ENOTCONN;
if (m_role == Role::Listener)
@ -264,7 +264,7 @@ KResult Socket::stat(::stat& st) const
void Socket::set_connected(bool connected)
{
Locker locker(lock());
MutexLocker locker(lock());
if (m_connected == connected)
return;
m_connected = connected;


@ -23,7 +23,7 @@ namespace Kernel {
void TCPSocket::for_each(Function<void(const TCPSocket&)> callback)
{
Locker locker(sockets_by_tuple().lock(), Mutex::Mode::Shared);
MutexLocker locker(sockets_by_tuple().lock(), Mutex::Mode::Shared);
for (auto& it : sockets_by_tuple().resource())
callback(*it.value);
}
@ -41,7 +41,7 @@ void TCPSocket::set_state(State new_state)
m_role = Role::Connected;
if (new_state == State::Closed) {
Locker locker(closing_sockets().lock());
MutexLocker locker(closing_sockets().lock());
closing_sockets().resource().remove(tuple());
if (m_originator)
@ -68,7 +68,7 @@ Lockable<HashMap<IPv4SocketTuple, TCPSocket*>>& TCPSocket::sockets_by_tuple()
RefPtr<TCPSocket> TCPSocket::from_tuple(const IPv4SocketTuple& tuple)
{
Locker locker(sockets_by_tuple().lock(), Mutex::Mode::Shared);
MutexLocker locker(sockets_by_tuple().lock(), Mutex::Mode::Shared);
auto exact_match = sockets_by_tuple().resource().get(tuple);
if (exact_match.has_value())
@ -91,7 +91,7 @@ RefPtr<TCPSocket> TCPSocket::create_client(const IPv4Address& new_local_address,
auto tuple = IPv4SocketTuple(new_local_address, new_local_port, new_peer_address, new_peer_port);
{
Locker locker(sockets_by_tuple().lock(), Mutex::Mode::Shared);
MutexLocker locker(sockets_by_tuple().lock(), Mutex::Mode::Shared);
if (sockets_by_tuple().resource().contains(tuple))
return {};
}
@ -109,7 +109,7 @@ RefPtr<TCPSocket> TCPSocket::create_client(const IPv4Address& new_local_address,
client->set_direction(Direction::Incoming);
client->set_originator(*this);
Locker locker(sockets_by_tuple().lock());
MutexLocker locker(sockets_by_tuple().lock());
m_pending_release_for_accept.set(tuple, client);
sockets_by_tuple().resource().set(tuple, client);
@ -139,7 +139,7 @@ TCPSocket::TCPSocket(int protocol)
TCPSocket::~TCPSocket()
{
Locker locker(sockets_by_tuple().lock());
MutexLocker locker(sockets_by_tuple().lock());
sockets_by_tuple().resource().remove(tuple());
dequeue_for_retransmit();
@ -246,7 +246,7 @@ KResult TCPSocket::send_tcp_packet(u16 flags, const UserOrKernelBuffer* payload,
m_packets_out++;
m_bytes_out += buffer_size;
if (tcp_packet.has_syn() || payload_size > 0) {
Locker locker(m_not_acked_lock);
MutexLocker locker(m_not_acked_lock);
m_not_acked.append({ m_sequence_number, move(packet), ipv4_payload_offset, *routing_decision.adapter });
m_not_acked_size += payload_size;
enqueue_for_retransmit();
@ -265,7 +265,7 @@ void TCPSocket::receive_tcp_packet(const TCPPacket& packet, u16 size)
dbgln_if(TCP_SOCKET_DEBUG, "TCPSocket: receive_tcp_packet: {}", ack_number);
int removed = 0;
Locker locker(m_not_acked_lock);
MutexLocker locker(m_not_acked_lock);
while (!m_not_acked.is_empty()) {
auto& packet = m_not_acked.first();
@ -369,7 +369,7 @@ KResult TCPSocket::protocol_bind()
KResult TCPSocket::protocol_listen(bool did_allocate_port)
{
if (!did_allocate_port) {
Locker socket_locker(sockets_by_tuple().lock());
MutexLocker socket_locker(sockets_by_tuple().lock());
if (sockets_by_tuple().resource().contains(tuple()))
return EADDRINUSE;
sockets_by_tuple().resource().set(tuple(), this);
@ -383,7 +383,7 @@ KResult TCPSocket::protocol_listen(bool did_allocate_port)
KResult TCPSocket::protocol_connect(FileDescription& description, ShouldBlock should_block)
{
Locker locker(lock());
MutexLocker locker(lock());
auto routing_decision = route_to(peer_address(), local_address());
if (routing_decision.is_zero())
@ -434,7 +434,7 @@ KResultOr<u16> TCPSocket::protocol_allocate_local_port()
constexpr u16 ephemeral_port_range_size = last_ephemeral_port - first_ephemeral_port;
u16 first_scan_port = first_ephemeral_port + get_good_random<u16>() % ephemeral_port_range_size;
Locker locker(sockets_by_tuple().lock());
MutexLocker locker(sockets_by_tuple().lock());
for (u16 port = first_scan_port;;) {
IPv4SocketTuple proposed_tuple(local_address(), port, peer_address(), peer_port());
@ -482,7 +482,7 @@ void TCPSocket::shut_down_for_writing()
KResult TCPSocket::close()
{
Locker socket_locker(lock());
MutexLocker socket_locker(lock());
auto result = IPv4Socket::close();
if (state() == State::CloseWait) {
dbgln_if(TCP_SOCKET_DEBUG, " Sending FIN from CloseWait and moving into LastAck");
@ -491,7 +491,7 @@ KResult TCPSocket::close()
}
if (state() != State::Closed && state() != State::Listen) {
Locker locker(closing_sockets().lock());
MutexLocker locker(closing_sockets().lock());
closing_sockets().resource().set(tuple(), *this);
}
return result;
@ -506,13 +506,13 @@ Lockable<HashTable<TCPSocket*>>& TCPSocket::sockets_for_retransmit()
void TCPSocket::enqueue_for_retransmit()
{
Locker locker(sockets_for_retransmit().lock());
MutexLocker locker(sockets_for_retransmit().lock());
sockets_for_retransmit().resource().set(this);
}
void TCPSocket::dequeue_for_retransmit()
{
Locker locker(sockets_for_retransmit().lock());
MutexLocker locker(sockets_for_retransmit().lock());
sockets_for_retransmit().resource().remove(this);
}
@ -545,7 +545,7 @@ void TCPSocket::retransmit_packets()
if (routing_decision.is_zero())
return;
Locker locker(m_not_acked_lock, Mutex::Mode::Shared);
MutexLocker locker(m_not_acked_lock, Mutex::Mode::Shared);
for (auto& packet : m_not_acked) {
packet.tx_counter++;
@ -590,7 +590,7 @@ bool TCPSocket::can_write(const FileDescription& file_description, size_t size)
if (!file_description.is_blocking())
return true;
Locker lock(m_not_acked_lock);
MutexLocker lock(m_not_acked_lock);
return m_not_acked_size + size <= m_send_window_size;
}
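
Note: every hunk in this file follows the same shape — the scope guard type is renamed, its behaviour is unchanged. As a rough illustration of what such a guard does (this is standalone standard C++, not the kernel's Mutex/MutexLocker implementation; the Shared/Exclusive handling is a simplified stand-in backed by std::shared_mutex):

// Illustrative sketch only -- not SerenityOS kernel code. A MutexLocker-style
// RAII guard: the constructor acquires the mutex in the requested mode and the
// destructor releases it, so every early return in the hunks above unlocks
// automatically.
#include <shared_mutex>

class Mutex {
public:
    enum class Mode { Exclusive, Shared };

    void lock(Mode mode) { mode == Mode::Exclusive ? m_impl.lock() : m_impl.lock_shared(); }
    void unlock(Mode mode) { mode == Mode::Exclusive ? m_impl.unlock() : m_impl.unlock_shared(); }

private:
    std::shared_mutex m_impl;
};

class MutexLocker {
public:
    explicit MutexLocker(Mutex& mutex, Mutex::Mode mode = Mutex::Mode::Exclusive)
        : m_mutex(mutex)
        , m_mode(mode)
    {
        m_mutex.lock(m_mode);
    }
    ~MutexLocker() { m_mutex.unlock(m_mode); }

    MutexLocker(const MutexLocker&) = delete;
    MutexLocker& operator=(const MutexLocker&) = delete;

private:
    Mutex& m_mutex;
    Mutex::Mode m_mode;
};

Defaulting the mode to Exclusive matches how most call sites above construct the locker with just the mutex argument, while the read-only paths (for_each, from_tuple) pass Mode::Shared explicitly.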

View File

@ -17,7 +17,7 @@ namespace Kernel {
void UDPSocket::for_each(Function<void(const UDPSocket&)> callback)
{
Locker locker(sockets_by_port().lock(), Mutex::Mode::Shared);
MutexLocker locker(sockets_by_port().lock(), Mutex::Mode::Shared);
for (auto it : sockets_by_port().resource())
callback(*it.value);
}
@ -33,7 +33,7 @@ SocketHandle<UDPSocket> UDPSocket::from_port(u16 port)
{
RefPtr<UDPSocket> socket;
{
Locker locker(sockets_by_port().lock(), Mutex::Mode::Shared);
MutexLocker locker(sockets_by_port().lock(), Mutex::Mode::Shared);
auto it = sockets_by_port().resource().find(port);
if (it == sockets_by_port().resource().end())
return {};
@ -50,7 +50,7 @@ UDPSocket::UDPSocket(int protocol)
UDPSocket::~UDPSocket()
{
Locker locker(sockets_by_port().lock());
MutexLocker locker(sockets_by_port().lock());
sockets_by_port().resource().remove(local_port());
}
@ -112,7 +112,7 @@ KResultOr<u16> UDPSocket::protocol_allocate_local_port()
constexpr u16 ephemeral_port_range_size = last_ephemeral_port - first_ephemeral_port;
u16 first_scan_port = first_ephemeral_port + get_good_random<u16>() % ephemeral_port_range_size;
Locker locker(sockets_by_port().lock());
MutexLocker locker(sockets_by_port().lock());
for (u16 port = first_scan_port;;) {
auto it = sockets_by_port().resource().find(port);
if (it == sockets_by_port().resource().end()) {
@ -131,7 +131,7 @@ KResultOr<u16> UDPSocket::protocol_allocate_local_port()
KResult UDPSocket::protocol_bind()
{
Locker locker(sockets_by_port().lock());
MutexLocker locker(sockets_by_port().lock());
if (sockets_by_port().resource().contains(local_port()))
return EADDRINUSE;
sockets_by_port().resource().set(local_port(), this);
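
The protocol_allocate_local_port() hunks in both the TCP and UDP files only show the top of the scan: pick a random starting point in the ephemeral range, then probe the port map under an exclusive lock. A hedged sketch of the assumed overall shape follows — the wrap-around and the "all ports taken" result are assumptions, since the diff does not show the rest of the loop, and the port range constants are placeholders:

// Hedged sketch of an ephemeral-port scan (assumed shape; the diff only shows
// the first few lines of the loop). Standard containers stand in for the
// kernel's Lockable<HashMap<...>>.
#include <cstdint>
#include <map>
#include <mutex>
#include <optional>
#include <random>

constexpr uint16_t first_ephemeral_port = 32768;  // placeholder values for illustration
constexpr uint16_t last_ephemeral_port = 60999;

std::mutex ports_lock;
std::map<uint16_t, int> sockets_by_port; // port -> socket id, guarded by ports_lock

std::optional<uint16_t> allocate_local_port(int socket_id)
{
    constexpr uint16_t range_size = last_ephemeral_port - first_ephemeral_port;
    std::random_device rng;
    uint16_t first_scan_port = first_ephemeral_port + rng() % range_size;

    std::scoped_lock locker(ports_lock); // exclusive, like a MutexLocker without an explicit Mode
    for (uint16_t port = first_scan_port;;) {
        if (!sockets_by_port.count(port)) {
            sockets_by_port[port] = socket_id;
            return port;                 // found a free port
        }
        ++port;
        if (port > last_ephemeral_port)
            port = first_ephemeral_port; // wrap around past the end of the range
        if (port == first_scan_port)
            return std::nullopt;         // every ephemeral port is in use (EADDRINUSE)
    }
}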

View File

@ -198,7 +198,7 @@ KResult ProcFSProcessInformation::refresh_data(FileDescription& description) con
KResultOr<size_t> ProcFSExposedLink::read_bytes(off_t offset, size_t count, UserOrKernelBuffer& buffer, FileDescription*) const
{
VERIFY(offset == 0);
Locker locker(m_lock);
MutexLocker locker(m_lock);
KBufferBuilder builder;
if (!const_cast<ProcFSExposedLink&>(*this).acquire_link(builder))
return KResult(EFAULT);
@ -244,7 +244,7 @@ RefPtr<ProcFSExposedComponent> ProcFSExposedDirectory::lookup(StringView name)
KResult ProcFSExposedDirectory::traverse_as_directory(unsigned fsid, Function<bool(FileSystem::DirectoryEntryView const&)> callback) const
{
Locker locker(ProcFSComponentRegistry::the().get_lock());
MutexLocker locker(ProcFSComponentRegistry::the().get_lock());
auto parent_directory = m_parent_directory.strong_ref();
if (parent_directory.is_null())
return KResult(EINVAL);

View File

@ -88,7 +88,7 @@ private:
KResult ProcFSProcessStacks::traverse_as_directory(unsigned fsid, Function<bool(FileSystem::DirectoryEntryView const&)> callback) const
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
auto parent_directory = m_process_directory.strong_ref();
if (parent_directory.is_null())
return KResult(EINVAL);
@ -108,7 +108,7 @@ KResult ProcFSProcessStacks::traverse_as_directory(unsigned fsid, Function<bool(
RefPtr<ProcFSExposedComponent> ProcFSProcessStacks::lookup(StringView name)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
auto parent_directory = m_process_directory.strong_ref();
if (parent_directory.is_null())
return nullptr;
@ -184,7 +184,7 @@ private:
KResult ProcFSProcessFileDescriptions::traverse_as_directory(unsigned fsid, Function<bool(FileSystem::DirectoryEntryView const&)> callback) const
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
auto parent_directory = m_process_directory.strong_ref();
if (parent_directory.is_null())
return KResult(EINVAL);
@ -208,7 +208,7 @@ KResult ProcFSProcessFileDescriptions::traverse_as_directory(unsigned fsid, Func
}
RefPtr<ProcFSExposedComponent> ProcFSProcessFileDescriptions::lookup(StringView name)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
auto parent_directory = m_process_directory.strong_ref();
if (parent_directory.is_null())
return nullptr;
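
Several of the ProcFS hunks share one pattern: take the component's mutex, promote a weak reference to the parent directory, and bail out if the parent is already gone. A rough standard-C++ analogue, with std::weak_ptr standing in for the kernel's weak reference and the Directory/ProcessEntry types invented purely for the sketch:

// Rough analogue of the ProcFS lookup/traverse pattern above: lock first, then
// try to promote a weak parent pointer; if the parent is gone, fail gracefully.
#include <memory>
#include <mutex>
#include <string>

struct Directory {
    std::string name;
};

class ProcessEntry {
public:
    explicit ProcessEntry(std::weak_ptr<Directory> parent)
        : m_parent_directory(std::move(parent))
    {
    }

    std::shared_ptr<Directory> lookup_parent() const
    {
        std::scoped_lock locker(m_lock);                   // MutexLocker locker(m_lock);
        auto parent_directory = m_parent_directory.lock(); // strong_ref()
        if (!parent_directory)                             // is_null()
            return nullptr;
        return parent_directory;
    }

private:
    mutable std::mutex m_lock;
    std::weak_ptr<Directory> m_parent_directory;
};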

View File

@ -126,7 +126,7 @@ bool get_good_random_bytes(u8* buffer, size_t buffer_size, bool allow_wait, bool
if (can_wait && allow_wait) {
for (;;) {
{
Locker locker(KernelRng::the().lock());
MutexLocker locker(KernelRng::the().lock());
if (kernel_rng.resource().get_random_bytes(buffer, buffer_size)) {
result = true;
break;

View File

@ -95,7 +95,7 @@ void AHCIPort::handle_interrupt()
} else {
g_io_work->queue([this]() {
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request handled", representative_port_index());
Locker locker(m_lock);
MutexLocker locker(m_lock);
VERIFY(m_current_request);
VERIFY(m_current_scatter_list);
if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) {
@ -123,7 +123,7 @@ bool AHCIPort::is_interrupts_enabled() const
void AHCIPort::recover_from_fatal_error()
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
ScopedSpinLock lock(m_hard_lock);
dmesgln("{}: AHCI Port {} fatal error, shutting down!", m_parent_handler->hba_controller()->pci_address(), representative_port_index());
dmesgln("{}: AHCI Port {} fatal error, SError {}", m_parent_handler->hba_controller()->pci_address(), representative_port_index(), (u32)m_port_registers.serr);
@ -207,7 +207,7 @@ void AHCIPort::eject()
bool AHCIPort::reset()
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
ScopedSpinLock lock(m_hard_lock);
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Resetting", representative_port_index());
@ -232,7 +232,7 @@ bool AHCIPort::reset()
bool AHCIPort::initialize_without_reset()
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
ScopedSpinLock lock(m_hard_lock);
dmesgln("AHCI Port {}: {}", representative_port_index(), try_disambiguate_sata_status());
return initialize(lock);
@ -450,7 +450,7 @@ Optional<AsyncDeviceRequest::RequestResult> AHCIPort::prepare_and_set_scatter_li
void AHCIPort::start_request(AsyncBlockDeviceRequest& request)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
dbgln_if(AHCI_DEBUG, "AHCI Port {}: Request start", representative_port_index());
VERIFY(!m_current_request);
VERIFY(!m_current_scatter_list);
@ -653,7 +653,7 @@ bool AHCIPort::identify_device(ScopedSpinLock<SpinLock<u8>>& main_lock)
bool AHCIPort::shutdown()
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
ScopedSpinLock lock(m_hard_lock);
rebase();
set_interface_state(AHCI::DeviceDetectionInitialization::DisableInterface);
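
The AHCIPort hunks all pair the (now) MutexLocker with a ScopedSpinLock on a separate "hard" lock, in that order: blocking mutex first, busy-wait spinlock second for the register-level section. A simplified stand-in using standard primitives, shown only to illustrate the nesting order — the real kernel SpinLock also deals with interrupt state, which this sketch ignores:

// Simplified stand-in for the MutexLocker + ScopedSpinLock nesting seen in the
// AHCIPort hunks. Interrupt masking is deliberately omitted.
#include <atomic>
#include <mutex>

class SpinLock {
public:
    void lock()
    {
        while (m_flag.test_and_set(std::memory_order_acquire)) {
            // busy-wait until the flag is released
        }
    }
    void unlock() { m_flag.clear(std::memory_order_release); }

private:
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
};

class Port {
public:
    bool reset()
    {
        std::scoped_lock locker(m_lock);        // MutexLocker locker(m_lock);
        std::lock_guard hard_lock(m_hard_lock); // ScopedSpinLock lock(m_hard_lock);
        // ... touch the port registers while both locks are held ...
        return true;
    }

private:
    std::mutex m_lock;
    SpinLock m_hard_lock;
};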

View File

@ -107,7 +107,7 @@ UNMAP_AFTER_INIT IDEChannel::~IDEChannel()
void IDEChannel::start_request(AsyncBlockDeviceRequest& request, bool is_slave, u16 capabilities)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
VERIFY(m_current_request.is_null());
dbgln_if(PATA_DEBUG, "IDEChannel::start_request");
@ -134,7 +134,7 @@ void IDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult resu
// before Processor::deferred_call_queue returns!
g_io_work->queue([this, result]() {
dbgln_if(PATA_DEBUG, "IDEChannel::complete_current_request result: {}", (int)result);
Locker locker(m_lock);
MutexLocker locker(m_lock);
VERIFY(m_current_request);
auto current_request = m_current_request;
m_current_request.clear();
@ -222,7 +222,7 @@ bool IDEChannel::handle_irq(const RegisterState&)
// This is important so that we can safely access the buffers, which could
// trigger page faults
g_io_work->queue([this]() {
Locker locker(m_lock);
MutexLocker locker(m_lock);
ScopedSpinLock lock(m_request_lock);
if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) {
dbgln_if(PATA_DEBUG, "IDEChannel: Read block {}/{}", m_current_request_block_index, m_current_request->block_count());

View File

@ -35,7 +35,7 @@ StringView RamdiskDevice::class_name() const
void RamdiskDevice::start_request(AsyncBlockDeviceRequest& request)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
u8* base = m_region->vaddr().as_ptr();
size_t size = m_region->size();

View File

@ -525,7 +525,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
// We commit to the new executable at this point. There is no turning back!
// Prevent other processes from attaching to us with ptrace while we're doing this.
Locker ptrace_locker(ptrace_lock());
MutexLocker ptrace_locker(ptrace_lock());
// Disable profiling temporarily in case it's running on this process.
auto was_profiling = m_profiling;

View File

@ -16,7 +16,7 @@ KResultOr<FlatPtr> Process::sys$gethostname(Userspace<char*> buffer, size_t size
REQUIRE_PROMISE(stdio);
if (size > NumericLimits<ssize_t>::max())
return EINVAL;
Locker locker(*g_hostname_lock, Mutex::Mode::Shared);
MutexLocker locker(*g_hostname_lock, Mutex::Mode::Shared);
if (size < (g_hostname->length() + 1))
return ENAMETOOLONG;
if (!copy_to_user(buffer, g_hostname->characters(), g_hostname->length() + 1))
@ -29,7 +29,7 @@ KResultOr<FlatPtr> Process::sys$sethostname(Userspace<const char*> hostname, siz
REQUIRE_NO_PROMISES;
if (!is_superuser())
return EPERM;
Locker locker(*g_hostname_lock, Mutex::Mode::Exclusive);
MutexLocker locker(*g_hostname_lock, Mutex::Mode::Exclusive);
if (length > 64)
return ENAMETOOLONG;
auto copied_hostname = copy_string_from_user(hostname, length);
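
These hostname syscalls show the two Mutex::Mode variants side by side: the readers (gethostname, and uname below) take the lock Shared, while the writer (sethostname) takes it Exclusive. A hypothetical reader/writer pair over a guarded global, with std::shared_mutex and std::string standing in for g_hostname_lock and g_hostname, and a placeholder default value:

// Hypothetical sketch mirroring the Shared vs. Exclusive modes in the hunks
// above; not the kernel's actual hostname storage.
#include <shared_mutex>
#include <string>

std::shared_mutex g_hostname_lock;
std::string g_hostname = "localhost"; // placeholder default

std::string get_hostname()
{
    std::shared_lock locker(g_hostname_lock); // MutexLocker locker(*g_hostname_lock, Mutex::Mode::Shared);
    return g_hostname;
}

void set_hostname(const std::string& new_name)
{
    std::unique_lock locker(g_hostname_lock); // MutexLocker locker(*g_hostname_lock, Mutex::Mode::Exclusive);
    g_hostname = new_name;
}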

View File

@ -37,7 +37,7 @@ static KResultOr<u32> handle_ptrace(const Kernel::Syscall::SC_ptrace_params& par
if (!peer)
return ESRCH;
Locker ptrace_locker(peer->process().ptrace_lock());
MutexLocker ptrace_locker(peer->process().ptrace_lock());
if ((peer->process().uid() != caller.euid())
|| (peer->process().uid() != peer->process().euid())) // Disallow tracing setuid processes

View File

@ -15,7 +15,7 @@ KResultOr<FlatPtr> Process::sys$uname(Userspace<utsname*> user_buf)
REQUIRE_PROMISE(stdio);
Locker locker(*g_hostname_lock, Mutex::Mode::Shared);
MutexLocker locker(*g_hostname_lock, Mutex::Mode::Shared);
if (g_hostname->length() + 1 > sizeof(utsname::nodename))
return ENAMETOOLONG;

View File

@ -36,7 +36,7 @@ UNMAP_AFTER_INIT PTYMultiplexer::~PTYMultiplexer()
KResultOr<NonnullRefPtr<FileDescription>> PTYMultiplexer::open(int options)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
if (m_freelist.is_empty())
return EBUSY;
auto master_index = m_freelist.take_last();
@ -54,7 +54,7 @@ KResultOr<NonnullRefPtr<FileDescription>> PTYMultiplexer::open(int options)
void PTYMultiplexer::notify_master_destroyed(Badge<MasterPTY>, unsigned index)
{
Locker locker(m_lock);
MutexLocker locker(m_lock);
m_freelist.append(index);
dbgln_if(PTMX_DEBUG, "PTYMultiplexer: {} added to freelist", index);
}

View File

@ -166,7 +166,7 @@ AnonymousVMObject::~AnonymousVMObject()
int AnonymousVMObject::purge()
{
Locker locker(m_paging_lock);
MutexLocker locker(m_paging_lock);
return purge_impl();
}

View File

@ -53,7 +53,7 @@ size_t InodeVMObject::amount_dirty() const
int InodeVMObject::release_all_clean_pages()
{
Locker locker(m_paging_lock);
MutexLocker locker(m_paging_lock);
return release_all_clean_pages_impl();
}

View File

@ -539,7 +539,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region, Scoped
VERIFY(!s_mm_lock.own_lock());
VERIFY(!g_scheduler_lock.own_lock());
Locker locker(vmobject().m_paging_lock);
MutexLocker locker(vmobject().m_paging_lock);
mm_lock.lock();