/*
 * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/IntrusiveList.h>
#include <Kernel/Debug.h>
#include <Kernel/FileSystem/BlockBasedFileSystem.h>
#include <Kernel/Process.h>

namespace Kernel {

struct CacheEntry {
    IntrusiveListNode<CacheEntry> list_node;
    BlockBasedFileSystem::BlockIndex block_index { 0 };
    u8* data { nullptr };
    bool has_data { false };
};
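
// DiskCache is a simple write-back block cache. Every CacheEntry lives on
// exactly one of two intrusive LRU lists (m_clean_list or m_dirty_list), and
// m_hash maps block indices to entries for O(1) lookup. A rough sketch of how
// the read/write paths below use it (names as in this file):
//
//     m_cache.with_exclusive([&](auto& cache) -> ErrorOr<void> {
//         auto* entry = TRY(cache->ensure(index)); // look up, or evict the LRU clean entry
//         memcpy(entry->data + offset, ..., count); // mutate the cached block in place
//         cache->mark_dirty(*entry);                // queue the block for write-back
//         return {};
//     });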

class DiskCache {
public:
    static constexpr size_t EntryCount = 10000;

    explicit DiskCache(BlockBasedFileSystem& fs, NonnullOwnPtr<KBuffer> cached_block_data, NonnullOwnPtr<KBuffer> entries_buffer)
        : m_fs(fs)
        , m_cached_block_data(move(cached_block_data))
        , m_entries(move(entries_buffer))
    {
        for (size_t i = 0; i < EntryCount; ++i) {
            entries()[i].data = m_cached_block_data->data() + i * m_fs->block_size();
            m_clean_list.append(entries()[i]);
        }
    }
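
    // The constructor performs no heap allocation of its own: every CacheEntry
    // struct lives inside the entries_buffer KBuffer, and each entry's data
    // pointer is carved out of cached_block_data, block_size() bytes apart.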

    ~DiskCache() = default;

    bool is_dirty() const { return !m_dirty_list.is_empty(); }
    bool entry_is_dirty(CacheEntry const& entry) const { return m_dirty_list.contains(entry); }

    void mark_all_clean()
    {
        // NOTE: IntrusiveList::prepend() removes the entry from whichever list
        //       it is currently on, so this loop drains m_dirty_list.
        while (auto* entry = m_dirty_list.first())
            m_clean_list.prepend(*entry);
    }

    void mark_dirty(CacheEntry& entry)
    {
        m_dirty_list.prepend(entry);
    }

    void mark_clean(CacheEntry& entry)
    {
        m_clean_list.prepend(entry);
    }

    CacheEntry* get(BlockBasedFileSystem::BlockIndex block_index) const
    {
        auto it = m_hash.find(block_index);
        if (it == m_hash.end())
            return nullptr;
        auto& entry = const_cast<CacheEntry&>(*it->value);
        VERIFY(entry.block_index == block_index);
        if (!entry_is_dirty(entry) && (m_clean_list.first() != &entry)) {
            // Cache hit! Promote the entry to the front of the clean list.
            // (Dirty entries stay put: prepending one here would pull it off
            // the dirty list and silently lose its dirty state.)
            m_clean_list.prepend(entry);
        }
        return &entry;
    }

    ErrorOr<CacheEntry*> ensure(BlockBasedFileSystem::BlockIndex block_index) const
    {
        if (auto* entry = get(block_index))
            return entry;

        if (m_clean_list.is_empty()) {
            // Not a single clean entry! Flush writes and try again.
            // NOTE: We want to make sure we only call FileBackedFileSystem flush here,
            //       not some FileBackedFileSystem subclass flush!
            m_fs->flush_writes_impl();
            return ensure(block_index);
        }

        VERIFY(m_clean_list.last());
        auto& new_entry = *m_clean_list.last();
        m_clean_list.prepend(new_entry);

        m_hash.remove(new_entry.block_index);
        TRY(m_hash.try_set(block_index, &new_entry));

        new_entry.block_index = block_index;
        new_entry.has_data = false;

        return &new_entry;
    }
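
    // Eviction recycles the least-recently-used clean entry (the tail of
    // m_clean_list). After the flush above every entry is clean again, so the
    // recursive ensure() call can recurse at most once.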

    CacheEntry const* entries() const { return (CacheEntry const*)m_entries->data(); }
    CacheEntry* entries() { return (CacheEntry*)m_entries->data(); }

    template<typename Callback>
    void for_each_dirty_entry(Callback callback)
    {
        for (auto& entry : m_dirty_list)
            callback(entry);
    }

private:
    mutable NonnullRefPtr<BlockBasedFileSystem> m_fs;
    NonnullOwnPtr<KBuffer> m_cached_block_data;

    // NOTE: m_entries must be declared before m_dirty_list and m_clean_list because their entries are allocated from it.
    //       We need to ensure that the destructors of m_dirty_list and m_clean_list are called before m_entries is destroyed.
    NonnullOwnPtr<KBuffer> m_entries;
    mutable IntrusiveList<&CacheEntry::list_node> m_dirty_list;
    mutable IntrusiveList<&CacheEntry::list_node> m_clean_list;
    mutable HashMap<BlockBasedFileSystem::BlockIndex, CacheEntry*> m_hash;
};

BlockBasedFileSystem::BlockBasedFileSystem(OpenFileDescription& file_description)
    : FileBackedFileSystem(file_description)
{
    VERIFY(file_description.file().is_seekable());
}

BlockBasedFileSystem::~BlockBasedFileSystem() = default;

void BlockBasedFileSystem::remove_disk_cache_before_last_unmount()
{
    VERIFY(m_lock.is_locked());
    m_cache.with_exclusive([&](auto& cache) {
        cache.clear();
    });
}

ErrorOr<void> BlockBasedFileSystem::initialize_while_locked()
{
    VERIFY(m_lock.is_locked());
    VERIFY(!is_initialized_while_locked());
    VERIFY(block_size() != 0);
    auto cached_block_data = TRY(KBuffer::try_create_with_size("BlockBasedFS: Cache blocks"sv, DiskCache::EntryCount * block_size()));
    auto entries_data = TRY(KBuffer::try_create_with_size("BlockBasedFS: Cache entries"sv, DiskCache::EntryCount * sizeof(CacheEntry)));
    auto disk_cache = TRY(adopt_nonnull_own_or_enomem(new (nothrow) DiskCache(*this, move(cached_block_data), move(entries_data))));

    m_cache.with_exclusive([&](auto& cache) {
        cache = move(disk_cache);
    });
    return {};
}
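
// With DiskCache::EntryCount == 10000, initialize_while_locked() reserves
// 10000 * block_size() bytes of block data up front (roughly 10 MiB for a
// typical 1 KiB filesystem block size) plus 10000 CacheEntry structs.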

ErrorOr<void> BlockBasedFileSystem::write_block(BlockIndex index, UserOrKernelBuffer const& data, size_t count, u64 offset, bool allow_cache)
{
    VERIFY(m_logical_block_size);
    VERIFY(offset + count <= block_size());
    dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::write_block {}, size={}", index, count);

    // NOTE: We copy the `data` to write into a local buffer before taking the cache lock.
    //       This makes sure any page faults caused by accessing the data will occur before
    //       we tie down the cache.
    auto buffered_data = TRY(ByteBuffer::create_uninitialized(count));
    TRY(data.read(buffered_data.bytes()));

    return m_cache.with_exclusive([&](auto& cache) -> ErrorOr<void> {
        if (!allow_cache) {
            flush_specific_block_if_needed(index);
            u64 base_offset = index.value() * block_size() + offset;
            auto nwritten = TRY(file_description().write(base_offset, data, count));
            VERIFY(nwritten == count);
            return {};
        }

        auto entry = TRY(cache->ensure(index));
        if (count < block_size()) {
            // Fill the cache first.
            TRY(read_block(index, nullptr, block_size()));
        }
        memcpy(entry->data + offset, buffered_data.data(), count);

        cache->mark_dirty(*entry);
        entry->has_data = true;
        return {};
    });
}
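
// Cached writes are write-back: write_block() only updates the in-memory entry
// and marks it dirty; the data reaches the underlying file when one of the
// flush paths below runs. A partial write (count < block_size()) first reads
// the whole block into the cache, making the memcpy() a read-modify-write.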

ErrorOr<void> BlockBasedFileSystem::raw_read(BlockIndex index, UserOrKernelBuffer& buffer)
{
    auto base_offset = index.value() * m_logical_block_size;
    auto nread = TRY(file_description().read(buffer, base_offset, m_logical_block_size));
    VERIFY(nread == m_logical_block_size);
    return {};
}

ErrorOr<void> BlockBasedFileSystem::raw_write(BlockIndex index, UserOrKernelBuffer const& buffer)
{
    auto base_offset = index.value() * m_logical_block_size;
    auto nwritten = TRY(file_description().write(base_offset, buffer, m_logical_block_size));
    VERIFY(nwritten == m_logical_block_size);
    return {};
}

ErrorOr<void> BlockBasedFileSystem::raw_read_blocks(BlockIndex index, size_t count, UserOrKernelBuffer& buffer)
{
    auto current = buffer;
    for (auto block = index.value(); block < (index.value() + count); block++) {
        TRY(raw_read(BlockIndex { block }, current));
        current = current.offset(logical_block_size());
    }
    return {};
}

ErrorOr<void> BlockBasedFileSystem::raw_write_blocks(BlockIndex index, size_t count, UserOrKernelBuffer const& buffer)
{
    auto current = buffer;
    for (auto block = index.value(); block < (index.value() + count); block++) {
        TRY(raw_write(BlockIndex { block }, current));
        current = current.offset(logical_block_size());
    }
    return {};
}
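
// The raw_* helpers above operate on logical (device) blocks and never touch
// the DiskCache, whereas read_block() and write_block() operate on filesystem
// blocks and normally go through it.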

ErrorOr<void> BlockBasedFileSystem::write_blocks(BlockIndex index, unsigned count, UserOrKernelBuffer const& data, bool allow_cache)
{
    VERIFY(m_logical_block_size);
    dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::write_blocks {}, count={}", index, count);
    for (unsigned i = 0; i < count; ++i) {
        TRY(write_block(BlockIndex { index.value() + i }, data.offset(i * block_size()), block_size(), 0, allow_cache));
    }
    return {};
}

ErrorOr<void> BlockBasedFileSystem::read_block(BlockIndex index, UserOrKernelBuffer* buffer, size_t count, u64 offset, bool allow_cache) const
{
    VERIFY(m_logical_block_size);
    VERIFY(offset + count <= block_size());
    dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::read_block {}", index);

    return m_cache.with_exclusive([&](auto& cache) -> ErrorOr<void> {
        if (!allow_cache) {
            const_cast<BlockBasedFileSystem*>(this)->flush_specific_block_if_needed(index);
            u64 base_offset = index.value() * block_size() + offset;
            auto nread = TRY(file_description().read(*buffer, base_offset, count));
            VERIFY(nread == count);
            return {};
        }

        auto* entry = TRY(cache->ensure(index));
        if (!entry->has_data) {
            auto base_offset = index.value() * block_size();
            auto entry_data_buffer = UserOrKernelBuffer::for_kernel_buffer(entry->data);
            auto nread = TRY(file_description().read(entry_data_buffer, base_offset, block_size()));
            VERIFY(nread == block_size());
            entry->has_data = true;
        }
        if (buffer)
            TRY(buffer->write(entry->data + offset, count));
        return {};
    });
}
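
// NOTE: read_block() tolerates a null `buffer`; write_block() relies on that
// to populate a cache entry without copying the block anywhere (see
// "Fill the cache first" above).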

ErrorOr<void> BlockBasedFileSystem::read_blocks(BlockIndex index, unsigned count, UserOrKernelBuffer& buffer, bool allow_cache) const
{
    VERIFY(m_logical_block_size);
    if (!count)
        return EINVAL;
    if (count == 1)
        return read_block(index, &buffer, block_size(), 0, allow_cache);
    auto out = buffer;
    for (unsigned i = 0; i < count; ++i) {
        TRY(read_block(BlockIndex { index.value() + i }, &out, block_size(), 0, allow_cache));
        out = out.offset(block_size());
    }

    return {};
}

void BlockBasedFileSystem::flush_specific_block_if_needed(BlockIndex index)
{
    m_cache.with_exclusive([&](auto& cache) {
        if (!cache->is_dirty())
            return;
        auto* entry = cache->get(index);
        if (!entry)
            return;
        if (!cache->entry_is_dirty(*entry))
            return;
        u64 base_offset = entry->block_index.value() * block_size();
        auto entry_data_buffer = UserOrKernelBuffer::for_kernel_buffer(entry->data);
        (void)file_description().write(base_offset, entry_data_buffer, block_size());
        // Move the entry to the clean list and drop its cached data now that it
        // has been written out. Otherwise a later flush_writes_impl() would
        // overwrite (and cached reads would return stale data over) any uncached
        // write the caller performs right after this flush.
        cache->mark_clean(*entry);
        entry->has_data = false;
    });
}

void BlockBasedFileSystem::flush_writes_impl()
{
    size_t count = 0;
    m_cache.with_exclusive([&](auto& cache) {
        if (!cache->is_dirty())
            return;
        cache->for_each_dirty_entry([&](CacheEntry& entry) {
            auto base_offset = entry.block_index.value() * block_size();
            auto entry_data_buffer = UserOrKernelBuffer::for_kernel_buffer(entry.data);
            [[maybe_unused]] auto rc = file_description().write(base_offset, entry_data_buffer, block_size());
            ++count;
        });
        cache->mark_all_clean();
        dbgln("{}: Flushed {} blocks to disk", class_name(), count);
    });
}
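
// NOTE: flush_writes_impl() is kept separate from the virtual flush_writes()
// below so that DiskCache::ensure() can force a flush without invoking any
// subclass override (see the NOTE in DiskCache::ensure()).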

void BlockBasedFileSystem::flush_writes()
{
    flush_writes_impl();
}

}