/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Memory/InodeVMObject.h>

namespace Kernel::Memory {

InodeVMObject::InodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
    : VMObject(move(new_physical_pages))
    , m_inode(inode)
    , m_dirty_pages(move(dirty_pages))
{
}

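// The new object refers to the same inode as `other` and copies its dirty-page bits.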
InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages, Bitmap dirty_pages)
    : VMObject(move(new_physical_pages))
    , m_inode(other.m_inode)
    , m_dirty_pages(move(dirty_pages))
{
    for (size_t i = 0; i < page_count(); ++i)
        m_dirty_pages.set(i, other.m_dirty_pages.get(i));
}

InodeVMObject::~InodeVMObject() = default;

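// Number of bytes occupied by resident pages that are not marked dirty.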
size_t InodeVMObject::amount_clean() const
{
    size_t count = 0;
    VERIFY(page_count() == m_dirty_pages.size());
    for (size_t i = 0; i < page_count(); ++i) {
        if (!m_dirty_pages.get(i) && m_physical_pages[i])
            ++count;
    }
    return count * PAGE_SIZE;
}

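// Number of bytes covered by pages whose dirty bit is set.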
size_t InodeVMObject::amount_dirty() const
{
    size_t count = 0;
    for (size_t i = 0; i < m_dirty_pages.size(); ++i) {
        if (m_dirty_pages.get(i))
            ++count;
    }
    return count * PAGE_SIZE;
}

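// Releases every resident page that is not marked dirty, then remaps all regions
// mapping this object. Returns the number of pages released.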
int InodeVMObject::release_all_clean_pages()
{
    SpinlockLocker locker(m_lock);

    int count = 0;
    for (size_t i = 0; i < page_count(); ++i) {
        if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
            m_physical_pages[i] = nullptr;
            ++count;
        }
    }
    if (count) {
        for_each_region([](auto& region) {
            region.remap();
        });
    }
    return count;
}

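// Same as release_all_clean_pages(), but releases at most page_amount pages.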
int InodeVMObject::try_release_clean_pages(int page_amount)
{
    SpinlockLocker locker(m_lock);

    int count = 0;
    for (size_t i = 0; i < page_count() && count < page_amount; ++i) {
        if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
            m_physical_pages[i] = nullptr;
            ++count;
        }
    }
    if (count) {
        for_each_region([](auto& region) {
            region.remap();
        });
    }
    return count;
}

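// Returns the number of regions that map this object with write access.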
u32 InodeVMObject::writable_mappings() const
{
    u32 count = 0;
    const_cast<InodeVMObject&>(*this).for_each_region([&](auto& region) {
        if (region.is_writable())
            ++count;
    });
    return count;
}

}