/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
#include <Kernel/FileSystem/Inode.h>
#include <Kernel/Memory/InodeVMObject.h>
namespace Kernel::Memory {
// Constructs an InodeVMObject backed by `inode`, adopting the given set of
// (possibly-null) physical page slots. The per-page dirty bitmap starts out
// with every bit clear, i.e. all pages are considered clean.
InodeVMObject::InodeVMObject(Inode& inode, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
    : VMObject(move(new_physical_pages))
    , m_inode(inode)
    // One bit per page, initialized to false (clean). As the helper's name
    // admits, an allocation failure here is not propagated to the caller.
    , m_dirty_pages(Bitmap::try_create(page_count(), false).release_value_but_fixme_should_propagate_errors())
{
}
// Constructs an InodeVMObject sharing `other`'s backing inode, adopting the
// given physical page slots, and mirroring `other`'s per-page dirty state.
InodeVMObject::InodeVMObject(InodeVMObject const& other, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
    : VMObject(move(new_physical_pages))
    , m_inode(other.m_inode)
    , m_dirty_pages(Bitmap::try_create(page_count(), false).release_value_but_fixme_should_propagate_errors())
{
    // Copy the source object's dirty bits one page at a time into our
    // freshly created (all-clean) bitmap.
    for (size_t page_index = 0; page_index < page_count(); ++page_index) {
        bool const page_is_dirty = other.m_dirty_pages.get(page_index);
        m_dirty_pages.set(page_index, page_is_dirty);
    }
}
// InodeVMObject performs no manual cleanup of its own; all members release
// their resources automatically. Defaulting the destructor out-of-line says
// so explicitly while keeping the definition in this translation unit (the
// declaration in the header is unchanged).
InodeVMObject::~InodeVMObject() = default;
// Returns the number of bytes occupied by resident pages that are not
// marked dirty (i.e. pages that could be released without data loss).
size_t InodeVMObject::amount_clean() const
{
    VERIFY(page_count() == m_dirty_pages.size());
    size_t clean_page_count = 0;
    for (size_t page_index = 0; page_index < page_count(); ++page_index) {
        // A page counts as clean only if it is actually resident and its
        // dirty bit is clear.
        if (m_physical_pages[page_index] && !m_dirty_pages.get(page_index))
            ++clean_page_count;
    }
    return clean_page_count * PAGE_SIZE;
}
// Returns the number of bytes covered by pages whose dirty bit is set.
size_t InodeVMObject::amount_dirty() const
{
    size_t dirty_page_count = 0;
    size_t const bit_count = m_dirty_pages.size();
    for (size_t page_index = 0; page_index < bit_count; ++page_index) {
        if (m_dirty_pages.get(page_index))
            ++dirty_page_count;
    }
    return dirty_page_count * PAGE_SIZE;
}
int InodeVMObject::release_all_clean_pages()
|
|
|
|
{
|
2021-08-22 02:49:22 +03:00
|
|
|
SpinlockLocker locker(m_lock);
|
2020-02-28 22:20:35 +03:00
|
|
|
|
|
|
|
int count = 0;
|
|
|
|
for (size_t i = 0; i < page_count(); ++i) {
|
|
|
|
if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
|
|
|
|
m_physical_pages[i] = nullptr;
|
|
|
|
++count;
|
|
|
|
}
|
|
|
|
}
|
2021-07-23 03:40:16 +03:00
|
|
|
if (count) {
|
|
|
|
for_each_region([](auto& region) {
|
|
|
|
region.remap();
|
|
|
|
});
|
|
|
|
}
|
2020-02-28 22:20:35 +03:00
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Counts the regions mapping this VMObject that are currently writable.
u32 InodeVMObject::writable_mappings() const
{
    u32 writable_count = 0;
    // for_each_region is non-const, so shed constness for this read-only walk.
    auto& mutable_self = const_cast<InodeVMObject&>(*this);
    mutable_self.for_each_region([&writable_count](auto& region) {
        if (region.is_writable())
            ++writable_count;
    });
    return writable_count;
}
// Counts the regions mapping this VMObject that are currently executable.
u32 InodeVMObject::executable_mappings() const
{
    u32 executable_count = 0;
    // for_each_region is non-const, so shed constness for this read-only walk.
    auto& mutable_self = const_cast<InodeVMObject&>(*this);
    mutable_self.for_each_region([&executable_count](auto& region) {
        if (region.is_executable())
            ++executable_count;
    });
    return executable_count;
}
}
|