From 651417a08598a8899e14a7975574315d10eb385b Mon Sep 17 00:00:00 2001
From: Andreas Kling <kling@serenityos.org>
Date: Fri, 28 Feb 2020 20:20:35 +0100
Subject: [PATCH] Kernel: Split InodeVMObject into two subclasses

We now have PrivateInodeVMObject and SharedInodeVMObject, corresponding
to MAP_PRIVATE and MAP_SHARED respectively.

Note that PrivateInodeVMObject is not used yet.
---
 Kernel/Makefile                    |   2 +
 Kernel/Process.cpp                 |   4 +-
 Kernel/VM/InodeVMObject.cpp        | 182 +++++++++++++++++++++++++++++
 Kernel/VM/InodeVMObject.h          |  69 +++++++++++
 Kernel/VM/PrivateInodeVMObject.cpp |  56 +++++++++
 Kernel/VM/PrivateInodeVMObject.h   |  51 ++++++++
 Kernel/VM/Region.cpp               |   2 +-
 Kernel/VM/SharedInodeVMObject.cpp  | 142 +---------------------
 Kernel/VM/SharedInodeVMObject.h    |  29 +----
 9 files changed, 371 insertions(+), 166 deletions(-)
 create mode 100644 Kernel/VM/InodeVMObject.cpp
 create mode 100644 Kernel/VM/InodeVMObject.h
 create mode 100644 Kernel/VM/PrivateInodeVMObject.cpp
 create mode 100644 Kernel/VM/PrivateInodeVMObject.h

diff --git a/Kernel/Makefile b/Kernel/Makefile
index 767fdb78c1d..f86922cc79c 100644
--- a/Kernel/Makefile
+++ b/Kernel/Makefile
@@ -101,11 +101,13 @@ OBJS = \
     TTY/VirtualConsole.o \
     Thread.o \
     VM/AnonymousVMObject.o \
+    VM/InodeVMObject.o \
     VM/MemoryManager.o \
     VM/PageDirectory.o \
     VM/PhysicalPage.o \
     VM/PhysicalRegion.o \
     VM/PurgeableVMObject.o \
+    VM/PrivateInodeVMObject.o \
     VM/RangeAllocator.o \
     VM/Region.o \
     VM/SharedInodeVMObject.o \
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index c2a5101d529..51350d9d38b 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -663,12 +663,12 @@ int Process::sys$purge(int mode)
         }
     }
     if (mode & PURGE_ALL_CLEAN_INODE) {
-        NonnullRefPtrVector<SharedInodeVMObject> vmobjects;
+        NonnullRefPtrVector<InodeVMObject> vmobjects;
         {
             InterruptDisabler disabler;
             MM.for_each_vmobject([&](auto& vmobject) {
                 if (vmobject.is_inode())
-                    vmobjects.append(static_cast<SharedInodeVMObject&>(vmobject));
+                    vmobjects.append(static_cast<InodeVMObject&>(vmobject));
                 return IterationDecision::Continue;
             });
         }
diff --git a/Kernel/VM/InodeVMObject.cpp b/Kernel/VM/InodeVMObject.cpp
new file mode 100644
index 00000000000..0e027fe501a
--- /dev/null
+++ b/Kernel/VM/InodeVMObject.cpp
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ *    list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <Kernel/FileSystem/Inode.h>
+#include <Kernel/VM/InodeVMObject.h>
+#include <Kernel/VM/MemoryManager.h>
+#include <Kernel/VM/Region.h>
+
+namespace Kernel {
+
+InodeVMObject::InodeVMObject(Inode& inode, size_t size)
+    : VMObject(size)
+    , m_inode(inode)
+    , m_dirty_pages(page_count(), false)
+{
+}
+
+InodeVMObject::InodeVMObject(const InodeVMObject& other)
+    : VMObject(other)
+    , m_inode(other.m_inode)
+{
+}
+
+InodeVMObject::~InodeVMObject()
+{
+}
+
+size_t InodeVMObject::amount_clean() const
+{
+    size_t count = 0;
+    ASSERT(page_count() == (size_t)m_dirty_pages.size());
+    for (size_t i = 0; i < page_count(); ++i) {
+        if (!m_dirty_pages.get(i) && m_physical_pages[i])
+            ++count;
+    }
+    return count * PAGE_SIZE;
+}
+
+size_t InodeVMObject::amount_dirty() const
+{
+    size_t count = 0;
+    for (size_t i = 0; i < m_dirty_pages.size(); ++i) {
+        if (m_dirty_pages.get(i))
+            ++count;
+    }
+    return count * PAGE_SIZE;
+}
+
+void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
+{
+    dbg() << "VMObject::inode_size_changed: {" << m_inode->fsid() << ":" << m_inode->index() << "} " << old_size << " -> " << new_size;
+
+    InterruptDisabler disabler;
+
+    auto new_page_count = PAGE_ROUND_UP(new_size) / PAGE_SIZE;
+    m_physical_pages.resize(new_page_count);
+
+    m_dirty_pages.grow(new_page_count, false);
+
+    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
+    for_each_region([](auto& region) {
+        region.remap();
+    });
+}
+
+void InodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
+{
+    (void)size;
+    (void)data;
+    InterruptDisabler disabler;
+    ASSERT(offset >= 0);
+
+    // FIXME: Only invalidate the parts that actually changed.
+    for (auto& physical_page : m_physical_pages)
+        physical_page = nullptr;
+
+#if 0
+    size_t current_offset = offset;
+    size_t remaining_bytes = size;
+    const u8* data_ptr = data;
+
+    auto to_page_index = [] (size_t offset) -> size_t {
+        return offset / PAGE_SIZE;
+    };
+
+    if (current_offset & PAGE_MASK) {
+        size_t page_index = to_page_index(current_offset);
+        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
+        if (m_physical_pages[page_index]) {
+            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
+            memcpy(ptr, data_ptr, bytes_to_copy);
+            MM.unquickmap_page();
+        }
+        current_offset += bytes_to_copy;
+        data += bytes_to_copy;
+        remaining_bytes -= bytes_to_copy;
+    }
+
+    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
+        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
+        if (m_physical_pages[page_index]) {
+            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
+            memcpy(ptr, data_ptr, bytes_to_copy);
+            MM.unquickmap_page();
+        }
+        current_offset += bytes_to_copy;
+        data += bytes_to_copy;
+    }
+#endif
+
+    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
+    for_each_region([](auto& region) {
+        region.remap();
+    });
+}
+
+int InodeVMObject::release_all_clean_pages()
+{
+    LOCKER(m_paging_lock);
+    return release_all_clean_pages_impl();
+}
+
+int InodeVMObject::release_all_clean_pages_impl()
+{
+    int count = 0;
+    InterruptDisabler disabler;
+    for (size_t i = 0; i < page_count(); ++i) {
+        if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
+            m_physical_pages[i] = nullptr;
+            ++count;
+        }
+    }
+    for_each_region([](auto& region) {
+        region.remap();
+    });
+    return count;
+}
+
+u32 InodeVMObject::writable_mappings() const
+{
+    u32 count = 0;
+    const_cast<InodeVMObject&>(*this).for_each_region([&](auto& region) {
+        if (region.is_writable())
+            ++count;
+    });
+    return count;
+}
+
+u32 InodeVMObject::executable_mappings() const
+{
+    u32 count = 0;
+    const_cast<InodeVMObject&>(*this).for_each_region([&](auto& region) {
+        if (region.is_executable())
+            ++count;
+    });
+    return count;
+}
+
+}
diff --git a/Kernel/VM/InodeVMObject.h b/Kernel/VM/InodeVMObject.h
new file mode 100644
index 00000000000..5ec9013bacb
--- /dev/null
+++ b/Kernel/VM/InodeVMObject.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ *    list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <AK/Bitmap.h>
+#include <Kernel/UnixTypes.h>
+#include <Kernel/VM/VMObject.h>
+
+namespace Kernel {
+
+class InodeVMObject : public VMObject {
+public:
+    virtual ~InodeVMObject() override;
+
+    Inode& inode() { return *m_inode; }
+    const Inode& inode() const { return *m_inode; }
+
+    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const u8*);
+    void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
+
+    size_t amount_dirty() const;
+    size_t amount_clean() const;
+
+    int release_all_clean_pages();
+
+    u32 writable_mappings() const;
+    u32 executable_mappings() const;
+
+protected:
+    explicit InodeVMObject(Inode&, size_t);
+    explicit InodeVMObject(const InodeVMObject&);
+
+    InodeVMObject& operator=(const InodeVMObject&) = delete;
+    InodeVMObject& operator=(InodeVMObject&&) = delete;
+    InodeVMObject(InodeVMObject&&) = delete;
+
+    virtual bool is_inode() const final { return true; }
+
+    int release_all_clean_pages_impl();
+
+    NonnullRefPtr<Inode> m_inode;
+    Bitmap m_dirty_pages;
+};
+
+}
diff --git a/Kernel/VM/PrivateInodeVMObject.cpp b/Kernel/VM/PrivateInodeVMObject.cpp
new file mode 100644
index 00000000000..66ca77a7235
--- /dev/null
+++ b/Kernel/VM/PrivateInodeVMObject.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ *    list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <Kernel/FileSystem/Inode.h>
+#include <Kernel/VM/PrivateInodeVMObject.h>
+
+namespace Kernel {
+
+NonnullRefPtr<PrivateInodeVMObject> PrivateInodeVMObject::create_with_inode(Inode& inode)
+{
+    return adopt(*new PrivateInodeVMObject(inode, inode.size()));
+}
+
+NonnullRefPtr<VMObject> PrivateInodeVMObject::clone()
+{
+    return adopt(*new PrivateInodeVMObject(*this));
+}
+
+PrivateInodeVMObject::PrivateInodeVMObject(Inode& inode, size_t size)
+    : InodeVMObject(inode, size)
+{
+}
+
+PrivateInodeVMObject::PrivateInodeVMObject(const PrivateInodeVMObject& other)
+    : InodeVMObject(other)
+{
+}
+
+PrivateInodeVMObject::~PrivateInodeVMObject()
+{
+}
+
+}
diff --git a/Kernel/VM/PrivateInodeVMObject.h b/Kernel/VM/PrivateInodeVMObject.h
new file mode 100644
index 00000000000..4a1d8a87a49
--- /dev/null
+++ b/Kernel/VM/PrivateInodeVMObject.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ *    list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <AK/Bitmap.h>
+#include <Kernel/UnixTypes.h>
+#include <Kernel/VM/InodeVMObject.h>
+
+namespace Kernel {
+
+class PrivateInodeVMObject final : public InodeVMObject {
+    AK_MAKE_NONMOVABLE(PrivateInodeVMObject);
+
+public:
+    virtual ~PrivateInodeVMObject() override;
+
+    static NonnullRefPtr<PrivateInodeVMObject> create_with_inode(Inode&);
+    virtual NonnullRefPtr<VMObject> clone() override;
+
+private:
+    explicit PrivateInodeVMObject(Inode&, size_t);
+    explicit PrivateInodeVMObject(const PrivateInodeVMObject&);
+
+    PrivateInodeVMObject& operator=(const PrivateInodeVMObject&) = delete;
+};
+
+}
diff --git a/Kernel/VM/Region.cpp b/Kernel/VM/Region.cpp
index 70916b76720..070f58afaf0 100644
--- a/Kernel/VM/Region.cpp
+++ b/Kernel/VM/Region.cpp
@@ -452,7 +452,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     LOCKER(vmobject().m_paging_lock);
     cli();
 
-    auto& inode_vmobject = static_cast<SharedInodeVMObject&>(vmobject());
+    auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
     auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[first_page_index() + page_index_in_region];
 
 #ifdef PAGE_FAULT_DEBUG
diff --git a/Kernel/VM/SharedInodeVMObject.cpp b/Kernel/VM/SharedInodeVMObject.cpp
index 55e3de8c6d2..c1fb4c09263 100644
--- a/Kernel/VM/SharedInodeVMObject.cpp
+++ b/Kernel/VM/SharedInodeVMObject.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -25,9 +25,9 @@
  */
 
 #include <Kernel/FileSystem/Inode.h>
-#include <Kernel/VM/MemoryManager.h>
 #include <Kernel/VM/Region.h>
 #include <Kernel/VM/SharedInodeVMObject.h>
+#include <Kernel/VM/InodeVMObject.h>
 
 namespace Kernel {
 
@@ -47,15 +47,12 @@ NonnullRefPtr<VMObject> SharedInodeVMObject::clone()
 }
 
 SharedInodeVMObject::SharedInodeVMObject(Inode& inode, size_t size)
-    : VMObject(size)
-    , m_inode(inode)
-    , m_dirty_pages(page_count(), false)
+    : InodeVMObject(inode, size)
 {
 }
 
 SharedInodeVMObject::SharedInodeVMObject(const SharedInodeVMObject& other)
-    : VMObject(other)
-    , m_inode(other.m_inode)
+    : InodeVMObject(other)
 {
 }
 
@@ -64,135 +61,4 @@ SharedInodeVMObject::~SharedInodeVMObject()
 {
     ASSERT(inode().shared_vmobject() == this);
 }
 
-size_t SharedInodeVMObject::amount_clean() const
-{
-    size_t count = 0;
-    ASSERT(page_count() == (size_t)m_dirty_pages.size());
-    for (size_t i = 0; i < page_count(); ++i) {
-        if (!m_dirty_pages.get(i) && m_physical_pages[i])
-            ++count;
-    }
-    return count * PAGE_SIZE;
-}
-
-size_t SharedInodeVMObject::amount_dirty() const
-{
-    size_t count = 0;
-    for (size_t i = 0; i < m_dirty_pages.size(); ++i) {
-        if (m_dirty_pages.get(i))
-            ++count;
-    }
-    return count * PAGE_SIZE;
-}
-
-void SharedInodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size)
-{
-    dbg() << "VMObject::inode_size_changed: {" << m_inode->fsid() << ":" << m_inode->index() << "} " << old_size << " -> " << new_size;
-
-    InterruptDisabler disabler;
-
-    auto new_page_count = PAGE_ROUND_UP(new_size) / PAGE_SIZE;
-    m_physical_pages.resize(new_page_count);
-
-    m_dirty_pages.grow(new_page_count, false);
-
-    // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
-    for_each_region([](auto& region) {
-        region.remap();
-    });
-}
-
-void SharedInodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size, const u8* data)
-{
-    (void)size;
-    (void)data;
-    InterruptDisabler disabler;
-    ASSERT(offset >= 0);
-
-    // FIXME: Only invalidate the parts that actually changed.
-    for (auto& physical_page : m_physical_pages)
-        physical_page = nullptr;
-
-#if 0
-    size_t current_offset = offset;
-    size_t remaining_bytes = size;
-    const u8* data_ptr = data;
-
-    auto to_page_index = [] (size_t offset) -> size_t {
-        return offset / PAGE_SIZE;
-    };
-
-    if (current_offset & PAGE_MASK) {
-        size_t page_index = to_page_index(current_offset);
-        size_t bytes_to_copy = min(size, PAGE_SIZE - (current_offset & PAGE_MASK));
-        if (m_physical_pages[page_index]) {
-            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
-            memcpy(ptr, data_ptr, bytes_to_copy);
-            MM.unquickmap_page();
-        }
-        current_offset += bytes_to_copy;
-        data += bytes_to_copy;
-        remaining_bytes -= bytes_to_copy;
-    }
-
-    for (size_t page_index = to_page_index(current_offset); page_index < m_physical_pages.size(); ++page_index) {
-        size_t bytes_to_copy = PAGE_SIZE - (current_offset & PAGE_MASK);
-        if (m_physical_pages[page_index]) {
-            auto* ptr = MM.quickmap_page(*m_physical_pages[page_index]);
-            memcpy(ptr, data_ptr, bytes_to_copy);
-            MM.unquickmap_page();
-        }
-        current_offset += bytes_to_copy;
-        data += bytes_to_copy;
-    }
-#endif
-
-    // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
-    for_each_region([](auto& region) {
-        region.remap();
-    });
-}
-
-int SharedInodeVMObject::release_all_clean_pages()
-{
-    LOCKER(m_paging_lock);
-    return release_all_clean_pages_impl();
-}
-
-int SharedInodeVMObject::release_all_clean_pages_impl()
-{
-    int count = 0;
-    InterruptDisabler disabler;
-    for (size_t i = 0; i < page_count(); ++i) {
-        if (!m_dirty_pages.get(i) && m_physical_pages[i]) {
-            m_physical_pages[i] = nullptr;
-            ++count;
-        }
-    }
-    for_each_region([](auto& region) {
-        region.remap();
-    });
-    return count;
-}
-
-u32 SharedInodeVMObject::writable_mappings() const
-{
-    u32 count = 0;
-    const_cast<SharedInodeVMObject&>(*this).for_each_region([&](auto& region) {
-        if (region.is_writable())
-            ++count;
-    });
-    return count;
-}
-
-u32 SharedInodeVMObject::executable_mappings() const
-{
-    u32 count = 0;
-    const_cast<SharedInodeVMObject&>(*this).for_each_region([&](auto& region) {
-        if (region.is_executable())
-            ++count;
-    });
-    return count;
-}
-
 }
diff --git a/Kernel/VM/SharedInodeVMObject.h b/Kernel/VM/SharedInodeVMObject.h
index fd5f74a99c9..c8c33206c93 100644
--- a/Kernel/VM/SharedInodeVMObject.h
+++ b/Kernel/VM/SharedInodeVMObject.h
@@ -28,45 +28,24 @@
 
 #include <AK/Bitmap.h>
 #include <Kernel/UnixTypes.h>
-#include <Kernel/VM/VMObject.h>
+#include <Kernel/VM/InodeVMObject.h>
 
 namespace Kernel {
 
-class SharedInodeVMObject final : public VMObject {
+class SharedInodeVMObject final : public InodeVMObject {
+    AK_MAKE_NONMOVABLE(SharedInodeVMObject);
+
 public:
     virtual ~SharedInodeVMObject() override;
 
     static NonnullRefPtr<SharedInodeVMObject> create_with_inode(Inode&);
     virtual NonnullRefPtr<VMObject> clone() override;
 
-    Inode& inode() { return *m_inode; }
-    const Inode& inode() const { return *m_inode; }
-
-    void inode_contents_changed(Badge<Inode>, off_t, ssize_t, const u8*);
-    void inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size);
-
-    size_t amount_dirty() const;
-    size_t amount_clean() const;
-
-    int release_all_clean_pages();
-
-    u32 writable_mappings() const;
-    u32 executable_mappings() const;
-
 private:
     explicit SharedInodeVMObject(Inode&, size_t);
    explicit SharedInodeVMObject(const SharedInodeVMObject&);
 
     SharedInodeVMObject& operator=(const SharedInodeVMObject&) = delete;
-    SharedInodeVMObject& operator=(SharedInodeVMObject&&) = delete;
-    SharedInodeVMObject(SharedInodeVMObject&&) = delete;
-
-    virtual bool is_inode() const override { return true; }
-
-    int release_all_clean_pages_impl();
-
-    NonnullRefPtr<Inode> m_inode;
-    Bitmap m_dirty_pages;
 };
 
 }
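
The split described in the commit message is the classic mmap() distinction: a MAP_SHARED file mapping writes through to the inode, so stores become visible to the file and to every other process mapping it, while a MAP_PRIVATE mapping hands the process copy-on-write pages that the inode never sees. The following is a minimal userspace sketch of those semantics. It is ordinary POSIX code, not SerenityOS kernel code, and not part of the patch above; the file name "demo.bin" and the four-byte payload are invented for the example.

// mmap_demo.cpp: the userspace-visible behavior that SharedInodeVMObject
// and PrivateInodeVMObject are intended to back kernel-side.
#include <cstdio>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main()
{
    // Create a small file to back both mappings ("demo.bin" is arbitrary).
    int fd = open("demo.bin", O_RDWR | O_CREAT | O_TRUNC, 0644);
    if (fd < 0)
        return 1;
    if (write(fd, "aaaa", 4) != 4)
        return 1;

    // Shared mapping: stores land in the inode's pages.
    auto* shared = (char*)mmap(nullptr, 4, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    // Private mapping: the first store copies the page away from the inode.
    auto* priv = (char*)mmap(nullptr, 4, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (shared == MAP_FAILED || priv == MAP_FAILED)
        return 1;

    priv[0] = 'X';   // detaches a private copy of this page (copy-on-write)
    shared[1] = 'Y'; // reaches the file and all MAP_SHARED mappings of it

    printf("shared:  %.4s\n", shared); // prints "aYaa"
    printf("private: %.4s\n", priv);   // prints "Xaaa": the later shared store
                                       // is invisible through the private copy

    munmap(shared, 4);
    munmap(priv, 4);
    close(fd);
    return 0;
}

Built and run (for example, c++ mmap_demo.cpp -o mmap_demo && ./mmap_demo), the two printed lines diverge, which is exactly the behavioral difference the two subclasses encode once mmap() starts handing out PrivateInodeVMObjects for MAP_PRIVATE file mappings.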