/*
 * Copyright (c) 2018-2020, Andreas Kling
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#include <AK/InlineLinkedList.h>
#include <AK/String.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Heap/SlabAllocator.h>
#include <Kernel/SpinLock.h>
#include <Kernel/UnixTypes.h>
#include <Kernel/VM/PurgeablePageRanges.h>
#include <Kernel/VM/RangeAllocator.h>
#include <Kernel/VM/VMObject.h>

namespace Kernel {

class Inode;
class VMObject;

enum class ShouldFlushTLB {
    No,
    Yes,
};

class Region final
    : public InlineLinkedListNode<Region>
    , public Weakable<Region>
    , public PurgeablePageRanges {
    friend class MemoryManager;

    MAKE_SLAB_ALLOCATED(Region)
public:
    enum Access : u8 {
        Read = 1,
        Write = 2,
        Execute = 4,
        HasBeenReadable = 16,
        HasBeenWritable = 32,
        HasBeenExecutable = 64,
    };

    enum class Cacheable {
        No = 0,
        Yes,
    };

    static NonnullOwnPtr<Region> create_user_accessible(Process*, const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, String name, u8 access, Cacheable, bool shared);
    static NonnullOwnPtr<Region> create_kernel_only(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, String name, u8 access, Cacheable = Cacheable::Yes);

    ~Region();

    const Range& range() const { return m_range; }
    VirtualAddress vaddr() const { return m_range.base(); }
    size_t size() const { return m_range.size(); }
    bool is_readable() const { return m_access & Access::Read; }
    bool is_writable() const { return m_access & Access::Write; }
    bool is_executable() const { return m_access & Access::Execute; }

    bool has_been_readable() const { return m_access & Access::HasBeenReadable; }
    bool has_been_writable() const { return m_access & Access::HasBeenWritable; }
    bool has_been_executable() const { return m_access & Access::HasBeenExecutable; }

    bool is_cacheable() const { return m_cacheable; }
    const String& name() const { return m_name; }
    unsigned access() const { return m_access; }

    void set_name(String name) { m_name = move(name); }

    const VMObject& vmobject() const { return *m_vmobject; }
    VMObject& vmobject() { return *m_vmobject; }
    void set_vmobject(NonnullRefPtr<VMObject>&&);

    bool is_shared() const { return m_shared; }
    void set_shared(bool shared) { m_shared = shared; }

    bool is_stack() const { return m_stack; }
    void set_stack(bool stack) { m_stack = stack; }

    bool is_mmap() const { return m_mmap; }
    void set_mmap(bool mmap) { m_mmap = mmap; }

    bool is_user() const { return !is_kernel(); }
    bool is_kernel() const { return vaddr().get() < 0x00800000 || vaddr().get() >= 0xc0000000; }
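    // Page faults that land inside this region are forwarded here by MemoryManager,
    // which passes along its ScopedSpinLock so the handler can drop the lock while
    // paging data in. The concrete fault kind is resolved by the private
    // handle_zero_fault(), handle_cow_fault() and handle_inode_fault() helpers
    // declared further down; the dispatch itself lives in Region.cpp.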
    PageFaultResponse handle_fault(const PageFault&, ScopedSpinLock<RecursiveSpinLock>&);

    OwnPtr<Region> clone(Process&);

    bool contains(VirtualAddress vaddr) const
    {
        return m_range.contains(vaddr);
    }

    bool contains(const Range& range) const
    {
        return m_range.contains(range);
    }

    unsigned page_index_from_address(VirtualAddress vaddr) const
    {
        return (vaddr - m_range.base()).get() / PAGE_SIZE;
    }

    VirtualAddress vaddr_from_page_index(size_t page_index) const
    {
        return vaddr().offset(page_index * PAGE_SIZE);
    }

    bool translate_vmobject_page(size_t& index) const
    {
        auto first_index = first_page_index();
        if (index < first_index) {
            index = first_index;
            return false;
        }
        index -= first_index;
        auto total_page_count = this->page_count();
        if (index >= total_page_count) {
            index = first_index + total_page_count - 1;
            return false;
        }
        return true;
    }

    bool translate_vmobject_page_range(size_t& index, size_t& page_count) const
    {
        auto first_index = first_page_index();
        if (index < first_index) {
            auto delta = first_index - index;
            index = first_index;
            if (delta >= page_count) {
                page_count = 0;
                return false;
            }
            page_count -= delta;
        }
        index -= first_index;
        auto total_page_count = this->page_count();
        if (index + page_count > total_page_count) {
            page_count = total_page_count - index;
            if (page_count == 0)
                return false;
        }
        return true;
    }

    ALWAYS_INLINE size_t translate_to_vmobject_page(size_t page_index) const
    {
        return first_page_index() + page_index;
    }
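    // Worked example for the translation helpers above: if this region is 4 pages
    // long and starts 8 pages into its VMObject (first_page_index() == 8,
    // page_count() == 4), then:
    //   - translate_vmobject_page(index = 10) rewrites index to 2 and returns true.
    //   - translate_vmobject_page_range(index = 6, page_count = 8) clips the range to
    //     the part this region covers, leaving index = 0 and page_count = 4.
    // Both return false when the requested pages fall outside this region's window
    // into the VMObject.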
    size_t first_page_index() const
    {
        return m_offset_in_vmobject / PAGE_SIZE;
    }

    size_t page_count() const
    {
        return size() / PAGE_SIZE;
    }

    const PhysicalPage* physical_page(size_t index) const
    {
        VERIFY(index < page_count());
        return vmobject().physical_pages()[first_page_index() + index];
    }

    RefPtr<PhysicalPage>& physical_page_slot(size_t index)
    {
        VERIFY(index < page_count());
        return vmobject().physical_pages()[first_page_index() + index];
    }

    size_t offset_in_vmobject() const
    {
        return m_offset_in_vmobject;
    }

    size_t offset_in_vmobject_from_vaddr(VirtualAddress vaddr) const
    {
        return m_offset_in_vmobject + vaddr.get() - this->vaddr().get();
    }

    size_t amount_resident() const;
    size_t amount_shared() const;
    size_t amount_dirty() const;

    bool should_cow(size_t page_index) const;
    void set_should_cow(size_t page_index, bool);

    size_t cow_pages() const;

    void set_readable(bool b) { set_access_bit(Access::Read, b); }
    void set_writable(bool b) { set_access_bit(Access::Write, b); }
    void set_executable(bool b) { set_access_bit(Access::Execute, b); }

    void set_page_directory(PageDirectory&);
    bool map(PageDirectory&, ShouldFlushTLB = ShouldFlushTLB::Yes);
    enum class ShouldDeallocateVirtualMemoryRange {
        No,
        Yes,
    };
    void unmap(ShouldDeallocateVirtualMemoryRange = ShouldDeallocateVirtualMemoryRange::Yes);

    void remap();

    // For InlineLinkedListNode
    Region* m_next { nullptr };
    Region* m_prev { nullptr };

    bool remap_vmobject_page_range(size_t page_index, size_t page_count);

    bool is_volatile(VirtualAddress vaddr, size_t size) const;
    enum class SetVolatileError {
        Success = 0,
        NotPurgeable,
        OutOfMemory,
    };
    SetVolatileError set_volatile(VirtualAddress vaddr, size_t size, bool is_volatile, bool& was_purged);

    RefPtr<Process> get_owner();

    bool is_syscall_region() const { return m_syscall_region; }
    void set_syscall_region(bool b) { m_syscall_region = b; }

private:
    Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, String, u8 access, Cacheable, bool shared);

    bool do_remap_vmobject_page_range(size_t page_index, size_t page_count);

    void set_access_bit(Access access, bool b)
    {
        if (b)
            m_access |= access | (access << 4);
        else
            m_access &= ~access;
    }

    bool do_remap_vmobject_page(size_t index, bool with_flush = true);
    bool remap_vmobject_page(size_t index, bool with_flush = true);

    PageFaultResponse handle_cow_fault(size_t page_index);
    PageFaultResponse handle_inode_fault(size_t page_index, ScopedSpinLock<RecursiveSpinLock>&);
    PageFaultResponse handle_zero_fault(size_t page_index);

    bool map_individual_page_impl(size_t page_index);

    void register_purgeable_page_ranges();
    void unregister_purgeable_page_ranges();

    RefPtr<PageDirectory> m_page_directory;
    Range m_range;
    size_t m_offset_in_vmobject { 0 };
    NonnullRefPtr<VMObject> m_vmobject;
    String m_name;
    u8 m_access { 0 };
    bool m_shared : 1 { false };
    bool m_cacheable : 1 { false };
    bool m_stack : 1 { false };
    bool m_mmap : 1 { false };
    bool m_syscall_region : 1 { false };
    WeakPtr<Process> m_owner;
};

inline unsigned prot_to_region_access_flags(int prot)
{
    unsigned access = 0;
    if (prot & PROT_READ)
        access |= Region::Access::Read;
    if (prot & PROT_WRITE)
        access |= Region::Access::Write;
    if (prot & PROT_EXEC)
        access |= Region::Access::Execute;
    return access;
}

inline int region_access_flags_to_prot(unsigned access)
{
    int prot = 0;
    if (access & Region::Access::Read)
        prot |= PROT_READ;
    if (access & Region::Access::Write)
        prot |= PROT_WRITE;
    if (access & Region::Access::Execute)
        prot |= PROT_EXEC;
    return prot;
}

}
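
// Illustrative sketch of how a caller might use the API declared above to create and
// map a kernel-only region. `some_range`, `some_vmobject` and `some_page_directory`
// are hypothetical stand-ins for a Range, a NonnullRefPtr<VMObject> and a
// PageDirectory obtained elsewhere; only the Region declarations above are assumed.
//
//     auto region = Region::create_kernel_only(
//         some_range,
//         move(some_vmobject),
//         0,                                              // offset_in_vmobject
//         "example region",
//         Region::Access::Read | Region::Access::Write);  // Cacheable defaults to Yes
//     if (region->map(some_page_directory))
//         dbgln("mapped {} bytes at {}", region->size(), region->vaddr());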