/*
 * Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Badge.h>
#include <AK/Concepts.h>
#include <AK/HashTable.h>
#include <AK/IntrusiveRedBlackTree.h>
#include <AK/NonnullOwnPtrVector.h>
#include <Kernel/Forward.h>
#include <Kernel/Library/NonnullLockRefPtrVector.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/AllocationStrategy.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/PhysicalRegion.h>
#include <Kernel/Memory/Region.h>
#include <Kernel/Memory/RegionTree.h>
#include <Kernel/Memory/VMObject.h>

namespace Kernel {
class PageDirectoryEntry;
class PageTableEntry;
}

struct KmallocGlobalData;

namespace Kernel::Memory {

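// Helpers for rounding an address to the nearest PAGE_SIZE boundary. page_round_up() is
// fallible because rounding up past the top of the address space would overflow.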
ErrorOr<FlatPtr> page_round_up(FlatPtr x);

constexpr FlatPtr page_round_down(FlatPtr x)
{
    return ((FlatPtr)(x)) & ~(PAGE_SIZE - 1);
}

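// Undoes the kernel's physical-to-virtual offset, turning a virtual address in the kernel's
// low-memory mapping back into the physical address it refers to.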
inline FlatPtr virtual_to_low_physical(FlatPtr virtual_)
{
    return virtual_ - physical_to_virtual_offset;
}

enum class UsedMemoryRangeType {
    LowMemory = 0,
    Kernel,
    BootModule,
    PhysicalPages,
    __Count
};

static constexpr StringView UserMemoryRangeTypeNames[] {
    "Low memory"sv,
    "Kernel"sv,
    "Boot module"sv,
    "Physical Pages"sv
};
static_assert(array_size(UserMemoryRangeTypeNames) == to_underlying(UsedMemoryRangeType::__Count));

struct UsedMemoryRange {
    UsedMemoryRangeType type {};
    PhysicalAddress start;
    PhysicalAddress end;
};

struct ContiguousReservedMemoryRange {
    PhysicalAddress start;
    PhysicalSize length {};
};

enum class PhysicalMemoryRangeType {
    Usable = 0,
    Reserved,
    ACPI_Reclaimable,
    ACPI_NVS,
    BadMemory,
    Unknown,
};

struct PhysicalMemoryRange {
    PhysicalMemoryRangeType type { PhysicalMemoryRangeType::Unknown };
    PhysicalAddress start;
    PhysicalSize length {};
};

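// Shorthand for the MemoryManager singleton, used throughout the kernel.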
#define MM Kernel::Memory::MemoryManager::the()

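// Per-CPU MemoryManager state, fetched via ProcessorSpecific<MemoryManagerData>
// (see MemoryManager::get_data() below).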
struct MemoryManagerData {
    static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::MemoryManager; }

    Spinlock m_quickmap_in_use { LockRank::None };
    InterruptsState m_quickmap_previous_interrupts_state;
};

// This class represents a set of committed physical pages.
// When you ask MemoryManager to commit pages for you, you get one of these in return.
// You can allocate pages from it via `take_one()`.
// It will uncommit any (unallocated) remaining pages when destroyed.
class CommittedPhysicalPageSet {
    AK_MAKE_NONCOPYABLE(CommittedPhysicalPageSet);

public:
    CommittedPhysicalPageSet(Badge<MemoryManager>, size_t page_count)
        : m_page_count(page_count)
    {
    }

    CommittedPhysicalPageSet(CommittedPhysicalPageSet&& other)
        : m_page_count(exchange(other.m_page_count, 0))
    {
    }

    ~CommittedPhysicalPageSet();

    bool is_empty() const { return m_page_count == 0; }
    size_t page_count() const { return m_page_count; }

    [[nodiscard]] NonnullRefPtr<PhysicalPage> take_one();
    void uncommit_one();

    void operator=(CommittedPhysicalPageSet&&) = delete;

private:
    size_t m_page_count { 0 };
};

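// A minimal usage sketch (illustrative only; the page count is arbitrary, and TRY() assumes a
// caller that itself returns ErrorOr<>):
//
//     auto committed_pages = TRY(MM.commit_physical_pages(16));
//     while (!committed_pages.is_empty()) {
//         auto page = committed_pages.take_one();
//         // ... hand `page` to whatever needs backing storage ...
//     }
//     // Pages never taken are uncommitted automatically when `committed_pages` is destroyed.
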
class MemoryManager {
    friend class PageDirectory;
    friend class AnonymousVMObject;
    friend class Region;
    friend class RegionTree;
    friend class VMObject;
    friend struct ::KmallocGlobalData;

public:
    static MemoryManager& the();
    static bool is_initialized();

    static void initialize(u32 cpu);

    static inline MemoryManagerData& get_data()
    {
        return ProcessorSpecific<MemoryManagerData>::get();
    }

    PageFaultResponse handle_page_fault(PageFault const&);

    void set_page_writable_direct(VirtualAddress, bool);

    void protect_readonly_after_init_memory();
    void unmap_prekernel();
    void unmap_text_after_init();
    void protect_ksyms_after_init();

    static void enter_process_address_space(Process&);
    static void enter_address_space(AddressSpace&);

    bool validate_user_stack(AddressSpace&, VirtualAddress) const;

    enum class ShouldZeroFill {
        No,
        Yes
    };

    ErrorOr<CommittedPhysicalPageSet> commit_physical_pages(size_t page_count);
    void uncommit_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);

    NonnullRefPtr<PhysicalPage> allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
    ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
    ErrorOr<NonnullRefPtrVector<PhysicalPage>> allocate_contiguous_physical_pages(size_t size);
    void deallocate_physical_page(PhysicalAddress);

    ErrorOr<NonnullOwnPtr<Region>> allocate_contiguous_kernel_region(size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page);
    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_page(StringView name, Memory::Region::Access access);
    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, NonnullRefPtrVector<Memory::PhysicalPage>& dma_buffer_pages);
    ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access);
    ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
    ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
    ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
    ErrorOr<NonnullOwnPtr<Region>> allocate_unbacked_region_anywhere(size_t size, size_t alignment);
    ErrorOr<NonnullOwnPtr<Region>> create_identity_mapped_region(PhysicalAddress, size_t);

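    // A minimal sketch of allocating and using a kernel region (illustrative only; the size and
    // name are made up, and TRY() assumes a caller that returns ErrorOr<>):
    //
    //     auto region = TRY(MM.allocate_kernel_region(4 * KiB, "Example buffer"sv, Region::Access::ReadWrite));
    //     u8* data = region->vaddr().as_ptr();
    //     // ... use `data`; the mapping goes away when `region` is destroyed ...
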
    struct SystemMemoryInfo {
        PhysicalSize physical_pages { 0 };
        PhysicalSize physical_pages_used { 0 };
        PhysicalSize physical_pages_committed { 0 };
        PhysicalSize physical_pages_uncommitted { 0 };
    };

    SystemMemoryInfo get_system_memory_info();

    template<IteratorFunction<VMObject&> Callback>
    static void for_each_vmobject(Callback callback)
    {
        VMObject::all_instances().with([&](auto& list) {
            for (auto& vmobject : list) {
                if (callback(vmobject) == IterationDecision::Break)
                    break;
            }
        });
    }

    template<VoidFunction<VMObject&> Callback>
    static void for_each_vmobject(Callback callback)
    {
        VMObject::all_instances().with([&](auto& list) {
            for (auto& vmobject : list) {
                callback(vmobject);
            }
        });
    }

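    // A minimal sketch of the overloads above (illustrative only): the IteratorFunction overload
    // lets the callback stop early via IterationDecision::Break, while the VoidFunction overload
    // visits every VMObject unconditionally.
    //
    //     MemoryManager::for_each_vmobject([](VMObject& vmobject) {
    //         if (vmobject.is_inode())
    //             return IterationDecision::Break;
    //         return IterationDecision::Continue;
    //     });
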
    static Region* find_user_region_from_vaddr(AddressSpace&, VirtualAddress);
    static void validate_syscall_preconditions(Process&, RegisterState const&);

    void dump_kernel_regions();

    PhysicalPage& shared_zero_page() { return *m_shared_zero_page; }
    PhysicalPage& lazy_committed_page() { return *m_lazy_committed_page; }

    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }

    template<typename Callback>
    void for_each_used_memory_range(Callback callback)
    {
        m_global_data.template with([&](auto& global_data) {
            for (auto& range : global_data.used_memory_ranges)
                callback(range);
        });
    }
    bool is_allowed_to_read_physical_memory_for_userspace(PhysicalAddress, size_t read_length) const;

    PhysicalPageEntry& get_physical_page_entry(PhysicalAddress);
    PhysicalAddress get_physical_address(PhysicalPage const&);

    void copy_physical_page(PhysicalPage&, u8 page_buffer[PAGE_SIZE]);

    IterationDecision for_each_physical_memory_range(Function<IterationDecision(PhysicalMemoryRange const&)>);

private:
    MemoryManager();
    ~MemoryManager();

    void initialize_physical_pages();
    void register_reserved_ranges();

    void unregister_kernel_region(Region&);

    void protect_kernel_image();
    void parse_memory_map();
    static void flush_tlb_local(VirtualAddress, size_t page_count = 1);
    static void flush_tlb(PageDirectory const*, VirtualAddress, size_t page_count = 1);

    static Region* kernel_region_from_vaddr(VirtualAddress);

    static Region* find_region_from_vaddr(VirtualAddress);

    RefPtr<PhysicalPage> find_free_physical_page(bool);

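    // Quickmapping briefly maps a single physical page at a dedicated per-CPU virtual address so
    // the kernel can access its contents; every quickmap_page() must be balanced by a matching
    // unquickmap_page() (see MemoryManagerData::m_quickmap_in_use above).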
    ALWAYS_INLINE u8* quickmap_page(PhysicalPage& page)
    {
        return quickmap_page(page.paddr());
    }
    u8* quickmap_page(PhysicalAddress const&);
    void unquickmap_page();

    PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
    PageTableEntry* quickmap_pt(PhysicalAddress);

    PageTableEntry* pte(PageDirectory&, VirtualAddress);
    PageTableEntry* ensure_pte(PageDirectory&, VirtualAddress);
    enum class IsLastPTERelease {
        Yes,
        No
    };
    // NOTE: There is no per-directory map of page table pages; the PhysicalPage objects backing
    //       page tables are ref()'ed and unref()'ed manually as they are added to and removed
    //       from a page directory, so ensure_pte() never needs to allocate from the kernel heap.
    void release_pte(PageDirectory&, VirtualAddress, IsLastPTERelease);

    // NOTE: These are outside of GlobalData as they are only assigned on startup,
    //       and then never change. Atomic ref-counting covers that case without
    //       the need for additional synchronization.
    LockRefPtr<PageDirectory> m_kernel_page_directory;
    RefPtr<PhysicalPage> m_shared_zero_page;
    RefPtr<PhysicalPage> m_lazy_committed_page;

    // NOTE: These are outside of GlobalData as they are initialized on startup,
    //       and then never change.
    PhysicalPageEntry* m_physical_page_entries { nullptr };
    size_t m_physical_page_entries_count { 0 };

    struct GlobalData {
        GlobalData();

        SystemMemoryInfo system_memory_info;

        NonnullOwnPtrVector<PhysicalRegion> physical_regions;
        OwnPtr<PhysicalRegion> physical_pages_region;

        RegionTree region_tree;

        Vector<UsedMemoryRange> used_memory_ranges;
        Vector<PhysicalMemoryRange> physical_memory_ranges;
        Vector<ContiguousReservedMemoryRange> reserved_memory_ranges;
    };

    SpinlockProtected<GlobalData> m_global_data;
};

inline bool is_user_address(VirtualAddress vaddr)
{
    return vaddr.get() < USER_RANGE_CEILING;
}

inline bool is_user_range(VirtualAddress vaddr, size_t size)
{
    if (vaddr.offset(size) < vaddr)
        return false;
    if (!is_user_address(vaddr))
        return false;
    if (size <= 1)
        return true;
    return is_user_address(vaddr.offset(size - 1));
}

inline bool is_user_range(VirtualRange const& range)
{
    return is_user_range(range.base(), range.size());
}

inline bool PhysicalPage::is_shared_zero_page() const
{
    return this == &MM.shared_zero_page();
}

inline bool PhysicalPage::is_lazy_committed_page() const
{
    return this == &MM.lazy_committed_page();
}

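// Grows [address, address + size) outward to page boundaries. For example, with 4 KiB pages,
// address 0x1234 and size 0x10 expand to base 0x1000 and size 0x1000, i.e. the range [0x1000, 0x2000).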
inline ErrorOr<Memory::VirtualRange> expand_range_to_page_boundaries(FlatPtr address, size_t size)
{
    if ((address + size) < address)
        return EINVAL;

    auto base = VirtualAddress { address }.page_base();
    auto end = TRY(Memory::page_round_up(address + size));

    return Memory::VirtualRange { base, end - base.get() };
}

}