#pragma once

#include <AK/Badge.h>
#include <AK/Bitmap.h>
#include <AK/ByteBuffer.h>
#include <AK/HashTable.h>
#include <AK/NonnullRefPtrVector.h>
#include <AK/RefCounted.h>
#include <AK/RefPtr.h>
#include <AK/String.h>
#include <AK/Types.h>
#include <AK/Vector.h>
#include <AK/Weakable.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/FileSystem/InodeIdentifier.h>
#include <Kernel/VM/PhysicalPage.h>
#include <Kernel/VM/PhysicalRegion.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/VMObject.h>
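
// Round an address or size up to the next page boundary.
// Example: with PAGE_SIZE == 4096, PAGE_ROUND_UP(0x1001) == 0x2000.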
#define PAGE_ROUND_UP(x) ((((u32)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1)))
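
// The kernel is mapped at a 3 GiB virtual offset; these helpers translate
// between low physical addresses and their kernel virtual aliases by adding
// or subtracting 0xc0000000.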
template<typename T>
inline T* low_physical_to_virtual(T* physical)
{
    return (T*)(((u8*)physical) + 0xc0000000);
}

inline u32 low_physical_to_virtual(u32 physical)
{
    return physical + 0xc0000000;
}

template<typename T>
inline T* virtual_to_low_physical(T* physical)
{
    return (T*)(((u8*)physical) - 0xc0000000);
}

inline u32 virtual_to_low_physical(u32 physical)
{
    return physical - 0xc0000000;
}

class KBuffer;
class SynthFSInode;
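
// Convenience accessor for the MemoryManager singleton.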
#define MM MemoryManager::the()

class MemoryManager {
    AK_MAKE_ETERNAL
    friend class PageDirectory;
    friend class PhysicalPage;
    friend class PhysicalRegion;
    friend class Region;
    friend class VMObject;
    friend Optional<KBuffer> procfs$mm(InodeIdentifier);
    friend Optional<KBuffer> procfs$memstat(InodeIdentifier);

public:
    static MemoryManager& the();

    static void initialize();
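
    // Try to resolve a page fault (e.g. by committing a lazily allocated or
    // copy-on-write page); the response tells the fault handler whether the
    // faulting thread may continue or the process should be crashed.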
    PageFaultResponse handle_page_fault(const PageFault&);

    void enter_process_paging_scope(Process&);
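
    // Validation helpers used by syscalls to vet user-supplied pointers and
    // ranges before the kernel reads from or writes to them.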
    bool validate_user_stack(const Process&, VirtualAddress) const;
    bool validate_user_read(const Process&, VirtualAddress, size_t) const;
    bool validate_user_write(const Process&, VirtualAddress, size_t) const;

    bool validate_kernel_read(const Process&, VirtualAddress, size_t) const;
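
    // Physical page allocation: "user" pages back userspace memory and VM objects,
    // while "supervisor" pages back kernel-internal allocations.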
    enum class ShouldZeroFill {
        No,
        Yes
    };

    RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
    RefPtr<PhysicalPage> allocate_supervisor_physical_page();
    void deallocate_user_physical_page(PhysicalPage&&);
    void deallocate_supervisor_physical_page(PhysicalPage&&);

    void map_for_kernel(VirtualAddress, PhysicalAddress, bool cache_disabled = false);
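
    // Allocate a new kernel Region (a contiguous virtual range), optionally backed
    // by an existing VMObject or a specific physical range. Illustrative use
    // (assuming the Region::Access flags declared in Region.h):
    //
    //     auto region = MM.allocate_kernel_region(PAGE_SIZE, "example buffer",
    //         Region::Access::Read | Region::Access::Write);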
    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true, bool cacheable = true);
    OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access, bool cacheable = false);
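
    // Page usage statistics (counts are in pages, not bytes).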
    unsigned user_physical_pages() const { return m_user_physical_pages; }
    unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }
    unsigned super_physical_pages() const { return m_super_physical_pages; }
    unsigned super_physical_pages_used() const { return m_super_physical_pages_used; }
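
    // Invoke callback for every live VMObject; return IterationDecision::Break
    // from the callback to stop early.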
    template<typename Callback>
    static void for_each_vmobject(Callback callback)
    {
        for (auto& vmobject : MM.m_vmobjects) {
            if (callback(vmobject) == IterationDecision::Break)
                break;
        }
    }
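
    // Find the Region containing the given virtual address in the given
    // process's address space.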
    static Region* region_from_vaddr(Process&, VirtualAddress);
    static const Region* region_from_vaddr(const Process&, VirtualAddress);

private:
    MemoryManager();
    ~MemoryManager();

    enum class AccessSpace { Kernel, User };
    enum class AccessType { Read, Write };
    template<AccessSpace, AccessType>
    bool validate_range(const Process&, VirtualAddress, size_t) const;
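
    // Regions and VMObjects register themselves here so the memory manager can
    // enumerate them (see for_each_vmobject and VMObject::for_each_region).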
    void register_vmobject(VMObject&);
    void unregister_vmobject(VMObject&);
    void register_region(Region&);
    void unregister_region(Region&);
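
    // One-time bring-up steps performed while the memory manager initializes,
    // plus TLB maintenance helpers.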
    void detect_cpu_features();
    void setup_low_1mb();
    void protect_kernel_image();
    void parse_memory_map();

    void flush_entire_tlb();
    void flush_tlb(VirtualAddress);
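
    // Region lookups by virtual address (user, kernel, or either).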
    static Region* user_region_from_vaddr(Process&, VirtualAddress);
    static Region* kernel_region_from_vaddr(VirtualAddress);
    static Region* region_from_vaddr(VirtualAddress);

    RefPtr<PhysicalPage> find_free_user_physical_page();
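
    // Quickmap: temporarily map a single physical page (or page table) at a fixed
    // kernel virtual address so it can be accessed; only one quickmap may be
    // active at a time (tracked by m_quickmap_in_use).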
    u8* quickmap_page(PhysicalPage&);
    void unquickmap_page();

    PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
    PageTableEntry* quickmap_pt(PhysicalAddress);

    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }

    PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);

    RefPtr<PageDirectory> m_kernel_page_directory;
    RefPtr<PhysicalPage> m_low_page_table;

    unsigned m_user_physical_pages { 0 };
    unsigned m_user_physical_pages_used { 0 };
    unsigned m_super_physical_pages { 0 };
    unsigned m_super_physical_pages_used { 0 };

    NonnullRefPtrVector<PhysicalRegion> m_user_physical_regions;
    NonnullRefPtrVector<PhysicalRegion> m_super_physical_regions;

    InlineLinkedList<Region> m_user_regions;
    InlineLinkedList<Region> m_kernel_regions;

    InlineLinkedList<VMObject> m_vmobjects;

    bool m_quickmap_in_use { false };
};
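
// RAII helper that activates the given process's page tables for the duration
// of the scope.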
struct ProcessPagingScope {
    ProcessPagingScope(Process&);
    ~ProcessPagingScope();
};
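
// Invoke callback for every Region that currently maps this VMObject.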
template<typename Callback>
void VMObject::for_each_region(Callback callback)
{
    // FIXME: Figure out a better data structure so we don't have to walk every single region every time an inode changes.
    // Perhaps VMObject could have a Vector<Region*> with all of its mappers?
    for (auto& region : MM.m_user_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
    for (auto& region : MM.m_kernel_regions) {
        if (&region.vmobject() == this)
            callback(region);
    }
}
inline bool is_user_address(VirtualAddress vaddr)
{
    return vaddr.get() >= (8 * MB) && vaddr.get() < 0xc0000000;
}