Kernel: Add mechanism to identity map the lowest 2MB
This commit is contained in:
parent 19190267a6
commit 841364b609
Notes:
sideshowbarker 2024-07-19 05:50:08 +09:00
Author: https://github.com/tomuta
Commit: https://github.com/SerenityOS/serenity/commit/841364b6096
Pull-request: https://github.com/SerenityOS/serenity/pull/2475
Reviewed-by: https://github.com/awesomekling
Reviewed-by: https://github.com/bugaevc
@@ -211,8 +211,6 @@ void MemoryManager::initialize()
 Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
 {
-    if (vaddr.get() < 0xc0000000)
-        return nullptr;
     for (auto& region : MM.m_kernel_regions) {
         if (region.contains(vaddr))
             return &region;
@@ -318,6 +316,18 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
     return allocate_kernel_region_with_vmobject(range, *vmobject, name, access, user_accessible, cacheable);
 }
 
+OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
+{
+    ASSERT(!(size % PAGE_SIZE));
+    auto range = kernel_page_directory().identity_range_allocator().allocate_specific(VirtualAddress(paddr.get()), size);
+    if (!range.is_valid())
+        return nullptr;
+    auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
+    if (!vmobject)
+        return nullptr;
+    return allocate_kernel_region_with_vmobject(range, *vmobject, name, access, user_accessible, cacheable);
+}
+
 OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access, bool cacheable)
 {
     return allocate_kernel_region(size, name, access, true, true, cacheable);
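
For orientation, a minimal sketch of how a caller might use the new helper. Only the signature of allocate_kernel_region_identity() comes from the hunk above; the call site, region name, size, and flag values are illustrative, and the Region::Access flags are assumed to be the kernel's usual access bits (MM, PAGE_SIZE, and dbg() appear elsewhere in this diff).

// Hypothetical call site, not part of this commit: identity map the whole
// 2 MB low-memory window (virtual address == physical address),
// read/write, not user-accessible, uncacheable.
auto region = MM.allocate_kernel_region_identity(
    PhysicalAddress(0x00000000),
    PAGE_SIZE * 512,
    "hypothetical low-memory identity map",
    Region::Access::Read | Region::Access::Write,
    false,  // user_accessible
    false); // cacheable
if (!region) {
    // allocate_specific() failed: the requested range lies outside the
    // identity window or has already been handed out.
    dbg() << "Failed to identity map the requested low-memory range";
}
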
@@ -665,7 +675,7 @@ void MemoryManager::unregister_vmobject(VMObject& vmobject)
 void MemoryManager::register_region(Region& region)
 {
     InterruptDisabler disabler;
-    if (region.vaddr().get() >= 0xc0000000)
+    if (region.is_kernel())
         m_kernel_regions.append(&region);
     else
         m_user_regions.append(&region);
@@ -674,7 +684,7 @@ void MemoryManager::register_region(Region& region)
 void MemoryManager::unregister_region(Region& region)
 {
     InterruptDisabler disabler;
-    if (region.vaddr().get() >= 0xc0000000)
+    if (region.is_kernel())
         m_kernel_regions.remove(&region);
     else
         m_user_regions.remove(&region);

@@ -107,6 +107,7 @@ public:
     OwnPtr<Region> allocate_contiguous_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
     OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true, bool cacheable = true);
     OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
+    OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
     OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
     OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
     OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access, bool cacheable = true);
@@ -143,6 +144,8 @@ public:
 
     PhysicalPage& shared_zero_page() { return *m_shared_zero_page; }
 
+    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
+
 private:
     MemoryManager();
     ~MemoryManager();
@@ -177,8 +180,6 @@ private:
     PageDirectoryEntry* quickmap_pd(PageDirectory&, size_t pdpt_index);
     PageTableEntry* quickmap_pt(PhysicalAddress);
 
-    PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
-
     const PageTableEntry* pte(const PageDirectory&, VirtualAddress);
     PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);
 

@@ -59,6 +59,7 @@ extern "C" PageDirectoryEntry boot_pd3[1024];
 PageDirectory::PageDirectory()
 {
     m_range_allocator.initialize_with_range(VirtualAddress(0xc0800000), 0x3f000000);
+    m_identity_range_allocator.initialize_with_range(VirtualAddress(FlatPtr(0x00000000)), 0x00200000);
 
     // Adopt the page tables already set up by boot.S
     PhysicalAddress boot_pdpt_paddr(virtual_to_low_physical((FlatPtr)boot_pdpt));
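
As a side note, the 0x00200000 size handed to the identity range allocator is exactly the "lowest 2MB" from the commit title. A throwaway sanity check (not part of the commit) spells out the constant:

// Not from the commit: the identity window starts at address 0 and spans
// 0x00200000 bytes, i.e. 2 MiB, or 512 pages of 4 KiB each.
static_assert(0x00200000 == 2 * 1024 * 1024, "identity window is 2 MiB");
static_assert(0x00200000 / 4096 == 512, "which is 512 4 KiB pages");
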

@@ -52,6 +52,7 @@ public:
     u32 cr3() const { return m_directory_table->paddr().get(); }
 
     RangeAllocator& range_allocator() { return m_range_allocator; }
+    RangeAllocator& identity_range_allocator() { return m_identity_range_allocator; }
 
     Process* process() { return m_process; }
     const Process* process() const { return m_process; }
@@ -62,6 +63,7 @@ private:
 
     Process* m_process { nullptr };
     RangeAllocator m_range_allocator;
+    RangeAllocator m_identity_range_allocator;
     RefPtr<PhysicalPage> m_directory_table;
     RefPtr<PhysicalPage> m_directory_pages[4];
     HashMap<unsigned, RefPtr<PhysicalPage>> m_physical_pages;

@@ -90,6 +90,11 @@ public:
 
     void dump() const;
 
+    bool contains(const Range& range) const
+    {
+        return m_total_range.contains(range);
+    }
+
 private:
     void carve_at_index(int, const Range&);
 
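
The new RangeAllocator::contains() simply defers to Range::contains(), whose body is not shown in this diff. A plausible sketch of that predicate, assuming Range exposes base() and end() accessors and that VirtualAddress supports ordered comparison, would be:

// Assumed shape of Range::contains() (not part of this diff): "other" is
// contained if it starts no earlier and ends no later than this range.
bool Range::contains(const Range& other) const
{
    return other.base() >= base() && other.end() <= end();
}
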

@@ -40,13 +40,14 @@
 
 namespace Kernel {
 
-Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, u8 access, bool cacheable)
+Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, u8 access, bool cacheable, bool kernel)
     : m_range(range)
     , m_offset_in_vmobject(offset_in_vmobject)
     , m_vmobject(move(vmobject))
     , m_name(name)
     , m_access(access)
     , m_cacheable(cacheable)
+    , m_kernel(kernel)
 {
     MM.register_region(*this);
 }
@@ -186,14 +187,14 @@ size_t Region::amount_shared() const
 
 NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
 {
-    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable);
+    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable, false);
     region->m_user_accessible = true;
     return region;
 }
 
 NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
 {
-    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable);
+    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable, true);
     region->m_user_accessible = false;
     return region;
 }
@@ -268,8 +269,12 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
         dbg() << "MM: >> Unmapped " << vaddr << " => P" << String::format("%p", page ? page->paddr().get() : 0) << " <<";
 #endif
     }
-    if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes)
-        m_page_directory->range_allocator().deallocate(range());
+    if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes) {
+        if (m_page_directory->range_allocator().contains(range()))
+            m_page_directory->range_allocator().deallocate(range());
+        else
+            m_page_directory->identity_range_allocator().deallocate(range());
+    }
     m_page_directory = nullptr;
 }
 

@@ -95,6 +95,9 @@ public:
     bool is_user_accessible() const { return m_user_accessible; }
     void set_user_accessible(bool b) { m_user_accessible = b; }
 
+    bool is_kernel() const { return m_kernel || vaddr().get() >= 0xc0000000; }
+    void set_kernel(bool kernel) { m_kernel = kernel; }
+
     PageFaultResponse handle_fault(const PageFault&);
 
     NonnullOwnPtr<Region> clone();
@@ -178,7 +181,7 @@ public:
     Region* m_prev { nullptr };
 
     // NOTE: These are public so we can make<> them.
-    Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String&, u8 access, bool cacheable);
+    Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String&, u8 access, bool cacheable, bool kernel);
 
     void set_inherit_mode(InheritMode inherit_mode) { m_inherit_mode = inherit_mode; }
 
@@ -211,6 +214,7 @@ private:
     bool m_cacheable : 1 { false };
     bool m_stack : 1 { false };
     bool m_mmap : 1 { false };
+    bool m_kernel : 1 { false };
     mutable OwnPtr<Bitmap> m_cow_map;
 };
 