Kernel: Make and use KERNEL_BASE

This makes 0xc0000000 less of a magic number, and will make it easier
to move the Kernel around in the future.

Hendiadyoin1 authored 2021-06-28 18:43:18 +02:00; committed by Andreas Kling
parent 8b44aa7885
commit 65566d6868
7 changed files with 12 additions and 9 deletions


@@ -115,7 +115,7 @@ NEVER_INLINE static void dump_backtrace_impl(FlatPtr base_pointer, bool use_ksym
     if (use_ksyms) {
         FlatPtr copied_stack_ptr[2];
         for (FlatPtr* stack_ptr = (FlatPtr*)base_pointer; stack_ptr && recognized_symbol_count < max_recognized_symbol_count; stack_ptr = (FlatPtr*)copied_stack_ptr[0]) {
-            if ((FlatPtr)stack_ptr < 0xc0000000)
+            if ((FlatPtr)stack_ptr < KERNEL_BASE)
                 break;
 
             void* fault_at;
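
This walk follows the x86 frame-pointer chain: copied_stack_ptr[0] holds the
caller's saved EBP and copied_stack_ptr[1] the return address, so a saved
frame pointer below KERNEL_BASE means the walk has left kernel memory. A
minimal sketch of the frame layout the loop assumes (the struct is
illustrative, not from the kernel):

    // Illustrative: with frame pointers enabled, each x86 stack frame starts
    // with the caller's saved EBP, followed by the return address.
    struct StackFrame {
        StackFrame* caller;     // saved EBP -> next frame up the stack
        FlatPtr return_address; // candidate for kernel symbolication
    };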


@@ -340,7 +340,7 @@ void Process::crash(int signal, FlatPtr ip, bool out_of_memory)
     if (out_of_memory) {
         dbgln("\033[31;1mOut of memory\033[m, killing: {}", *this);
     } else {
-        if (ip >= 0xc0000000 && g_kernel_symbols_available) {
+        if (ip >= KERNEL_BASE && g_kernel_symbols_available) {
             auto* symbol = symbolicate_kernel_address(ip);
             dbgln("\033[31;1m{:p} {} +{}\033[0m\n", ip, (symbol ? demangle(symbol->name) : "(k?)"), (symbol ? ip - symbol->address : 0));
         } else {
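
The KERNEL_BASE guard matters because only kernel-space instruction pointers
can be resolved against the kernel symbol map; the "+{}" in the log line is
then the offset into the containing function. A hedged restatement of that
arithmetic, using only names from the code above:

    // Sketch: how far into the symbolicated function the crash occurred.
    auto* symbol = symbolicate_kernel_address(ip); // nullptr if unknown
    FlatPtr offset_into_function = symbol ? ip - symbol->address : 0;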


@@ -10,3 +10,5 @@
 
 #define READONLY_AFTER_INIT __attribute__((section(".ro_after_init")))
 #define UNMAP_AFTER_INIT NEVER_INLINE __attribute__((section(".unmap_after_init")))
+
+#define KERNEL_BASE 0xC000'0000
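
Given the #include <Kernel/Sections.h> added in the last file of this diff,
Kernel/Sections.h is evidently where KERNEL_BASE now lives. The value is
exactly 3 GiB, i.e. the classic 3 GiB user / 1 GiB kernel split on 32-bit
x86; a self-contained sanity check (not part of the commit):

    // Sketch: 0xC000'0000 (C++14 digit separators) is the 3 GiB mark.
    static_assert(0xC000'0000 == 3ull * 1024 * 1024 * 1024);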


@@ -717,7 +717,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
         return {};
     }
 
-    fast_u32_fill((u32*)page->paddr().offset(0xc0000000).as_ptr(), 0, PAGE_SIZE / sizeof(u32));
+    fast_u32_fill((u32*)page->paddr().offset(KERNEL_BASE).as_ptr(), 0, PAGE_SIZE / sizeof(u32));
     ++m_super_physical_pages_used;
     return page;
 }
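
Offsetting a physical address by KERNEL_BASE works here because supervisor
pages come from low physical memory that the boot code maps at that fixed
offset; it is the same translation as low_physical_to_virtual() in the next
file. A hedged sketch of the equivalence:

    // Sketch (assumes the fixed-offset low-memory mapping described above):
    FlatPtr phys = page->paddr().get();
    u32* fill_target = (u32*)(phys + KERNEL_BASE); // what the fill writes to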


@@ -40,12 +40,12 @@ constexpr FlatPtr page_round_down(FlatPtr x)
 
 inline FlatPtr low_physical_to_virtual(FlatPtr physical)
 {
-    return physical + 0xc0000000;
+    return physical + KERNEL_BASE;
 }
 
 inline FlatPtr virtual_to_low_physical(FlatPtr virtual_)
 {
-    return virtual_ - 0xc0000000;
+    return virtual_ - KERNEL_BASE;
 }
 
 enum class UsedMemoryRangeType {
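
The two helpers above are inverses over the low-physical range, since the
mapping is a fixed offset. A small usage sketch (VERIFY is the kernel's
assertion macro):

    // Sketch: round-tripping through both helpers is the identity.
    FlatPtr physical = 0x100000;                      // 1 MiB, physical
    FlatPtr virt = low_physical_to_virtual(physical); // 0xC0100000
    VERIFY(virtual_to_low_physical(virt) == physical);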
@@ -260,7 +260,7 @@ void VMObject::for_each_region(Callback callback)
 
 inline bool is_user_address(VirtualAddress vaddr)
 {
-    return vaddr.get() < 0xc0000000;
+    return vaddr.get() < KERNEL_BASE;
 }
 
 inline bool is_user_range(VirtualAddress vaddr, size_t size)
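
Note the boundary is exclusive: KERNEL_BASE itself is the first kernel
address. A hedged boundary check:

    // Sketch: the last user address is KERNEL_BASE - 1.
    VERIFY(is_user_address(VirtualAddress(KERNEL_BASE - 1)));
    VERIFY(!is_user_address(VirtualAddress(KERNEL_BASE)));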


@@ -37,7 +37,7 @@ extern "C" PageDirectoryEntry boot_pd3[1024];
 
 UNMAP_AFTER_INIT PageDirectory::PageDirectory()
 {
-    m_range_allocator.initialize_with_range(VirtualAddress(0xc2000000), 0x2f000000);
+    m_range_allocator.initialize_with_range(VirtualAddress(KERNEL_BASE + 0x02000000), 0x2f000000);
     m_identity_range_allocator.initialize_with_range(VirtualAddress(FlatPtr(0x00000000)), 0x00200000);
 
     // Adopt the page tables already set up by boot.S
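
The rewritten expression keeps the old value: the kernel range allocator
still starts 32 MiB (0x02000000) above KERNEL_BASE and spans 0x2f000000
bytes. A quick check of the arithmetic (not part of the commit):

    // Sketch: new spelling == old magic number; range still ends at 0xf1000000.
    static_assert(0xC000'0000 + 0x0200'0000 == 0xc2000000);
    static_assert(0xc2000000 + 0x2f000000 == 0xf1000000);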
@@ -89,7 +89,7 @@ PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator)
     m_directory_pages[2] = MM.allocate_user_physical_page();
     if (!m_directory_pages[2])
         return;
-    // Share the top 1 GiB of kernel-only mappings (>=3GiB or >=0xc0000000)
+    // Share the top 1 GiB of kernel-only mappings (>=3GiB or >=KERNEL_BASE)
     m_directory_pages[3] = MM.kernel_page_directory().m_directory_pages[3];
 
 #if ARCH(X86_64)
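
Sharing works because KERNEL_BASE is 3 GiB, so all kernel-only mappings fall
in the fourth 1 GiB slot of the four-entry, PAE-style page directory pointer
table (hence the m_directory_pages array), and every process can alias the
kernel's directory page for that slot instead of copying it. Illustrative
index arithmetic:

    // Sketch: each PDPT entry covers 1 GiB; KERNEL_BASE lands in entry 3.
    static_assert(0xC000'0000 / 0x4000'0000 == 3);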


@@ -13,6 +13,7 @@
 #include <Kernel/Arch/x86/PageFault.h>
 #include <Kernel/Heap/SlabAllocator.h>
 #include <Kernel/KString.h>
+#include <Kernel/Sections.h>
 #include <Kernel/VM/PageFaultResponse.h>
 #include <Kernel/VM/PurgeablePageRanges.h>
 #include <Kernel/VM/RangeAllocator.h>
@@ -87,7 +88,7 @@ public:
     void set_mmap(bool mmap) { m_mmap = mmap; }
 
     bool is_user() const { return !is_kernel(); }
-    bool is_kernel() const { return vaddr().get() < 0x00800000 || vaddr().get() >= 0xc0000000; }
+    bool is_kernel() const { return vaddr().get() < 0x00800000 || vaddr().get() >= KERNEL_BASE; }
 
     PageFaultResponse handle_fault(const PageFault&, ScopedSpinLock<RecursiveSpinLock>&);
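
A region thus counts as kernel-owned if it sits in the low 8 MiB
(0x00800000) that the kernel keeps for itself (cf. the identity range
allocator above), or at or above KERNEL_BASE. A standalone restatement of
the predicate (names illustrative; FlatPtr is the kernel's pointer-sized
integer type):

    // Sketch: the same test as Region::is_kernel(), freed from the class.
    constexpr bool address_is_kernel(FlatPtr vaddr)
    {
        return vaddr < 0x00800000    // low 8 MiB: kernel/boot mappings
            || vaddr >= 0xC000'0000; // at or above KERNEL_BASE
    }
    static_assert(address_is_kernel(0x00100000));  // 1 MiB: low kernel area
    static_assert(!address_is_kernel(0x08000000)); // 128 MiB: user space
    static_assert(address_is_kernel(0xC000'0000)); // KERNEL_BASE itself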