Kernel: Fix the variable declaration for some linker script symbols

Despite what the declarations would have us believe, these symbols are
not "u8*". If they were, we wouldn't have to take their address with
the & operator and then cast the result to "u8*"/FlatPtr afterwards.
Author: Gunnar Beutner, 2021-07-22 22:11:17 +02:00 (committed by Andreas Kling)
Parent: 40580696a6
Commit: f2be1f9326
4 changed files with 75 additions and 75 deletions
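
For context, here is a minimal self-contained sketch of the declaration fix the diff below applies. The symbol name mirrors start_of_safemem_text from the diff; the u8/FlatPtr aliases and the local stand-in definition exist only so the example compiles and runs on its own and are not part of the commit.

    #include <cstdint>
    #include <cstdio>

    using u8 = unsigned char; // stand-ins for the kernel's u8/FlatPtr types
    using FlatPtr = uintptr_t;

    // The corrected declaration style: a linker script symbol is just an
    // address with a name, so an array of unknown bound lets the name itself
    // decay to that address.
    extern "C" u8 start_of_safemem_text[];

    // Stand-in definition so this standalone sketch links; the kernel gets
    // the real symbol from its linker script and only needs the declaration.
    extern "C" {
    u8 start_of_safemem_text[1];
    }

    int main()
    {
        // New style: the array name converts straight to the symbol's address.
        FlatPtr address = (FlatPtr)start_of_safemem_text;

        // The old "extern u8* start_of_safemem_text;" declaration pretended
        // the symbol was a pointer object stored at that address, which forced
        // every use to write (FlatPtr)&start_of_safemem_text instead.
        printf("%#lx\n", (unsigned long)address);
        return 0;
    }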


@@ -9,33 +9,33 @@
 #define CODE_SECTION(section_name) __attribute__((section(section_name)))
-extern "C" u8* start_of_safemem_text;
+extern "C" u8 start_of_safemem_text[];
-extern "C" u8* end_of_safemem_text;
+extern "C" u8 end_of_safemem_text[];
-extern "C" u8* safe_memcpy_ins_1;
+extern "C" u8 safe_memcpy_ins_1[];
-extern "C" u8* safe_memcpy_1_faulted;
+extern "C" u8 safe_memcpy_1_faulted[];
-extern "C" u8* safe_memcpy_ins_2;
+extern "C" u8 safe_memcpy_ins_2[];
-extern "C" u8* safe_memcpy_2_faulted;
+extern "C" u8 safe_memcpy_2_faulted[];
-extern "C" u8* safe_strnlen_ins;
+extern "C" u8 safe_strnlen_ins[];
-extern "C" u8* safe_strnlen_faulted;
+extern "C" u8 safe_strnlen_faulted[];
-extern "C" u8* safe_memset_ins_1;
+extern "C" u8 safe_memset_ins_1[];
-extern "C" u8* safe_memset_1_faulted;
+extern "C" u8 safe_memset_1_faulted[];
-extern "C" u8* safe_memset_ins_2;
+extern "C" u8 safe_memset_ins_2[];
-extern "C" u8* safe_memset_2_faulted;
+extern "C" u8 safe_memset_2_faulted[];
-extern "C" u8* start_of_safemem_atomic_text;
+extern "C" u8 start_of_safemem_atomic_text[];
-extern "C" u8* end_of_safemem_atomic_text;
+extern "C" u8 end_of_safemem_atomic_text[];
-extern "C" u8* safe_atomic_fetch_add_relaxed_ins;
+extern "C" u8 safe_atomic_fetch_add_relaxed_ins[];
-extern "C" u8* safe_atomic_fetch_add_relaxed_faulted;
+extern "C" u8 safe_atomic_fetch_add_relaxed_faulted[];
-extern "C" u8* safe_atomic_exchange_relaxed_ins;
+extern "C" u8 safe_atomic_exchange_relaxed_ins[];
-extern "C" u8* safe_atomic_exchange_relaxed_faulted;
+extern "C" u8 safe_atomic_exchange_relaxed_faulted[];
-extern "C" u8* safe_atomic_load_relaxed_ins;
+extern "C" u8 safe_atomic_load_relaxed_ins[];
-extern "C" u8* safe_atomic_load_relaxed_faulted;
+extern "C" u8 safe_atomic_load_relaxed_faulted[];
-extern "C" u8* safe_atomic_store_relaxed_ins;
+extern "C" u8 safe_atomic_store_relaxed_ins[];
-extern "C" u8* safe_atomic_store_relaxed_faulted;
+extern "C" u8 safe_atomic_store_relaxed_faulted[];
-extern "C" u8* safe_atomic_compare_exchange_relaxed_ins;
+extern "C" u8 safe_atomic_compare_exchange_relaxed_ins[];
-extern "C" u8* safe_atomic_compare_exchange_relaxed_faulted;
+extern "C" u8 safe_atomic_compare_exchange_relaxed_faulted[];
 namespace Kernel {
@@ -266,16 +266,16 @@ bool handle_safe_access_fault(RegisterState& regs, FlatPtr fault_address)
     if (ip >= (FlatPtr)&start_of_safemem_text && ip < (FlatPtr)&end_of_safemem_text) {
         // If we detect that the fault happened in safe_memcpy() safe_strnlen(),
         // or safe_memset() then resume at the appropriate _faulted label
-        if (ip == (FlatPtr)&safe_memcpy_ins_1)
+        if (ip == (FlatPtr)safe_memcpy_ins_1)
-            ip = (FlatPtr)&safe_memcpy_1_faulted;
+            ip = (FlatPtr)safe_memcpy_1_faulted;
-        else if (ip == (FlatPtr)&safe_memcpy_ins_2)
+        else if (ip == (FlatPtr)safe_memcpy_ins_2)
-            ip = (FlatPtr)&safe_memcpy_2_faulted;
+            ip = (FlatPtr)safe_memcpy_2_faulted;
-        else if (ip == (FlatPtr)&safe_strnlen_ins)
+        else if (ip == (FlatPtr)safe_strnlen_ins)
-            ip = (FlatPtr)&safe_strnlen_faulted;
+            ip = (FlatPtr)safe_strnlen_faulted;
-        else if (ip == (FlatPtr)&safe_memset_ins_1)
+        else if (ip == (FlatPtr)safe_memset_ins_1)
-            ip = (FlatPtr)&safe_memset_1_faulted;
+            ip = (FlatPtr)safe_memset_1_faulted;
-        else if (ip == (FlatPtr)&safe_memset_ins_2)
+        else if (ip == (FlatPtr)safe_memset_ins_2)
-            ip = (FlatPtr)&safe_memset_2_faulted;
+            ip = (FlatPtr)safe_memset_2_faulted;
         else
             return false;
@@ -292,16 +292,16 @@ bool handle_safe_access_fault(RegisterState& regs, FlatPtr fault_address)
         // If we detect that a fault happened in one of the atomic safe_
        // functions, resume at the appropriate _faulted label and set
        // the edx/rdx register to 1 to indicate an error
-        if (ip == (FlatPtr)&safe_atomic_fetch_add_relaxed_ins)
+        if (ip == (FlatPtr)safe_atomic_fetch_add_relaxed_ins)
-            ip = (FlatPtr)&safe_atomic_fetch_add_relaxed_faulted;
+            ip = (FlatPtr)safe_atomic_fetch_add_relaxed_faulted;
-        else if (ip == (FlatPtr)&safe_atomic_exchange_relaxed_ins)
+        else if (ip == (FlatPtr)safe_atomic_exchange_relaxed_ins)
-            ip = (FlatPtr)&safe_atomic_exchange_relaxed_faulted;
+            ip = (FlatPtr)safe_atomic_exchange_relaxed_faulted;
-        else if (ip == (FlatPtr)&safe_atomic_load_relaxed_ins)
+        else if (ip == (FlatPtr)safe_atomic_load_relaxed_ins)
-            ip = (FlatPtr)&safe_atomic_load_relaxed_faulted;
+            ip = (FlatPtr)safe_atomic_load_relaxed_faulted;
-        else if (ip == (FlatPtr)&safe_atomic_store_relaxed_ins)
+        else if (ip == (FlatPtr)safe_atomic_store_relaxed_ins)
-            ip = (FlatPtr)&safe_atomic_store_relaxed_faulted;
+            ip = (FlatPtr)safe_atomic_store_relaxed_faulted;
-        else if (ip == (FlatPtr)&safe_atomic_compare_exchange_relaxed_ins)
+        else if (ip == (FlatPtr)safe_atomic_compare_exchange_relaxed_ins)
-            ip = (FlatPtr)&safe_atomic_compare_exchange_relaxed_faulted;
+            ip = (FlatPtr)safe_atomic_compare_exchange_relaxed_faulted;
         else
             return false;


@@ -23,17 +23,17 @@
 #include <Kernel/VM/PhysicalRegion.h>
 #include <Kernel/VM/SharedInodeVMObject.h>
-extern u8* start_of_kernel_image;
+extern u8 start_of_kernel_image[];
-extern u8* end_of_kernel_image;
+extern u8 end_of_kernel_image[];
-extern FlatPtr start_of_kernel_text;
+extern u8 start_of_kernel_text[];
-extern FlatPtr start_of_kernel_data;
+extern u8 start_of_kernel_data[];
-extern FlatPtr end_of_kernel_bss;
+extern u8 end_of_kernel_bss[];
-extern FlatPtr start_of_ro_after_init;
+extern u8 start_of_ro_after_init[];
-extern FlatPtr end_of_ro_after_init;
+extern u8 end_of_ro_after_init[];
-extern FlatPtr start_of_unmap_after_init;
+extern u8 start_of_unmap_after_init[];
-extern FlatPtr end_of_unmap_after_init;
+extern u8 end_of_unmap_after_init[];
-extern FlatPtr start_of_kernel_ksyms;
+extern u8 start_of_kernel_ksyms[];
-extern FlatPtr end_of_kernel_ksyms;
+extern u8 end_of_kernel_ksyms[];
 extern multiboot_module_entry_t multiboot_copy_boot_modules_array[16];
 extern size_t multiboot_copy_boot_modules_count;
@@ -92,13 +92,13 @@ UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
 {
     ScopedSpinLock page_lock(kernel_page_directory().get_lock());
     // Disable writing to the kernel text and rodata segments.
-    for (auto i = (FlatPtr)&start_of_kernel_text; i < (FlatPtr)&start_of_kernel_data; i += PAGE_SIZE) {
+    for (auto i = start_of_kernel_text; i < start_of_kernel_data; i += PAGE_SIZE) {
         auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
         pte.set_writable(false);
     }
     if (Processor::current().has_feature(CPUFeature::NX)) {
         // Disable execution of the kernel data, bss and heap segments.
-        for (auto i = (FlatPtr)&start_of_kernel_data; i < (FlatPtr)&end_of_kernel_image; i += PAGE_SIZE) {
+        for (auto i = start_of_kernel_data; i < end_of_kernel_image; i += PAGE_SIZE) {
             auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
             pte.set_execute_disabled(true);
         }
@@ -140,8 +140,8 @@ void MemoryManager::unmap_ksyms_after_init()
     ScopedSpinLock mm_lock(s_mm_lock);
     ScopedSpinLock page_lock(kernel_page_directory().get_lock());
-    auto start = page_round_down((FlatPtr)&start_of_kernel_ksyms);
+    auto start = page_round_down((FlatPtr)start_of_kernel_ksyms);
-    auto end = page_round_up((FlatPtr)&end_of_kernel_ksyms);
+    auto end = page_round_up((FlatPtr)end_of_kernel_ksyms);
     // Unmap the entire .ksyms section
     for (auto i = start; i < end; i += PAGE_SIZE) {
@@ -198,7 +198,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
     m_used_memory_ranges.ensure_capacity(4);
     m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
     m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Prekernel, start_of_prekernel_image, end_of_prekernel_image });
-    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical(FlatPtr(&start_of_kernel_image))), PhysicalAddress(page_round_up(virtual_to_low_physical(FlatPtr(&end_of_kernel_image)))) });
+    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image))) });
     if (multiboot_info_ptr->flags & 0x4) {
         auto* bootmods_start = multiboot_copy_boot_modules_array;
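
The unmap_ksyms_after_init() hunk above converts the boundary symbols to FlatPtr and rounds them to page boundaries before unmapping. Below is a small standalone sketch of that arithmetic; the helper implementations and the literal addresses are illustrative stand-ins, not the kernel's own definitions.

    #include <cstdint>
    #include <cstdio>

    using FlatPtr = uintptr_t;
    static constexpr FlatPtr PAGE_SIZE = 4096; // assumed 4 KiB pages, as on x86

    // Illustrative implementations of the rounding helpers used in the hunk
    // above; the kernel's real helpers are defined in its own headers.
    static FlatPtr page_round_down(FlatPtr value) { return value & ~(PAGE_SIZE - 1); }
    static FlatPtr page_round_up(FlatPtr value) { return (value + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); }

    int main()
    {
        // With the array declarations, (FlatPtr)start_of_kernel_ksyms yields
        // the section's start address directly; these literals stand in for it.
        FlatPtr start_of_ksyms = 0xc0301234;
        FlatPtr end_of_ksyms = 0xc0309abc;

        FlatPtr start = page_round_down(start_of_ksyms); // 0xc0301000
        FlatPtr end = page_round_up(end_of_ksyms);       // 0xc030a000
        printf("unmap %#lx..%#lx\n", (unsigned long)start, (unsigned long)end);
        return 0;
    }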


@@ -13,7 +13,7 @@
 #include <Kernel/VM/MemoryManager.h>
 #include <Kernel/VM/PageDirectory.h>
-extern u8* end_of_kernel_image;
+extern u8 end_of_kernel_image[];
 namespace Kernel {
@@ -34,7 +34,7 @@ RefPtr<PageDirectory> PageDirectory::find_by_cr3(FlatPtr cr3)
 UNMAP_AFTER_INIT PageDirectory::PageDirectory()
 {
     // make sure this starts in a new page directory to make MemoryManager::initialize_physical_pages() happy
-    FlatPtr start_of_range = ((FlatPtr)&end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
+    FlatPtr start_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
     m_range_allocator.initialize_with_range(VirtualAddress(start_of_range), KERNEL_PD_END - start_of_range);
     m_identity_range_allocator.initialize_with_range(VirtualAddress(FlatPtr(0x00000000)), 0x00200000);
 }


@@ -61,20 +61,20 @@
 // Defined in the linker script
 typedef void (*ctor_func_t)();
-extern ctor_func_t start_heap_ctors;
+extern ctor_func_t start_heap_ctors[];
-extern ctor_func_t end_heap_ctors;
+extern ctor_func_t end_heap_ctors[];
-extern ctor_func_t start_ctors;
+extern ctor_func_t start_ctors[];
-extern ctor_func_t end_ctors;
+extern ctor_func_t end_ctors[];
 extern size_t __stack_chk_guard;
 size_t __stack_chk_guard;
-extern "C" u8* start_of_safemem_text;
+extern "C" u8 start_of_safemem_text[];
-extern "C" u8* end_of_safemem_text;
+extern "C" u8 end_of_safemem_text[];
-extern "C" u8* start_of_safemem_atomic_text;
+extern "C" u8 start_of_safemem_atomic_text[];
-extern "C" u8* end_of_safemem_atomic_text;
+extern "C" u8 end_of_safemem_atomic_text[];
-extern "C" u8* end_of_kernel_image;
+extern "C" u8 end_of_kernel_image[];
 multiboot_module_entry_t multiboot_copy_boot_modules_array[16];
 size_t multiboot_copy_boot_modules_count;
@@ -149,7 +149,7 @@ extern "C" [[noreturn]] UNMAP_AFTER_INIT void init(BootInfo const& boot_info)
     s_bsp_processor.early_initialize(0);
     // Invoke the constructors needed for the kernel heap
-    for (ctor_func_t* ctor = &start_heap_ctors; ctor < &end_heap_ctors; ctor++)
+    for (ctor_func_t* ctor = start_heap_ctors; ctor < end_heap_ctors; ctor++)
         (*ctor)();
     kmalloc_init();
     slab_alloc_init();
@@ -163,12 +163,12 @@ extern "C" [[noreturn]] UNMAP_AFTER_INIT void init(BootInfo const& boot_info)
     MemoryManager::initialize(0);
     // Ensure that the safemem sections are not empty. This could happen if the linker accidentally discards the sections.
-    VERIFY(&start_of_safemem_text != &end_of_safemem_text);
+    VERIFY(start_of_safemem_text != end_of_safemem_text);
-    VERIFY(&start_of_safemem_atomic_text != &end_of_safemem_atomic_text);
+    VERIFY(start_of_safemem_atomic_text != end_of_safemem_atomic_text);
     // Invoke all static global constructors in the kernel.
     // Note that we want to do this as early as possible.
-    for (ctor_func_t* ctor = &start_ctors; ctor < &end_ctors; ctor++)
+    for (ctor_func_t* ctor = start_ctors; ctor < end_ctors; ctor++)
         (*ctor)();
     APIC::initialize();
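
The init() hunks above iterate constructor function pointers between two linker-provided boundary symbols. Here is a self-contained sketch of that loop shape; the local table and the start_ctors/end_ctors pointers are stand-ins for the symbols the linker script would normally provide, not the kernel's actual definitions.

    #include <cstdio>

    typedef void (*ctor_func_t)();

    static void example_ctor() { puts("ctor ran"); }

    // Stand-ins for the linker-provided boundaries; the kernel instead declares
    // "extern ctor_func_t start_ctors[];" and the linker supplies the addresses.
    static ctor_func_t fake_ctor_table[] = { example_ctor };
    static ctor_func_t* start_ctors = fake_ctor_table;
    static ctor_func_t* end_ctors = fake_ctor_table + 1;

    int main()
    {
        // Same loop shape as in init(): with the array-style declarations the
        // boundary names already behave like pointers to the first and
        // one-past-last entries, so no '&' is needed when iterating.
        for (ctor_func_t* ctor = start_ctors; ctor < end_ctors; ctor++)
            (*ctor)();
        return 0;
    }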