Kernel: Add initial basic support for KASAN

This commit adds minimal support for compiler-instrumentation-based
memory access sanitization.
Currently we only support detection of kmalloc redzone accesses and
kmalloc use-after-free accesses.

Support for inline checks (for improved performance) and for stack
use-after-return and use-after-scope detection is left for future PRs.
commit f7a1f28d7f (parent 7ad7ae7000)
Author: Idan Horowitz, 2023-12-29 02:36:39 +02:00
Committed by: Andreas Kling
10 changed files with 538 additions and 63 deletions
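For context on the outline ("with-call") instrumentation mode this commit targets: the compiler inserts a call into the kernel's sanitizer runtime before every load and store, and the runtime validates the access against shadow memory. Below is a minimal sketch of that contract; the stubbed hook and read_value() are illustrative, not part of the commit (the real hooks are defined in Security/AddressSanitizer.cpp later in this diff).

#include <cstdint>
#include <cstdio>

using FlatPtr = uintptr_t;

// Stubbed runtime hook; the real kernel implementations appear later in this
// commit, in Security/AddressSanitizer.cpp.
extern "C" void __asan_load4(FlatPtr address)
{
    std::printf("checking 4-byte load at %#lx against shadow memory\n", static_cast<unsigned long>(address));
}

int read_value(int const* p)
{
    // Under -fsanitize=kernel-address in outline mode, the compiler inserts a
    // call equivalent to this one before the actual 4-byte load.
    __asan_load4(reinterpret_cast<FlatPtr>(p));
    return *p;
}

int main()
{
    int x = 42;
    return read_value(&x) == 42 ? 0 : 1;
}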


@@ -722,11 +722,15 @@ if (ENABLE_KERNEL_UNDEFINED_SANITIZER)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined")
endif()
# The Kernel Address Sanitizer (KASAN) implementation is still a work in progress; this option
# is not currently meant to be used, except when developing Kernel ASAN support.
#
if (ENABLE_KERNEL_ADDRESS_SANITIZER)
add_compile_options(-fsanitize=kernel-address)
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang$")
# TODO: Support inline KASAN for improved performance
add_compile_options("SHELL:-mllvm -asan-instrumentation-with-call-threshold=0")
# TODO: Support KASAN stack poisoning (inline) for use-after-return and use-after-scope detection
add_compile_options("SHELL:-mllvm -asan-stack=0")
endif()
set_source_files_properties(Security/AddressSanitizer.cpp PROPERTIES COMPILE_FLAGS "-fno-sanitize=kernel-address")
add_link_options(-fsanitize=kernel-address)
endif()


@@ -0,0 +1,31 @@
/*
* Copyright (c) 2023, Idan Horowitz <idan.horowitz@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/FileSystem/SysFS/Subsystems/Kernel/Configuration/KASANDeadly.h>
#include <Kernel/Security/AddressSanitizer.h>
namespace Kernel {
UNMAP_AFTER_INIT SysFSKASANDeadly::SysFSKASANDeadly(SysFSDirectory const& parent_directory)
: SysFSSystemBooleanVariable(parent_directory)
{
}
UNMAP_AFTER_INIT NonnullRefPtr<SysFSKASANDeadly> SysFSKASANDeadly::must_create(SysFSDirectory const& parent_directory)
{
return adopt_ref_if_nonnull(new (nothrow) SysFSKASANDeadly(parent_directory)).release_nonnull();
}
bool SysFSKASANDeadly::value() const
{
return AddressSanitizer::g_kasan_is_deadly;
}
void SysFSKASANDeadly::set_value(bool new_value)
{
AddressSanitizer::g_kasan_is_deadly = new_value;
}
}
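The node above gives runtime control over whether a KASAN report panics the system. A userspace sketch for flipping it, assuming the variable is exposed under /sys/kernel/conf/ like other SysFS boolean configuration variables (the exact path is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main()
{
    // Path assumed from the usual SysFS configuration-variable location.
    int fd = open("/sys/kernel/conf/kasan_is_deadly", O_WRONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    // "0": log violations and continue; "1": halt the system on a violation.
    if (write(fd, "0", 1) != 1) {
        perror("write");
        close(fd);
        return 1;
    }
    close(fd);
    return 0;
}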


@@ -0,0 +1,28 @@
/*
* Copyright (c) 2023, Idan Horowitz <idan.horowitz@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/RefPtr.h>
#include <AK/Types.h>
#include <Kernel/FileSystem/SysFS/Subsystems/Kernel/Configuration/BooleanVariable.h>
#include <Kernel/Library/UserOrKernelBuffer.h>
namespace Kernel {
class SysFSKASANDeadly final : public SysFSSystemBooleanVariable {
public:
virtual StringView name() const override { return "kasan_is_deadly"sv; }
static NonnullRefPtr<SysFSKASANDeadly> must_create(SysFSDirectory const&);
private:
virtual bool value() const override;
virtual void set_value(bool new_value) override;
explicit SysFSKASANDeadly(SysFSDirectory const&);
};
}


@@ -11,6 +11,7 @@
#include <AK/TemporaryChange.h>
#include <AK/Vector.h>
#include <AK/kmalloc.h>
#include <Kernel/Security/AddressSanitizer.h>
namespace Kernel {
@@ -68,7 +69,7 @@ public:
return needed_chunks * CHUNK_SIZE + (needed_chunks + 7) / 8;
}
void* allocate(size_t size, size_t alignment, CallerWillInitializeMemory caller_will_initialize_memory)
void* allocate(size_t size, size_t alignment, [[maybe_unused]] CallerWillInitializeMemory caller_will_initialize_memory)
{
// The minimum possible alignment is CHUNK_SIZE, since we only track chunks here, nothing smaller.
if (alignment < CHUNK_SIZE)
@@ -104,17 +105,23 @@ public:
VERIFY(first_chunk.value() <= aligned_first_chunk);
VERIFY(aligned_first_chunk + chunks_needed <= first_chunk.value() + chunks_needed + chunk_alignment);
#ifdef HAS_ADDRESS_SANITIZER
AddressSanitizer::mark_region((FlatPtr)a, real_size, (chunks_needed * CHUNK_SIZE), AddressSanitizer::ShadowType::Malloc);
#endif
u8* ptr = a->data;
a->allocation_size_in_chunks = chunks_needed;
m_bitmap.set_range_and_verify_that_all_bits_flip(aligned_first_chunk, chunks_needed, true);
m_allocated_chunks += chunks_needed;
#ifndef HAS_ADDRESS_SANITIZER
if (caller_will_initialize_memory == CallerWillInitializeMemory::No) {
if constexpr (HEAP_SCRUB_BYTE_ALLOC != 0) {
__builtin_memset(ptr, HEAP_SCRUB_BYTE_ALLOC, (chunks_needed * CHUNK_SIZE) - sizeof(AllocationHeader));
}
}
#endif
VERIFY((FlatPtr)ptr % alignment == 0);
return ptr;
@@ -137,9 +144,13 @@ public:
VERIFY(m_allocated_chunks >= a->allocation_size_in_chunks);
m_allocated_chunks -= a->allocation_size_in_chunks;
#ifdef HAS_ADDRESS_SANITIZER
AddressSanitizer::fill_shadow((FlatPtr)a, a->allocation_size_in_chunks * CHUNK_SIZE, AddressSanitizer::ShadowType::Free);
#else
if constexpr (HEAP_SCRUB_BYTE_FREE != 0) {
__builtin_memset(a, HEAP_SCRUB_BYTE_FREE, a->allocation_size_in_chunks * CHUNK_SIZE);
}
#endif
}
bool contains(void const* ptr) const


@@ -16,6 +16,7 @@
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Sections.h>
#include <Kernel/Security/AddressSanitizer.h>
#include <Kernel/Tasks/PerformanceManager.h>
#if ARCH(X86_64) || ARCH(AARCH64) || ARCH(RISCV64)
@@ -65,11 +66,18 @@ public:
}
}
void* allocate()
void* allocate([[maybe_unused]] size_t requested_size)
{
VERIFY(m_freelist);
++m_allocated_slabs;
return exchange(m_freelist, m_freelist->next);
#ifdef HAS_ADDRESS_SANITIZER
AddressSanitizer::fill_shadow((FlatPtr)m_freelist, sizeof(FreelistEntry::next), Kernel::AddressSanitizer::ShadowType::Unpoisoned8Bytes);
#endif
auto* ptr = exchange(m_freelist, m_freelist->next);
#ifdef HAS_ADDRESS_SANITIZER
AddressSanitizer::mark_region((FlatPtr)ptr, requested_size, m_slab_size, AddressSanitizer::ShadowType::Malloc);
#endif
return ptr;
}
void deallocate(void* ptr)
@@ -77,7 +85,13 @@ public:
VERIFY(ptr >= &m_data && ptr < ((u8*)this + block_size));
--m_allocated_slabs;
auto* freelist_entry = (FreelistEntry*)ptr;
#ifdef HAS_ADDRESS_SANITIZER
AddressSanitizer::fill_shadow((FlatPtr)freelist_entry, sizeof(FreelistEntry::next), Kernel::AddressSanitizer::ShadowType::Unpoisoned8Bytes);
#endif
freelist_entry->next = m_freelist;
#ifdef HAS_ADDRESS_SANITIZER
AddressSanitizer::fill_shadow((FlatPtr)freelist_entry, m_slab_size, AddressSanitizer::ShadowType::Free);
#endif
m_freelist = freelist_entry;
}
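Note the ordering of the two fill_shadow() calls in deallocate(): the freelist link lives inside memory that is about to be poisoned as Free, so the 8 bytes holding next are unpoisoned just long enough to store the pointer, and only then is the whole slab poisoned. A toy model of that sequence, using a local byte array in place of the live shadow mapping (the encoding constants are simplified assumptions):

#include <cassert>
#include <cstdint>
#include <cstring>

// Assumed simplified encoding: 0x00 = addressable, 0xFC = freed.
enum : uint8_t { Unpoisoned = 0x00, Freed = 0xFC };

struct FreelistEntry { FreelistEntry* next; };

int main()
{
    alignas(8) uint8_t slab[32];          // one 32-byte slab
    uint8_t shadow[sizeof(slab) / 8] {};  // 1 shadow byte per 8 slab bytes

    auto* entry = reinterpret_cast<FreelistEntry*>(slab);

    // 1. Unpoison only the 8 bytes that hold the freelist link...
    std::memset(shadow, Unpoisoned, sizeof(FreelistEntry) / 8);
    // 2. ...so this store would pass an instrumented shadow check...
    entry->next = nullptr;
    // 3. ...then poison the entire slab as freed.
    std::memset(shadow, Freed, sizeof(shadow));

    assert(shadow[0] == Freed && shadow[3] == Freed);
    return 0;
}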
@@ -122,7 +136,7 @@ public:
size_t slab_size() const { return m_slab_size; }
void* allocate(CallerWillInitializeMemory caller_will_initialize_memory)
void* allocate(size_t requested_size, [[maybe_unused]] CallerWillInitializeMemory caller_will_initialize_memory)
{
if (m_usable_blocks.is_empty()) {
// FIXME: This allocation wastes `block_size` bytes due to the implementation of kmalloc_aligned().
@@ -136,19 +150,23 @@ public:
m_usable_blocks.append(*block);
}
auto* block = m_usable_blocks.first();
auto* ptr = block->allocate();
auto* ptr = block->allocate(requested_size);
if (block->is_full())
m_full_blocks.append(*block);
#ifndef HAS_ADDRESS_SANITIZER
if (caller_will_initialize_memory == CallerWillInitializeMemory::No) {
memset(ptr, KMALLOC_SCRUB_BYTE, m_slab_size);
}
#endif
return ptr;
}
void deallocate(void* ptr)
{
#ifndef HAS_ADDRESS_SANITIZER
memset(ptr, KFREE_SCRUB_BYTE, m_slab_size);
#endif
auto* block = (KmallocSlabBlock*)((FlatPtr)ptr & KmallocSlabBlock::block_mask);
bool block_was_full = block->is_full();
@@ -227,7 +245,7 @@ struct KmallocGlobalData {
for (auto& slabheap : slabheaps) {
if (size <= slabheap.slab_size() && alignment <= slabheap.slab_size())
return slabheap.allocate(caller_will_initialize_memory);
return slabheap.allocate(size, caller_will_initialize_memory);
}
for (auto& subheap : subheaps) {


@@ -24,6 +24,7 @@
#include <Kernel/Memory/SharedInodeVMObject.h>
#include <Kernel/Prekernel/Prekernel.h>
#include <Kernel/Sections.h>
#include <Kernel/Security/AddressSanitizer.h>
#include <Kernel/Tasks/Process.h>
extern u8 start_of_kernel_image[];
@@ -105,6 +106,10 @@ UNMAP_AFTER_INIT MemoryManager::MemoryManager()
// By using a tag we don't have to query the VMObject for every page
// whether it was committed or not
m_lazy_committed_page = committed_pages.take_one();
#ifdef HAS_ADDRESS_SANITIZER
initialize_kasan_shadow_memory();
#endif
}
UNMAP_AFTER_INIT MemoryManager::~MemoryManager() = default;
@@ -579,6 +584,26 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
});
}
#ifdef HAS_ADDRESS_SANITIZER
void MemoryManager::initialize_kasan_shadow_memory()
{
m_global_data.with([&](auto& global_data) {
// We map every 8 bytes of normal memory to 1 byte of shadow memory, so we need 1/9 of the total memory for the shadow memory.
auto virtual_range = global_data.region_tree.total_range();
auto shadow_range_size = MUST(page_round_up(ceil_div(virtual_range.size(), 9ul)));
dbgln("MM: Reserving {} bytes for KASAN shadow memory", shadow_range_size);
auto vmobject = MUST(AnonymousVMObject::try_create_with_size(shadow_range_size, AllocationStrategy::AllocateNow));
auto* shadow_region = MUST(Region::create_unplaced(move(vmobject), 0, {}, Memory::Region::Access::ReadWrite)).leak_ptr();
auto shadow_range = VirtualRange { virtual_range.base().offset(virtual_range.size() - shadow_range_size), shadow_range_size };
MUST(global_data.region_tree.place_specifically(*shadow_region, shadow_range));
MUST(shadow_region->map(kernel_page_directory()));
AddressSanitizer::init(shadow_region->vaddr().get());
});
}
#endif
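A quick sanity check of the 1/9 arithmetic above: if the real memory and its shadow must share one virtual range, and the shadow is 1/8 the size of what it covers, the shadow takes 1/9 of the total. A sketch with an illustrative range size (64 GiB is an assumption, not SerenityOS's actual kernel VA size; the page rounding done by the kernel is omitted):

#include <cassert>
#include <cstdint>

int main()
{
    uint64_t const total_range = 64ull << 30;      // illustrative 64 GiB range
    uint64_t const shadow_size = (total_range + 8) / 9; // ceil_div(total, 9), as above
    uint64_t const real_size = total_range - shadow_size;

    // Every 8 bytes of the remaining (mappable) range need 1 shadow byte.
    assert(real_size / 8 <= shadow_size);
    return 0;
}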
PhysicalPageEntry& MemoryManager::get_physical_page_entry(PhysicalAddress physical_address)
{
auto physical_page_entry_index = PhysicalAddress::physical_page_index(physical_address.get());


@@ -243,6 +243,10 @@ private:
void initialize_physical_pages();
void register_reserved_ranges();
#ifdef HAS_ADDRESS_SANITIZER
void initialize_kasan_shadow_memory();
#endif
void unregister_kernel_region(Region&);
void protect_kernel_image();


@@ -1,25 +1,249 @@
/*
* Copyright (c) 2021, Brian Gianforcaro <bgianf@serenityos.org>
* Copyright (c) 2023, Idan Horowitz <idan.horowitz@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#if defined(__SANITIZE_ADDRESS__)
#include <AK/Platform.h>
# include <Kernel/Security/AddressSanitizer.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Boot/BootInfo.h>
#include <Kernel/KSyms.h>
#include <Kernel/Library/StdLib.h>
#include <Kernel/Security/AddressSanitizer.h>
void Kernel::AddressSanitizer::shadow_va_check_load(unsigned long address, size_t size, void* return_address)
static constexpr size_t kasan_shadow_scale_offset = 3; // We map each 8 real bytes to 1 shadow byte
static constexpr size_t kasan_shadow_scale = 1 << kasan_shadow_scale_offset;
static constexpr size_t kasan_shadow_mask = kasan_shadow_scale - 1;
// Defined in clang
static constexpr size_t kasan_alloca_redzone_size = 32;
namespace Kernel::AddressSanitizer {
enum class AccessType {
Load,
Store
};
static constexpr StringView to_string(AccessType access_type)
{
(void)address;
(void)size;
(void)return_address;
switch (access_type) {
case AccessType::Load:
return "Load"sv;
case AccessType::Store:
return "Store"sv;
default:
return "Unknown"sv;
}
}
void Kernel::AddressSanitizer::shadow_va_check_store(unsigned long address, size_t size, void* return_address)
static constexpr StringView to_string(ShadowType shadow_type)
{
(void)address;
(void)size;
(void)return_address;
switch (shadow_type) {
case ShadowType::Unpoisoned8Bytes:
return "8 Bytes Unpoisoned"sv;
case ShadowType::Unpoisoned1Byte:
return "1 Byte Unpoisoned | 7 Bytes Poisoned"sv;
case ShadowType::Unpoisoned2Bytes:
return "2 Bytes Unpoisoned | 6 Bytes Poisoned"sv;
case ShadowType::Unpoisoned3Bytes:
return "3 Bytes Unpoisoned | 5 Bytes Poisoned"sv;
case ShadowType::Unpoisoned4Bytes:
return "4 Bytes Unpoisoned | 4 Bytes Poisoned"sv;
case ShadowType::Unpoisoned5Bytes:
return "5 Bytes Unpoisoned | 3 Bytes Poisoned"sv;
case ShadowType::Unpoisoned6Bytes:
return "6 Bytes Unpoisoned | 2 Bytes Poisoned"sv;
case ShadowType::Unpoisoned7Bytes:
return "7 Bytes Unpoisoned | 1 Byte Poisoned"sv;
case ShadowType::StackLeft:
return "Stack Left Redzone"sv;
case ShadowType::StackMiddle:
return "Stack Middle Redzone"sv;
case ShadowType::StackRight:
return "Stack Right Redzone"sv;
case ShadowType::UseAfterReturn:
return "Use After Return"sv;
case ShadowType::UseAfterScope:
return "Use After Scope"sv;
case ShadowType::Generic:
return "Generic Redzone"sv;
case ShadowType::Malloc:
return "Malloc Redzone"sv;
case ShadowType::Free:
return "Freed Region"sv;
default:
return "Unknown"sv;
}
}
Atomic<bool> g_kasan_is_deadly { true };
static void print_violation(FlatPtr address, size_t size, AccessType access_type, ShadowType shadow_type, void* return_address)
{
critical_dmesgln("KASAN: Invalid {}-byte {} access to {}, which is marked as '{}' [at {:p}]", size, to_string(access_type), VirtualAddress(address), to_string(shadow_type), return_address);
dump_backtrace(g_kasan_is_deadly ? PrintToScreen::Yes : PrintToScreen::No);
if (g_kasan_is_deadly) {
critical_dmesgln("KASAN is configured to be deadly, halting the system.");
Processor::halt();
}
}
static FlatPtr kasan_shadow_base;
static FlatPtr kasan_shadow_offset;
static bool kasan_initialized = false;
void init(FlatPtr shadow_base)
{
kasan_shadow_base = shadow_base;
kasan_shadow_offset = shadow_base - (kernel_mapping_base >> kasan_shadow_scale_offset);
kasan_initialized = true;
}
static inline ShadowType* va_to_shadow(FlatPtr address)
{
return (ShadowType*)((address >> kasan_shadow_scale_offset) + kasan_shadow_offset);
}
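The translation above is linear: shadow(address) = (address >> 3) + offset, where the offset is precomputed in init() so that kernel_mapping_base maps exactly to the first shadow byte. A compile-time check with made-up base addresses (both constants are illustrative assumptions):

#include <cstdint>

constexpr uintptr_t kernel_mapping_base = 0xffff800000000000; // illustrative
constexpr uintptr_t shadow_base = 0xfffff00000000000;         // illustrative
constexpr uintptr_t shadow_offset = shadow_base - (kernel_mapping_base >> 3);

constexpr uintptr_t va_to_shadow(uintptr_t address)
{
    return (address >> 3) + shadow_offset;
}

// The base of the kernel mapping maps to the first shadow byte...
static_assert(va_to_shadow(kernel_mapping_base) == shadow_base);
// ...and each group of 8 consecutive bytes shares one shadow byte.
static_assert(va_to_shadow(kernel_mapping_base + 8) == shadow_base + 1);
static_assert(va_to_shadow(kernel_mapping_base + 15) == shadow_base + 1);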
void fill_shadow(FlatPtr address, size_t size, ShadowType type)
{
if (!kasan_initialized) [[unlikely]]
return;
VERIFY((address % kasan_shadow_scale) == 0);
VERIFY((size % kasan_shadow_scale) == 0);
auto* shadow = va_to_shadow(address);
auto shadow_size = size >> kasan_shadow_scale_offset;
memset(shadow, to_underlying(type), shadow_size);
}
void mark_region(FlatPtr address, size_t valid_size, size_t total_size, ShadowType type)
{
if (!kasan_initialized) [[unlikely]]
return;
VERIFY((address % kasan_shadow_scale) == 0);
VERIFY((total_size % kasan_shadow_scale) == 0);
auto* shadow = va_to_shadow(address);
auto valid_shadow_size = valid_size >> kasan_shadow_scale_offset;
memset(shadow, to_underlying(ShadowType::Unpoisoned8Bytes), valid_shadow_size);
auto unaligned_size = valid_size & kasan_shadow_mask;
if (unaligned_size)
*(shadow + valid_shadow_size) = static_cast<ShadowType>(unaligned_size);
auto poisoned_shadow_size = (total_size - round_up_to_power_of_two(valid_size, kasan_shadow_scale)) >> kasan_shadow_scale_offset;
memset(shadow + valid_shadow_size + (unaligned_size != 0), to_underlying(type), poisoned_shadow_size);
}
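The interesting case in mark_region() is a valid_size that is not a multiple of 8: the granule on the boundary gets the count of its valid leading bytes as its shadow value. A standalone model of the computation for a 13-byte allocation in a 32-byte slot (writing to a local array rather than the live shadow mapping):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

int main()
{
    unsigned char shadow[4];
    size_t const valid_size = 13;  // requested allocation size
    size_t const total_size = 32;  // size actually reserved (4 shadow bytes)

    size_t const valid_shadow = valid_size >> 3;            // 1 fully-valid granule
    std::memset(shadow, 0x00, valid_shadow);                // Unpoisoned8Bytes
    size_t const unaligned = valid_size & 7;                // 5 valid bytes remain
    if (unaligned)
        shadow[valid_shadow] = (unsigned char)unaligned;    // Unpoisoned5Bytes
    size_t const poisoned = (total_size - ((valid_size + 7) & ~(size_t)7)) >> 3;
    std::memset(shadow + valid_shadow + (unaligned != 0), 0xFB, poisoned); // Malloc redzone

    unsigned char const expected[4] = { 0x00, 0x05, 0xFB, 0xFB };
    assert(std::memcmp(shadow, expected, sizeof(shadow)) == 0);
    return 0;
}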
static bool shadow_va_check_1b(FlatPtr address, ShadowType& shadow_type)
{
auto const shadow = *va_to_shadow(address);
i8 const minimal_valid_shadow = (address & kasan_shadow_mask) + 1;
if (shadow == ShadowType::Unpoisoned8Bytes || (minimal_valid_shadow <= static_cast<i8>(shadow))) [[likely]]
return true;
shadow_type = shadow;
return false;
}
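The signed comparison above exploits the shadow encoding twice over: values 1..7 are the number of valid leading bytes in the granule, and every poison code is 0x80 or above, hence negative once cast to i8. A distilled compile-time version of the same arithmetic (names are illustrative):

#include <cstdint>

constexpr bool check_1b(uintptr_t address, uint8_t shadow)
{
    int8_t const minimal_valid_shadow = static_cast<int8_t>((address & 7) + 1);
    return shadow == 0 || minimal_valid_shadow <= static_cast<int8_t>(shadow);
}

static_assert(check_1b(0x1000, 0x00));  // fully unpoisoned granule
static_assert(check_1b(0x1002, 5));     // byte 2 of "5 valid bytes": fine
static_assert(!check_1b(0x1005, 5));    // byte 5 of "5 valid bytes": redzone hit
static_assert(!check_1b(0x1000, 0xFB)); // Malloc redzone: negative as i8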
static bool shadow_va_check_2b(FlatPtr address, ShadowType& shadow_type)
{
// Access straddles two shadow granules; check each byte separately
if ((address >> kasan_shadow_scale_offset) != (address + 1) >> kasan_shadow_scale_offset) [[unlikely]]
return shadow_va_check_1b(address, shadow_type) && shadow_va_check_1b(address + 1, shadow_type);
auto const shadow = *va_to_shadow(address);
i8 const minimal_valid_shadow = ((address + 1) & kasan_shadow_mask) + 1;
if (shadow == ShadowType::Unpoisoned8Bytes || (minimal_valid_shadow <= static_cast<i8>(shadow))) [[likely]]
return true;
shadow_type = shadow;
return false;
}
static bool shadow_va_check_4b(FlatPtr address, ShadowType& shadow_type)
{
// Access straddles a granule boundary; check each half separately
if ((address >> kasan_shadow_scale_offset) != (address + 3) >> kasan_shadow_scale_offset) [[unlikely]]
return shadow_va_check_2b(address, shadow_type) && shadow_va_check_2b(address + 2, shadow_type);
auto const shadow = *va_to_shadow(address);
i8 const minimal_valid_shadow = ((address + 3) & kasan_shadow_mask) + 1;
if (shadow == ShadowType::Unpoisoned8Bytes || (minimal_valid_shadow <= static_cast<i8>(shadow))) [[likely]]
return true;
shadow_type = shadow;
return false;
}
static bool shadow_va_check_8b(FlatPtr address, ShadowType& shadow_type)
{
// Access straddles a granule boundary; check each half separately
if ((address >> kasan_shadow_scale_offset) != (address + 7) >> kasan_shadow_scale_offset) [[unlikely]]
return shadow_va_check_4b(address, shadow_type) && shadow_va_check_4b(address + 4, shadow_type);
auto const shadow = *va_to_shadow(address);
i8 const minimal_valid_shadow = ((address + 7) & kasan_shadow_mask) + 1;
if (shadow == ShadowType::Unpoisoned8Bytes || (minimal_valid_shadow <= static_cast<i8>(shadow))) [[likely]]
return true;
shadow_type = shadow;
return false;
}
static bool shadow_va_check_Nb(FlatPtr address, size_t n, ShadowType& shadow_type)
{
while ((address % 8) && (n > 0)) {
if (!shadow_va_check_1b(address, shadow_type)) [[unlikely]]
return false;
address++;
n--;
}
while (n >= 8) {
if (!shadow_va_check_8b(address, shadow_type))
return false;
address += 8;
n -= 8;
}
while (n > 0) {
if (!shadow_va_check_1b(address, shadow_type)) [[unlikely]]
return false;
address++;
n--;
}
return true;
}
static void shadow_va_check(FlatPtr address, size_t size, AccessType access_type, void* return_address)
{
if (size == 0) [[unlikely]]
return;
if (!kasan_initialized) [[unlikely]]
return;
if (address < kernel_mapping_base || address >= kasan_shadow_base) [[unlikely]]
return;
bool valid = false;
ShadowType shadow_type = ShadowType::Unpoisoned8Bytes;
switch (size) {
case 1:
valid = shadow_va_check_1b(address, shadow_type);
break;
case 2:
valid = shadow_va_check_2b(address, shadow_type);
break;
case 4:
valid = shadow_va_check_4b(address, shadow_type);
break;
case 8:
valid = shadow_va_check_8b(address, shadow_type);
break;
default:
valid = shadow_va_check_Nb(address, size, shadow_type);
break;
}
if (valid) [[likely]]
return;
print_violation(address, size, access_type, shadow_type, return_address);
}
}
using namespace Kernel;
@@ -30,27 +254,47 @@ extern "C" {
// Define a macro to easily declare the KASAN load and store callbacks for
// the various data type sizes.
//
# define ADDRESS_SANITIZER_LOAD_STORE(size) \
void __asan_load##size(unsigned long); \
void __asan_load##size(unsigned long address) \
{ \
shadow_va_check_load(address, size, __builtin_return_address(0)); \
} \
void __asan_load##size##_noabort(unsigned long); \
void __asan_load##size##_noabort(unsigned long address) \
{ \
shadow_va_check_load(address, size, __builtin_return_address(0)); \
} \
void __asan_store##size(unsigned long); \
void __asan_store##size(unsigned long address) \
{ \
shadow_va_check_store(address, size, __builtin_return_address(0)); \
} \
void __asan_store##size##_noabort(unsigned long); \
void __asan_store##size##_noabort(unsigned long address) \
{ \
shadow_va_check_store(address, size, __builtin_return_address(0)); \
}
#define ADDRESS_SANITIZER_LOAD_STORE(size) \
void __asan_load##size(FlatPtr); \
void __asan_load##size(FlatPtr address) \
{ \
shadow_va_check(address, size, AccessType::Load, __builtin_return_address(0)); \
} \
void __asan_load##size##_noabort(FlatPtr); \
void __asan_load##size##_noabort(FlatPtr address) \
{ \
shadow_va_check(address, size, AccessType::Load, __builtin_return_address(0)); \
} \
void __asan_store##size(FlatPtr); \
void __asan_store##size(FlatPtr address) \
{ \
shadow_va_check(address, size, AccessType::Store, __builtin_return_address(0)); \
} \
void __asan_store##size##_noabort(FlatPtr); \
void __asan_store##size##_noabort(FlatPtr address) \
{ \
shadow_va_check(address, size, AccessType::Store, __builtin_return_address(0)); \
} \
void __asan_report_load##size(FlatPtr); \
void __asan_report_load##size(FlatPtr address) \
{ \
print_violation(address, size, AccessType::Load, ShadowType::Generic, __builtin_return_address(0)); \
} \
void __asan_report_load##size##_noabort(FlatPtr); \
void __asan_report_load##size##_noabort(FlatPtr address) \
{ \
print_violation(address, size, AccessType::Load, ShadowType::Generic, __builtin_return_address(0)); \
} \
void __asan_report_store##size(FlatPtr); \
void __asan_report_store##size(FlatPtr address) \
{ \
print_violation(address, size, AccessType::Store, ShadowType::Generic, __builtin_return_address(0)); \
} \
void __asan_report_store##size##_noabort(FlatPtr); \
void __asan_report_store##size##_noabort(FlatPtr address) \
{ \
print_violation(address, size, AccessType::Store, ShadowType::Generic, __builtin_return_address(0)); \
}
ADDRESS_SANITIZER_LOAD_STORE(1);
ADDRESS_SANITIZER_LOAD_STORE(2);
@@ -58,42 +302,125 @@ ADDRESS_SANITIZER_LOAD_STORE(4);
ADDRESS_SANITIZER_LOAD_STORE(8);
ADDRESS_SANITIZER_LOAD_STORE(16);
# undef ADDRESS_SANITIZER_LOAD_STORE
#undef ADDRESS_SANITIZER_LOAD_STORE
void __asan_loadN(unsigned long, size_t);
void __asan_loadN(unsigned long address, size_t size)
void __asan_loadN(FlatPtr, size_t);
void __asan_loadN(FlatPtr address, size_t size)
{
shadow_va_check_load(address, size, __builtin_return_address(0));
shadow_va_check(address, size, AccessType::Load, __builtin_return_address(0));
}
void __asan_loadN_noabort(unsigned long, size_t);
void __asan_loadN_noabort(unsigned long address, size_t size)
void __asan_loadN_noabort(FlatPtr, size_t);
void __asan_loadN_noabort(FlatPtr address, size_t size)
{
shadow_va_check_load(address, size, __builtin_return_address(0));
shadow_va_check(address, size, AccessType::Load, __builtin_return_address(0));
}
void __asan_storeN(unsigned long, size_t);
void __asan_storeN(unsigned long address, size_t size)
void __asan_storeN(FlatPtr, size_t);
void __asan_storeN(FlatPtr address, size_t size)
{
shadow_va_check_store(address, size, __builtin_return_address(0));
shadow_va_check(address, size, AccessType::Store, __builtin_return_address(0));
}
void __asan_storeN_noabort(unsigned long, size_t);
void __asan_storeN_noabort(unsigned long address, size_t size)
void __asan_storeN_noabort(FlatPtr, size_t);
void __asan_storeN_noabort(FlatPtr address, size_t size)
{
shadow_va_check_store(address, size, __builtin_return_address(0));
shadow_va_check(address, size, AccessType::Store, __builtin_return_address(0));
}
void __asan_report_load_n(FlatPtr, size_t);
void __asan_report_load_n(FlatPtr address, size_t size)
{
print_violation(address, size, AccessType::Load, ShadowType::Generic, __builtin_return_address(0));
}
void __asan_report_load_n_noabort(FlatPtr, size_t);
void __asan_report_load_n_noabort(FlatPtr address, size_t size)
{
print_violation(address, size, AccessType::Load, ShadowType::Generic, __builtin_return_address(0));
}
void __asan_report_store_n(FlatPtr, size_t);
void __asan_report_store_n(FlatPtr address, size_t size)
{
print_violation(address, size, AccessType::Store, ShadowType::Generic, __builtin_return_address(0));
}
void __asan_report_store_n_noabort(FlatPtr, size_t);
void __asan_report_store_n_noabort(FlatPtr address, size_t size)
{
print_violation(address, size, AccessType::Store, ShadowType::Generic, __builtin_return_address(0));
}
// As defined in the compiler
struct __asan_global_source_location {
char const* filename;
int line_number;
int column_number;
};
struct __asan_global {
uintptr_t address;
size_t valid_size;
size_t total_size;
char const* name;
char const* module_name;
size_t has_dynamic_init;
struct __asan_global_source_location* location;
size_t odr_indicator;
};
void __asan_register_globals(struct __asan_global*, size_t);
void __asan_register_globals(struct __asan_global* globals, size_t count)
{
for (auto i = 0u; i < count; ++i)
mark_region(globals[i].address, globals[i].valid_size, globals[i].total_size, ShadowType::Generic);
}
void __asan_unregister_globals(struct __asan_global*, size_t);
void __asan_unregister_globals(struct __asan_global* globals, size_t count)
{
for (auto i = 0u; i < count; ++i)
mark_region(globals[i].address, globals[i].total_size, globals[i].total_size, ShadowType::Unpoisoned8Bytes);
}
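The records above are emitted by the compiler for every instrumented global. A hand-built example of what one such record might look like (struct layout copied from above; the 64-byte slot and the names "g" and "example.cpp" are illustrative assumptions, since clang computes the real redzone size):

#include <cstddef>
#include <cstdint>

struct __asan_global_source_location {
    char const* filename;
    int line_number;
    int column_number;
};
struct __asan_global {
    uintptr_t address;
    size_t valid_size;
    size_t total_size;
    char const* name;
    char const* module_name;
    size_t has_dynamic_init;
    __asan_global_source_location* location;
    size_t odr_indicator;
};

alignas(64) static int g; // 4 valid bytes in an (assumed) 64-byte redzoned slot

int main()
{
    __asan_global const record = {
        reinterpret_cast<uintptr_t>(&g), sizeof(g), 64,
        "g", "example.cpp", 0, nullptr, 0
    };
    // __asan_register_globals() would call
    //   mark_region(record.address, 4, 64, ShadowType::Generic),
    // leaving shadow byte 0 as Unpoisoned4Bytes and bytes 1..7 as Generic
    // redzone, so even a 5-byte read of 'g' is caught.
    return record.valid_size == sizeof(int) ? 0 : 1;
}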
void __asan_alloca_poison(FlatPtr, size_t);
void __asan_alloca_poison(FlatPtr address, size_t size)
{
VERIFY(address % kasan_alloca_redzone_size == 0);
auto rounded_size = round_up_to_power_of_two(size, kasan_alloca_redzone_size);
fill_shadow(address - kasan_alloca_redzone_size, kasan_alloca_redzone_size, ShadowType::StackLeft);
mark_region(address, size, rounded_size, Kernel::AddressSanitizer::ShadowType::StackMiddle);
fill_shadow(address + rounded_size, kasan_alloca_redzone_size, Kernel::AddressSanitizer::ShadowType::StackRight);
}
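Concretely, for a 40-byte alloca at a 32-byte-aligned address A, the poisoning above produces the layout sketched below (the 32-byte redzone size is the clang-defined constant quoted earlier; the worked numbers are only an example):

#include <cstdint>

constexpr uint64_t round_up(uint64_t value, uint64_t power_of_two)
{
    return (value + power_of_two - 1) & ~(power_of_two - 1);
}

// [A - 32, A)      StackLeft redzone  (0xF1)
// [A, A + 40)      addressable        (0x00)
// [A + 40, A + 64) StackMiddle pad    (0xF2) -- 40 rounded up to 64
// [A + 64, A + 96) StackRight redzone (0xF3)
static_assert(round_up(40, 32) == 64);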
void __asan_allocas_unpoison(FlatPtr, size_t);
void __asan_allocas_unpoison(FlatPtr start, size_t end)
{
VERIFY(start <= end);
auto size = end - start;
VERIFY(size % kasan_shadow_scale == 0);
fill_shadow(start, size, Kernel::AddressSanitizer::ShadowType::Unpoisoned8Bytes);
}
void __asan_poison_stack_memory(FlatPtr, size_t);
void __asan_poison_stack_memory(FlatPtr address, size_t size)
{
fill_shadow(address, round_up_to_power_of_two(size, kasan_shadow_scale), Kernel::AddressSanitizer::ShadowType::UseAfterScope);
}
void __asan_unpoison_stack_memory(FlatPtr, size_t);
void __asan_unpoison_stack_memory(FlatPtr address, size_t size)
{
fill_shadow(address, round_up_to_power_of_two(size, kasan_shadow_scale), Kernel::AddressSanitizer::ShadowType::Unpoisoned8Bytes);
}
// Performs shadow memory cleanup of the current thread's stack before a
// function marked with the [[noreturn]] attribute is called.
//
void __asan_handle_no_return(void);
void __asan_handle_no_return(void)
{
}
void __asan_before_dynamic_init(char const*);
void __asan_before_dynamic_init(char const* /* module_name */)
void __asan_before_dynamic_init(char const*)
{
}
@@ -102,5 +429,3 @@ void __asan_after_dynamic_init()
{
}
}
#endif


@@ -1,17 +1,40 @@
/*
* Copyright (c) 2021, Brian Gianforcaro <bgianf@serenityos.org>
* Copyright (c) 2023, Idan Horowitz <idan.horowitz@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Atomic.h>
#include <AK/Types.h>
namespace Kernel::AddressSanitizer {
void shadow_va_check_load(unsigned long address, size_t size, void* return_addr);
extern Atomic<bool> g_kasan_is_deadly;
void shadow_va_check_store(unsigned long address, size_t size, void* return_addr);
enum class ShadowType : u8 {
Unpoisoned8Bytes = 0,
Unpoisoned1Byte = 1,
Unpoisoned2Bytes = 2,
Unpoisoned3Bytes = 3,
Unpoisoned4Bytes = 4,
Unpoisoned5Bytes = 5,
Unpoisoned6Bytes = 6,
Unpoisoned7Bytes = 7,
StackLeft = 0xF1,
StackMiddle = 0xF2,
StackRight = 0xF3,
UseAfterReturn = 0xF5,
UseAfterScope = 0xF8,
Generic = 0xFA,
Malloc = 0xFB,
Free = 0xFC,
};
void init(FlatPtr shadow_base);
void fill_shadow(FlatPtr address, size_t size, ShadowType type);
void mark_region(FlatPtr address, size_t valid_size, size_t total_size, ShadowType type);
}


@@ -145,7 +145,7 @@ new file mode 100644
index 000000000..4fdf45a19
--- /dev/null
+++ b/clang/lib/Driver/ToolChains/Serenity.cpp
@@ -0,0 +1,336 @@
@@ -0,0 +1,340 @@
+//===---- Serenity.cpp - SerenityOS ToolChain Implementation ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
@@ -482,12 +482,16 @@ index 000000000..4fdf45a19
+
+ return ToolChain::UNW_None;
+}
+
+SanitizerMask Serenity::getSupportedSanitizers() const {
+ return ToolChain::getSupportedSanitizers() | SanitizerKind::KernelAddress;
+}
diff --git a/clang/lib/Driver/ToolChains/Serenity.h b/clang/lib/Driver/ToolChains/Serenity.h
new file mode 100644
index 000000000..feb31a0d6
--- /dev/null
+++ b/clang/lib/Driver/ToolChains/Serenity.h
@@ -0,0 +1,100 @@
@@ -0,0 +1,102 @@
+//===---- Serenity.h - SerenityOS ToolChain Implementation ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
@@ -563,6 +567,8 @@ index 000000000..feb31a0d6
+ bool isPIEDefault(const llvm::opt::ArgList&) const override { return false; }
+ bool isPICDefaultForced() const override { return false; }
+
+ SanitizerMask getSupportedSanitizers() const override;
+
+ bool IsMathErrnoDefault() const override { return false; }
+
+ UnwindTableLevel