Kernel: Implement safe_memcpy for the aarch64 build

The implementation simply forwards to the regular memcpy, so it is not yet
fault-safe; a proper fault-recovering implementation can be added later.
This commit is contained in:
Timon Kruiper 2022-05-02 23:01:03 +02:00 committed by Andreas Kling
parent feba7bc8a8
commit 9f76b16124
Notes: sideshowbarker 2024-07-17 11:21:51 +09:00
3 changed files with 62 additions and 57 deletions

View File

@ -141,63 +141,6 @@ void KString::operator delete(void*)
VERIFY_NOT_REACHED();
}
// SafeMem.h
// Stub: fault-recovering memset is not implemented for this architecture;
// reaching it traps the kernel via VERIFY_NOT_REACHED().
// NOTE(review): the trailing void*& presumably reports the faulting address
// in a real implementation — confirm against the implemented architecture.
bool safe_memset(void*, int, size_t, void*&);
bool safe_memset(void*, int, size_t, void*&)
{
VERIFY_NOT_REACHED();
return false; // Unreachable; satisfies the bool return type.
}
// Stub: fault-recovering strnlen is not implemented here; any call traps.
ssize_t safe_strnlen(char const*, unsigned long, void*&);
ssize_t safe_strnlen(char const*, unsigned long, void*&)
{
VERIFY_NOT_REACHED();
return 0; // Unreachable; satisfies the ssize_t return type.
}
// Stub: fault-recovering memcpy is not implemented here; any call traps.
bool safe_memcpy(void*, void const*, unsigned long, void*&);
bool safe_memcpy(void*, void const*, unsigned long, void*&)
{
VERIFY_NOT_REACHED();
return false; // Unreachable; satisfies the bool return type.
}
// Stub: fault-recovering relaxed compare-exchange is not implemented here;
// any call traps via VERIFY_NOT_REACHED().
Optional<bool> safe_atomic_compare_exchange_relaxed(u32 volatile*, u32&, u32);
Optional<bool> safe_atomic_compare_exchange_relaxed(u32 volatile*, u32&, u32)
{
VERIFY_NOT_REACHED();
return {}; // Unreachable; empty Optional.
}
// Stub: fault-recovering relaxed atomic load is not implemented here;
// any call traps via VERIFY_NOT_REACHED().
Optional<u32> safe_atomic_load_relaxed(u32 volatile*);
Optional<u32> safe_atomic_load_relaxed(u32 volatile*)
{
VERIFY_NOT_REACHED();
return {}; // Unreachable; empty Optional.
}
// Stub: fault-recovering relaxed fetch-add is not implemented here;
// any call traps via VERIFY_NOT_REACHED().
Optional<u32> safe_atomic_fetch_add_relaxed(u32 volatile*, u32);
Optional<u32> safe_atomic_fetch_add_relaxed(u32 volatile*, u32)
{
VERIFY_NOT_REACHED();
return {}; // Unreachable; empty Optional.
}
// Stub: fault-recovering relaxed atomic exchange is not implemented here;
// any call traps via VERIFY_NOT_REACHED().
Optional<u32> safe_atomic_exchange_relaxed(u32 volatile*, u32);
Optional<u32> safe_atomic_exchange_relaxed(u32 volatile*, u32)
{
VERIFY_NOT_REACHED();
return {}; // Unreachable; empty Optional.
}
// Stub: fault-recovering relaxed atomic store is not implemented here;
// any call traps via VERIFY_NOT_REACHED().
bool safe_atomic_store_relaxed(u32 volatile*, u32);
bool safe_atomic_store_relaxed(u32 volatile*, u32)
{
VERIFY_NOT_REACHED();
// Unreachable. Return an explicit `false` to match the other bool-returning
// stubs (safe_memset/safe_memcpy) rather than value-initializing with `{}`.
return false;
}
}
extern "C" {

View File

@ -0,0 +1,61 @@
/*
* Copyright (c) 2022, Timon Kruiper <timonkruiper@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/SafeMem.h>
#include <Kernel/StdLib.h>
namespace Kernel {
// Stub: fault-recovering memset is not implemented on aarch64 yet; reaching
// this traps the kernel via VERIFY_NOT_REACHED().
// NOTE(review): the trailing void*& presumably receives the faulting address
// in a real implementation — confirm against the implemented architecture.
bool safe_memset(void*, int, size_t, void*&)
{
VERIFY_NOT_REACHED();
return false; // Unreachable; satisfies the bool return type.
}
// Stub: fault-recovering strnlen is not implemented on aarch64 yet; any call
// traps via VERIFY_NOT_REACHED().
ssize_t safe_strnlen(char const*, unsigned long, void*&)
{
VERIFY_NOT_REACHED();
return 0; // Unreachable; satisfies the ssize_t return type.
}
// FIXME: Actually implement a safe memcpy — one that recovers from faults
// while touching the destination/source and reports the faulting address
// through the out-parameter. For now this forwards to the regular memcpy,
// so a fault will crash the kernel instead of being handled gracefully.
// Always returns true because no fault can currently be detected.
bool safe_memcpy(void* dest_ptr, void const* src_ptr, unsigned long n, void*& fault_at)
{
    (void)fault_at; // Never written by this placeholder implementation.
    // Copying zero bytes is a no-op; skip the call so that a zero-length
    // request with null pointers does not invoke memcpy with null arguments,
    // which is undefined behavior per the C standard.
    if (n == 0)
        return true;
    memcpy(dest_ptr, src_ptr, n);
    return true;
}
// Stub: fault-recovering relaxed compare-exchange is not implemented on
// aarch64 yet; any call traps via VERIFY_NOT_REACHED().
Optional<bool> safe_atomic_compare_exchange_relaxed(u32 volatile*, u32&, u32)
{
VERIFY_NOT_REACHED();
return {}; // Unreachable; empty Optional.
}
// Stub: fault-recovering relaxed atomic load is not implemented on aarch64
// yet; any call traps via VERIFY_NOT_REACHED().
Optional<u32> safe_atomic_load_relaxed(u32 volatile*)
{
VERIFY_NOT_REACHED();
return {}; // Unreachable; empty Optional.
}
// Stub: fault-recovering relaxed fetch-add is not implemented on aarch64
// yet; any call traps via VERIFY_NOT_REACHED().
Optional<u32> safe_atomic_fetch_add_relaxed(u32 volatile*, u32)
{
VERIFY_NOT_REACHED();
return {}; // Unreachable; empty Optional.
}
// Stub: fault-recovering relaxed atomic exchange is not implemented on
// aarch64 yet; any call traps via VERIFY_NOT_REACHED().
Optional<u32> safe_atomic_exchange_relaxed(u32 volatile*, u32)
{
VERIFY_NOT_REACHED();
return {}; // Unreachable; empty Optional.
}
// Stub: fault-recovering relaxed atomic store is not implemented on aarch64
// yet; any call traps via VERIFY_NOT_REACHED().
bool safe_atomic_store_relaxed(u32 volatile*, u32)
{
    VERIFY_NOT_REACHED();
    // Unreachable. Return an explicit `false` to match the other
    // bool-returning stubs in this file (safe_memset/safe_memcpy) rather
    // than value-initializing a bool with `{}`.
    return false;
}
}

View File

@ -418,6 +418,7 @@ else()
Arch/aarch64/kprintf.cpp
Arch/aarch64/MainIdRegister.cpp
Arch/aarch64/PageDirectory.cpp
Arch/aarch64/SafeMem.cpp
Arch/aarch64/ScopedCritical.cpp
Arch/aarch64/SmapDisabler.cpp
Arch/aarch64/init.cpp