/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Assertions.h>
#include <AK/MemMem.h>
#include <AK/String.h>
#include <AK/Types.h>
#include <Kernel/Arch/x86/SmapDisabler.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/StdLib.h>
#include <Kernel/VM/MemoryManager.h>
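// Copies a NUL-terminated string of at most user_str_size bytes out of
// userspace. Returns a null String if the range is not user-accessible or
// the copy faults partway through. Illustrative caller sketch (the names
// are hypothetical, not from this file):
//
//     auto path = copy_string_from_user(user_path_ptr, path_length);
//     if (path.is_null())
//         return EFAULT;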
String copy_string_from_user(const char* user_str, size_t user_str_size)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(user_str), user_str_size);
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    void* fault_at;
    ssize_t length = Kernel::safe_strnlen(user_str, user_str_size, fault_at);
    if (length < 0) {
        dbgln("copy_string_from_user({:p}, {}) failed at {} (strnlen)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return {};
    }
    if (length == 0)
        return String::empty();

    char* buffer;
    auto copied_string = StringImpl::create_uninitialized((size_t)length, buffer);
    if (!Kernel::safe_memcpy(buffer, user_str, (size_t)length, fault_at)) {
        dbgln("copy_string_from_user({:p}, {}) failed at {} (memcpy)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return {};
    }
    return copied_string;
}

String copy_string_from_user(Userspace<const char*> user_str, size_t user_str_size)
{
    return copy_string_from_user(user_str.unsafe_userspace_ptr(), user_str_size);
}
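// Fallible variant of the above: returns an owned KString on success, or
// EFAULT/ENOMEM as a KResult error so the caller can propagate the failure.
// Illustrative caller sketch (hypothetical names):
//
//     auto string_or_error = try_copy_kstring_from_user(user_str, size);
//     if (string_or_error.is_error())
//         return string_or_error.error();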
Kernel::KResultOr<NonnullOwnPtr<Kernel::KString>> try_copy_kstring_from_user(const char* user_str, size_t user_str_size)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(user_str), user_str_size);
    if (!is_user)
        return EFAULT;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    ssize_t length = Kernel::safe_strnlen(user_str, user_str_size, fault_at);
    if (length < 0) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (strnlen)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    char* buffer;
    auto new_string = Kernel::KString::try_create_uninitialized(length, buffer);
    if (!new_string)
        return ENOMEM;

    buffer[length] = '\0';

    if (length == 0)
        return new_string.release_nonnull();

    if (!Kernel::safe_memcpy(buffer, user_str, (size_t)length, fault_at)) {
        dbgln("copy_kstring_from_user({:p}, {}) failed at {} (memcpy)", static_cast<const void*>(user_str), user_str_size, VirtualAddress { fault_at });
        return EFAULT;
    }
    return new_string.release_nonnull();
}

Kernel::KResultOr<NonnullOwnPtr<Kernel::KString>> try_copy_kstring_from_user(Userspace<const char*> user_str, size_t user_str_size)
{
    return try_copy_kstring_from_user(user_str.unsafe_userspace_ptr(), user_str_size);
}
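// Copy a timespec/timeval out of userspace and convert it to the kernel's
// Time type; an empty Optional means the copy faulted.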
[[nodiscard]] Optional<Time> copy_time_from_user(const timespec* ts_user)
{
    timespec ts;
    if (!copy_from_user(&ts, ts_user, sizeof(timespec))) {
        return {};
    }
    return Time::from_timespec(ts);
}

[[nodiscard]] Optional<Time> copy_time_from_user(const timeval* tv_user)
{
    timeval tv;
    if (!copy_from_user(&tv, tv_user, sizeof(timeval))) {
        return {};
    }
    return Time::from_timeval(tv);
}

template<>
[[nodiscard]] Optional<Time> copy_time_from_user<const timeval>(Userspace<const timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<timeval>(Userspace<timeval*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<const timespec>(Userspace<const timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
template<>
[[nodiscard]] Optional<Time> copy_time_from_user<timespec>(Userspace<timespec*> src) { return copy_time_from_user(src.unsafe_userspace_ptr()); }
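// Atomic accessors for userspace words. Each helper rejects misaligned or
// non-user addresses up front, then performs the access with SMAP disabled;
// an empty Optional (or false, for the store) means the address was invalid
// or the access faulted. Illustrative use (hypothetical futex-style caller):
//
//     if (auto value = user_atomic_load_relaxed(user_word); value.has_value())
//         do_something(*value);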
Optional<u32> user_atomic_fetch_add_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_add_relaxed(var, val);
}

Optional<u32> user_atomic_exchange_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_exchange_relaxed(var, val);
}

Optional<u32> user_atomic_load_relaxed(volatile u32* var)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_load_relaxed(var);
}

bool user_atomic_store_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return false; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_store_relaxed(var, val);
}

Optional<bool> user_atomic_compare_exchange_relaxed(volatile u32* var, u32& expected, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    VERIFY(!Kernel::is_user_range(VirtualAddress(&expected), sizeof(expected)));
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_compare_exchange_relaxed(var, expected, val);
}

Optional<u32> user_atomic_fetch_and_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_and_not_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_and_not_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_or_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_or_relaxed(var, val);
}

Optional<u32> user_atomic_fetch_xor_relaxed(volatile u32* var, u32 val)
{
    if (FlatPtr(var) & 3)
        return {}; // not aligned!
    bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
    if (!is_user)
        return {};
    Kernel::SmapDisabler disabler;
    return Kernel::safe_atomic_fetch_xor_relaxed(var, val);
}
extern "C" {
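// The only sanctioned ways to move bulk data across the user/kernel
// boundary. Both directions validate that the userspace side of the copy
// really is in user memory, and VERIFY() that the kernel side is not.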
bool copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return false;
    VERIFY(!Kernel::is_user_range(VirtualAddress(src_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
        dbgln("copy_to_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}

bool copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(src_ptr), n);
    if (!is_user)
        return false;
    VERIFY(!Kernel::is_user_range(VirtualAddress(dest_ptr), n));
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
        VERIFY(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
        dbgln("copy_from_user({:p}, {:p}, {}) failed at {}", dest_ptr, src_ptr, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}
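// Word-at-a-time memcpy: when source and destination are both 4-byte
// aligned, bulk-copy with `rep movsl` (or `rep movsq` on x86_64) and mop up
// the remaining bytes with `rep movsb`.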
void* memcpy(void* dest_ptr, const void* src_ptr, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    size_t src = (size_t)src_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
#if ARCH(I386)
        asm volatile(
            "rep movsl\n"
            : "=S"(src), "=D"(dest)
            : "S"(src), "D"(dest), "c"(size_ts)
            : "memory");
#else
        asm volatile(
            "rep movsq\n"
            : "=S"(src), "=D"(dest)
            : "S"(src), "D"(dest), "c"(size_ts)
            : "memory");
#endif
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    asm volatile(
        "rep movsb\n" ::"S"(src), "D"(dest), "c"(n)
        : "memory");
    return dest_ptr;
}

void* memmove(void* dest, const void* src, size_t n)
{
    if (dest < src)
        return memcpy(dest, src, n);

    u8* pd = (u8*)dest;
    const u8* ps = (const u8*)src;
    for (pd += n, ps += n; n--;)
        *--pd = *--ps;
    return dest;
}
const void* memmem(const void* haystack, size_t haystack_length, const void* needle, size_t needle_length)
{
    return AK::memmem(haystack, haystack_length, needle, needle_length);
}
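// memset_user() is the fill counterpart of copy_to_user(): it validates the
// userspace range and reports a fault instead of crashing the kernel.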
[[nodiscard]] bool memset_user(void* dest_ptr, int c, size_t n)
{
    bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
    if (!is_user)
        return false;
    Kernel::SmapDisabler disabler;
    void* fault_at;
    if (!Kernel::safe_memset(dest_ptr, c, n, fault_at)) {
        dbgln("memset_user({:p}, {}, {}) failed at {}", dest_ptr, c, n, VirtualAddress { fault_at });
        return false;
    }
    return true;
}
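// Word-at-a-time memset: explode_byte() replicates the fill byte across a
// machine word so the aligned bulk of the buffer can be filled with
// `rep stosl`/`rep stosq` before `rep stosb` handles the tail.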
void* memset(void* dest_ptr, int c, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        size_t expanded_c = explode_byte((u8)c);
#if ARCH(I386)
        asm volatile(
            "rep stosl\n"
            : "=D"(dest)
            : "D"(dest), "c"(size_ts), "a"(expanded_c)
            : "memory");
#else
        asm volatile(
            "rep stosq\n"
            : "=D"(dest)
            : "D"(dest), "c"(size_ts), "a"(expanded_c)
            : "memory");
#endif
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    asm volatile(
        "rep stosb\n"
        : "=D"(dest), "=c"(n)
        : "0"(dest), "1"(n), "a"(c)
        : "memory");
    return dest_ptr;
}
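// Freestanding implementations of the C string routines the kernel needs;
// there is no libc at this layer.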
size_t strlen(const char* str)
{
    size_t len = 0;
    while (*(str++))
        ++len;
    return len;
}

size_t strnlen(const char* str, size_t maxlen)
{
    size_t len = 0;
    for (; len < maxlen && *str; str++)
        len++;
    return len;
}

int strcmp(const char* s1, const char* s2)
{
    for (; *s1 == *s2; ++s1, ++s2) {
        if (*s1 == 0)
            return 0;
    }
    return *(const u8*)s1 < *(const u8*)s2 ? -1 : 1;
}

int memcmp(const void* v1, const void* v2, size_t n)
{
    auto* s1 = (const u8*)v1;
    auto* s2 = (const u8*)v2;
    while (n-- > 0) {
        if (*s1++ != *s2++)
            return s1[-1] < s2[-1] ? -1 : 1;
    }
    return 0;
}

int strncmp(const char* s1, const char* s2, size_t n)
{
    if (!n)
        return 0;
    do {
        if (*s1 != *s2++)
            return *(const unsigned char*)s1 - *(const unsigned char*)--s2;
        if (*s1++ == 0)
            break;
    } while (--n);
    return 0;
}

char* strstr(const char* haystack, const char* needle)
{
    char nch;
    char hch;

    if ((nch = *needle++) != 0) {
        size_t len = strlen(needle);
        do {
            do {
                if ((hch = *haystack++) == 0)
                    return nullptr;
            } while (hch != nch);
        } while (strncmp(haystack, needle, len) != 0);
        --haystack;
    }
    return const_cast<char*>(haystack);
}
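// Forward the C allocation entry points to the kernel heap.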
void* realloc(void* p, size_t s)
{
    return krealloc(p, s);
}

void free(void* p)
{
    return kfree(p);
}
// Functions that are automatically called by the C++ compiler.
// Declare them first, to tell the silly compiler that they are indeed being used.
[[noreturn]] void __stack_chk_fail() __attribute__((used));
[[noreturn]] void __stack_chk_fail_local() __attribute__((used));
extern "C" int __cxa_atexit(void (*)(void*), void*, void*);
[[noreturn]] void __cxa_pure_virtual();
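// Any of these firing indicates a kernel bug (a smashed stack canary, a
// pure-virtual call, or an unexpected static-destructor registration), so
// they all panic via VERIFY_NOT_REACHED().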
[[noreturn]] void __stack_chk_fail()
{
    VERIFY_NOT_REACHED();
}

[[noreturn]] void __stack_chk_fail_local()
{
    VERIFY_NOT_REACHED();
}

extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
{
    VERIFY_NOT_REACHED();
    return 0;
}

[[noreturn]] void __cxa_pure_virtual()
{
    VERIFY_NOT_REACHED();
}
}