Dropping sword guard

This commit is contained in:
Chris Allen 2024-10-26 03:29:54 -05:00
parent 8ece38e8af
commit 80c719b5cb
12 changed files with 691 additions and 1063 deletions

9
Cargo.lock generated
View File

@ -1263,7 +1263,6 @@ dependencies = [
"signal-hook",
"static_assertions",
"sword_crypto",
"sword_guard",
"sword_macros",
"sword_pma",
"thiserror",
@ -1284,14 +1283,6 @@ dependencies = [
"x25519-dalek",
]
[[package]]
name = "sword_guard"
version = "0.1.0"
dependencies = [
"bindgen",
"cc",
]
[[package]]
name = "sword_macros"
version = "0.1.0"

View File

@ -4,7 +4,6 @@ members = [
"rust/ibig",
"rust/sword",
"rust/sword_crypto",
"rust/sword_guard",
"rust/sword_macros",
"rust/sword_pma",
"rust/murmur3",

View File

@ -9,7 +9,6 @@ missing_safety_doc = "allow"
# Please keep these alphabetized
[dependencies]
sword_guard = { path = "../sword_guard" }
sword_crypto = { path = "../sword_crypto" }
sword_macros = { path = "../sword_macros" }
sword_pma = { path = "../sword_pma" }

View File

@ -1,9 +1,9 @@
use crate::interpreter::{Error, Mote, Result};
use crate::noun::D;
use assert_no_alloc::permit_alloc;
use std::ffi::c_void;
use std::marker::PhantomData;
use sword_guard::*;
// use crate::interpreter::{Error, Mote, Result};
// use crate::noun::D;
// use assert_no_alloc::permit_alloc;
// use std::ffi::c_void;
// use std::marker::PhantomData;
// use sword_guard::*;
#[derive(Debug)]
pub enum GuardError {
@ -14,84 +14,84 @@ pub enum GuardError {
Unknown,
}
impl From<u32> for GuardError {
    /// Translate a raw status word returned by the C `guard()` call into a
    /// typed error.
    fn from(value: u32) -> Self {
        match value {
            GUARD_NULL => Self::NullPointer,
            GUARD_OOM => Self::OutOfMemory,
            // NOTE(review): the GUARD_* constants come from a sequential C
            // enum (guard_err), not from bit flags, so these mask tests can
            // mis-classify codes whose bit patterns overlap — confirm against
            // the guard_err definition in guard.h.
            x if (x & GUARD_MPROTECT) != 0 => Self::MemoryProtection,
            x if (x & (GUARD_MALLOC | GUARD_SIGACTION)) != 0 => Self::Setup,
            _ => Self::Unknown,
        }
    }
}
// impl From<u32> for GuardError {
// fn from(value: u32) -> Self {
// match value {
// GUARD_NULL => Self::NullPointer,
// GUARD_OOM => Self::OutOfMemory,
// x if (x & GUARD_MPROTECT) != 0 => Self::MemoryProtection,
// x if (x & (GUARD_MALLOC | GUARD_SIGACTION)) != 0 => Self::Setup,
// _ => Self::Unknown,
// }
// }
// }
pub struct CCallback<'closure> {
pub function: unsafe extern "C" fn(*mut c_void) -> *mut c_void,
pub input: *mut c_void,
// without this it's too easy to accidentally drop the closure too soon
_lifetime: PhantomData<&'closure mut c_void>,
}
// pub struct CCallback<'closure> {
// pub function: unsafe extern "C" fn(*mut c_void) -> *mut c_void,
// pub input: *mut c_void,
// // without this it's too easy to accidentally drop the closure too soon
// _lifetime: PhantomData<&'closure mut c_void>,
// }
impl<'closure> CCallback<'closure> {
pub fn new<F>(closure: &'closure mut F) -> Self
where
F: FnMut() -> Result,
{
let function: unsafe extern "C" fn(*mut c_void) -> *mut c_void = Self::call_closure::<F>;
// impl<'closure> CCallback<'closure> {
// pub fn new<F>(closure: &'closure mut F) -> Self
// where
// F: FnMut() -> Result,
// {
// let function: unsafe extern "C" fn(*mut c_void) -> *mut c_void = Self::call_closure::<F>;
Self {
function,
input: closure as *mut F as *mut c_void,
_lifetime: PhantomData,
}
}
// Self {
// function,
// input: closure as *mut F as *mut c_void,
// _lifetime: PhantomData,
// }
// }
unsafe extern "C" fn call_closure<F>(input: *mut c_void) -> *mut c_void
where
F: FnMut() -> Result,
{
let cb: &mut F = input.cast::<F>().as_mut().unwrap();
let v = (*cb)();
permit_alloc(|| {
let v_box = Box::new(v);
let v_ptr = Box::into_raw(v_box);
v_ptr as *mut c_void
})
}
}
// unsafe extern "C" fn call_closure<F>(input: *mut c_void) -> *mut c_void
// where
// F: FnMut() -> Result,
// {
// let cb: &mut F = input.cast::<F>().as_mut().unwrap();
// let v = (*cb)();
// permit_alloc(|| {
// let v_box = Box::new(v);
// let v_ptr = Box::into_raw(v_box);
// v_ptr as *mut c_void
// })
// }
// }
pub fn call_with_guard<F: FnMut() -> Result>(
stack_pp: *const *const u64,
alloc_pp: *const *const u64,
closure: &mut F,
) -> Result {
let cb = CCallback::new(closure);
let mut ret_p: *mut c_void = std::ptr::null_mut();
let ret_pp = &mut ret_p as *mut *mut c_void;
// pub fn call_with_guard<F: FnMut() -> Result>(
// stack_pp: *const *const u64,
// alloc_pp: *const *const u64,
// closure: &mut F,
// ) -> Result {
// let cb = CCallback::new(closure);
// let mut ret_p: *mut c_void = std::ptr::null_mut();
// let ret_pp = &mut ret_p as *mut *mut c_void;
unsafe {
let res = guard(
Some(cb.function as unsafe extern "C" fn(*mut c_void) -> *mut c_void),
cb.input,
stack_pp as *const usize,
alloc_pp as *const usize,
ret_pp,
);
// unsafe {
// let res = guard(
// Some(cb.function as unsafe extern "C" fn(*mut c_void) -> *mut c_void),
// cb.input,
// stack_pp as *const usize,
// alloc_pp as *const usize,
// ret_pp,
// );
if res == 0 {
permit_alloc(|| {
let result_box = Box::from_raw(ret_p as *mut Result);
*result_box
})
} else {
let err = GuardError::from(res);
match err {
GuardError::OutOfMemory => Err(Error::NonDeterministic(Mote::Meme, D(0))),
_ => {
panic!("serf: guard: unexpected error {:?} {}", err, res);
}
}
}
}
}
// if res == 0 {
// permit_alloc(|| {
// let result_box = Box::from_raw(ret_p as *mut Result);
// *result_box
// })
// } else {
// let err = GuardError::from(res);
// match err {
// GuardError::OutOfMemory => Err(Error::NonDeterministic(Mote::Meme, D(0))),
// _ => {
// panic!("serf: guard: unexpected error {:?} {}", err, res);
// }
// }
// }
// }
// }

File diff suppressed because it is too large Load Diff

View File

@ -87,11 +87,54 @@ impl NockStack {
}
}
pub fn middle_of_stack(&self) -> *const u64 {
    // Midpoint of the stack region: `add` on a *const u64 advances in u64
    // words, so this assumes `self.size` is a word count, not a byte count —
    // TODO confirm against the NockStack constructor.
    // is that right? off by one?
    unsafe { self.start.add(self.size >> 1) }
}
// When frame_pointer < alloc_pointer, the frame is West
// West frame layout:
// - start
// - *prev_alloc_ptr
// - frame_pointer
// - stack_pointer
// - (middle)
// - alloc_pointer
// - *prev_stack_ptr
// - *prev_frame_ptr
// - end
// East frame layout:
// - start
// - *prev_frame_ptr
// - *prev_stack_ptr
// - alloc_pointer
// - (middle)
// - stack_pointer
// - frame_pointer
// - *prev_alloc_ptr
// - end
// sometimes the stack pointer is moving, sometimes the alloc pointer is moving
// if you're allocating you're just bumping the alloc pointer
// pushing a frame is more complicated
// it's fine to cross the middle of the stack, it's not fine for them to cross each other
pub fn alloc_would_overlap_middle(&self, size: usize) -> bool {
    // NOTE(review): the name says "would overlap", but the west arm returns
    // true when the end point stays at or BELOW the middle (which reads as
    // "fits"), and the east arm computes `stack_pointer + size` even though
    // the surrounding code's east lightweight stack moves by `sub`. Both
    // arms look possibly inverted — verify against the callers before
    // relying on this predicate's polarity.
    if self.is_west() {
        let stack_pointer = self.stack_pointer as usize;
        let end_point = stack_pointer + size;
        end_point <= self.middle_of_stack() as usize
    } else {
        let stack_pointer = self.stack_pointer as usize;
        let end_point = stack_pointer + size;
        end_point >= self.middle_of_stack() as usize
    }
}
/** Resets the NockStack by flipping the top-frame polarity and unsetting PC. Sets the alloc
* pointer to the "previous" alloc pointer stored in the top frame to keep things "preserved"
* from the top frame. This allows us to do a copying GC on the top frame without erroneously
* "popping" the top frame.
*/
// TODO: #684: Add OOM checks here
pub unsafe fn flip_top_frame(&mut self, top_slots: usize) {
// Assert that we are at the top
assert!((*self.prev_frame_pointer_pointer()).is_null());
@ -123,7 +166,8 @@ impl NockStack {
}
}
/** Resets the NockStack. The top frame is west as in the initial creation of the NockStack. */
/// Resets the NockStack. The top frame is west as in the initial creation of the NockStack.
// TODO: #684: Add OOM checks here
pub fn reset(&mut self, top_slots: usize) {
self.frame_pointer = unsafe { self.start.add(RESERVED + top_slots) } as *mut u64;
self.stack_pointer = self.frame_pointer;
@ -210,11 +254,13 @@ impl NockStack {
}
/** Mutable pointer to a slot in a stack frame: east stack */
// TODO: #684: Add OOM checks here
unsafe fn slot_pointer_east(&self, slot: usize) -> *mut u64 {
    // East frame: slot `n` lives at frame_pointer + n, indexing upward.
    self.frame_pointer.add(slot)
}
/** Mutable pointer to a slot in a stack frame: west stack */
// TODO: #684: Add OOM checks here
unsafe fn slot_pointer_west(&self, slot: usize) -> *mut u64 {
    // West frame: slots index downward and slot 0 sits at frame_pointer - 1,
    // hence the `+ 1` offset.
    self.frame_pointer.sub(slot + 1)
}
@ -288,6 +334,7 @@ impl NockStack {
* allocation pointer is returned as the pointer to the newly allocated memory. */
/** Bump the alloc pointer for a west frame to make space for an allocation */
// TODO: #684: Add OOM checks here
unsafe fn raw_alloc_west(&mut self, words: usize) -> *mut u64 {
if self.pc {
panic!("Allocation during cleanup phase is prohibited.");
@ -297,6 +344,7 @@ impl NockStack {
}
/** Bump the alloc pointer for an east frame to make space for an allocation */
// TODO: #684: Add OOM checks here
unsafe fn raw_alloc_east(&mut self, words: usize) -> *mut u64 {
if self.pc {
panic!("Allocation during cleanup phase is prohibited.");
@ -344,6 +392,7 @@ impl NockStack {
}
}
// TODO: #684: Add OOM checks here
unsafe fn raw_alloc_in_previous_frame_west(&mut self, words: usize) -> *mut u64 {
// note that the allocation is on the east frame, and thus resembles raw_alloc_east
let alloc = *self.prev_alloc_pointer_pointer();
@ -351,6 +400,7 @@ impl NockStack {
alloc
}
// TODO: #684: Add OOM checks here
unsafe fn raw_alloc_in_previous_frame_east(&mut self, words: usize) -> *mut u64 {
// note that the allocation is on the west frame, and thus resembles raw_alloc_west
*(self.prev_alloc_pointer_pointer()) = (*(self.prev_alloc_pointer_pointer())).sub(words);
@ -416,7 +466,9 @@ impl NockStack {
* to make it so that the programmer doesn't need to think about it at all. The
* interface for using the reserved pointers (prev_xyz_pointer_pointer()) and
* lightweight stack (push(), pop(), top()) are the same regardless of whether
* or not pre_copy() has been called.*/
* or not pre_copy() has been called.
* */
// TODO: #684: Add OOM checks here
unsafe fn pre_copy(&mut self) {
if !self.pc {
*(self.free_slot(FRAME)) = *(self.slot_pointer(FRAME));
@ -530,6 +582,7 @@ impl NockStack {
assert_no_junior_pointers!(self, *noun);
}
// TODO: #684: Add OOM checks here? Unsure.
pub unsafe fn assert_struct_is_in<T>(&self, ptr: *const T, count: usize) {
let ap = (if self.pc {
*(self.prev_alloc_pointer_pointer())
@ -588,6 +641,7 @@ impl NockStack {
}
}
// Note re: #684: We don't need OOM checks on de-alloc
pub unsafe fn frame_pop(&mut self) {
let prev_frame_ptr = *self.prev_frame_pointer_pointer();
let prev_stack_ptr = *self.prev_stack_pointer_pointer();
@ -624,6 +678,7 @@ impl NockStack {
*/
/** Push a frame onto the stack with 0 or more local variable slots. */
// TODO: #684: Add OOM checks here
pub fn frame_push(&mut self, num_locals: usize) {
if self.pc {
panic!("frame_push during cleanup phase is prohibited.");
@ -696,6 +751,7 @@ impl NockStack {
}
/** Push onto a west-oriented lightweight stack, moving the stack_pointer. */
// TODO: #684: Add OOM checks here
unsafe fn push_west<T>(&mut self) -> *mut T {
let ap = if self.pc {
*(self.prev_alloc_pointer_pointer())
@ -713,6 +769,7 @@ impl NockStack {
}
/** Push onto an east-oriented lightweight stack, moving the stack_pointer */
// TODO: #684: Add OOM checks here
unsafe fn push_east<T>(&mut self) -> *mut T {
let ap = if self.pc {
*(self.prev_alloc_pointer_pointer())
@ -742,6 +799,7 @@ impl NockStack {
* this violates the _east/_west naming convention somewhat, since e.g.
* a west frame when pc == false has a west-oriented lightweight stack,
* but when pc == true it becomes east-oriented.*/
// Re: #684: We don't need OOM checks on pop
pub unsafe fn pop<T>(&mut self) {
if self.is_west() && !self.pc || !self.is_west() && self.pc {
self.pop_west::<T>();
@ -900,6 +958,7 @@ impl NockStack {
* Sanity check every frame of the NockStack. Most useful paired with a gdb session set to
* catch rust_panic.
*/
// #684: Don't need OOM checks here
pub fn assert_sane(&mut self) {
let start = self.start;
let limit = unsafe { self.start.add(self.size) };

View File

@ -1,12 +0,0 @@
[package]
name = "sword_guard"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
[build-dependencies]
bindgen = "0.69"
cc = "=1.0.99"

View File

@ -1,77 +0,0 @@
extern crate bindgen;
use std::env;
use std::path::PathBuf;
/// Cargo build script for the `sword_guard` crate: compiles the C guard-page
/// library in `c-src/` with the `cc` crate and generates Rust FFI bindings
/// for its header with `bindgen`. Panics (failing the build) on any error.
fn main() {
    // Mirror Cargo's optimization level when compiling the C code.
    let opt_level = env::var("OPT_LEVEL").unwrap();
    // The `debug_prints` cargo feature toggles the C library's DEBUG define.
    let define_debug = if env::var("CARGO_FEATURE_DEBUG_PRINTS").is_ok() {
        "-DDEBUG"
    } else {
        "-UDEBUG"
    };

    // This is the directory where the `c` library is located.
    let libdir_path = PathBuf::from("c-src")
        // Canonicalize the path as `rustc-link-search` requires an absolute
        // path.
        .canonicalize()
        .expect("cannot canonicalize path");
    let libdir_path_str = libdir_path.to_str().expect("Path is not a valid string");

    // This is the path to the `c` headers file.
    let headers_path = libdir_path.join("wrapper.h");
    let headers_path_str = headers_path.to_str().expect("Path is not a valid string");

    // Rebuild whenever anything under c-src/ changes.
    println!("cargo:rerun-if-changed={}", libdir_path_str);

    // Compile c-src/guard.c into a static library named `guard`, with
    // aggressive warnings enabled and a few noisy ones suppressed.
    let res = cc::Build::new()
        .file(
            libdir_path
                .join("guard.c")
                .to_str()
                .expect("Path is not a valid string"),
        )
        .flag(format!("-O{}", opt_level).as_ref())
        .flag(define_debug)
        .flag("-g3")
        .flag("-Wall")
        .flag("-Wextra")
        .flag("-Wformat=2")
        .flag("-Wmissing-include-dirs")
        .flag("-Wnested-externs")
        .flag("-Wpedantic")
        .flag("-Wredundant-decls")
        .flag("-Wshadow")
        .flag("-Wwrite-strings")
        .flag("-Wno-unused-parameter")
        .flag("-Wno-pointer-arith")
        .flag("-Wno-strict-prototypes")
        .flag("-Wno-unused-function")
        .try_compile("guard");
    if let Err(err) = res {
        panic!("{}", err);
    }

    // The bindgen::Builder is the main entry point
    // to bindgen, and lets you build up options for
    // the resulting bindings.
    let bindings = bindgen::Builder::default()
        // The input header we would like to generate
        // bindings for.
        .header(headers_path_str)
        // Tell cargo to invalidate the built crate whenever any of the
        // included header files changed.
        .parse_callbacks(Box::new(bindgen::CargoCallbacks::new()))
        // Finish the builder and generate the bindings.
        .generate()
        // Unwrap the Result and panic on failure.
        .expect("Unable to generate bindings");

    // Write the bindings to the $OUT_DIR/bindings.rs file.
    let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()).join("bindings.rs");
    bindings
        .write_to_file(out_path)
        .expect("Couldn't write bindings!");
}

View File

@ -1,243 +0,0 @@
#include <assert.h>
#include <errno.h>
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include "guard.h"
#define GD_PAGE_BITS 14ULL
#define GD_PAGE_SIZE (1ULL << GD_PAGE_BITS) // 16 KB
#define GD_PAGE_MASK (GD_PAGE_SIZE - 1)
#define GD_PAGE_ROUND_DOWN(foo) (foo & (~GD_PAGE_MASK))
#ifdef __APPLE__
#define GD_SIGNAL SIGBUS
#else
#define GD_SIGNAL SIGSEGV
#endif
/**
* Linked list stack of jump buffers.
*/
typedef struct GD_buflistnode GD_buflistnode;
struct GD_buflistnode {
jmp_buf buffer;
GD_buflistnode *next;
};
/**
* Global guard page state.
*/
typedef struct GD_state GD_state;
struct GD_state {
uintptr_t guard_p;
const uintptr_t *stack_pp;
const uintptr_t *alloc_pp;
GD_buflistnode *buffer_list;
struct sigaction prev_sa;
};
static GD_state _gd_state = {
.guard_p = 0,
.stack_pp = NULL,
.alloc_pp = NULL,
.buffer_list = NULL,
.prev_sa = { .sa_sigaction = NULL, .sa_flags = 0 },
};
// Set the protection bits of the single page at `address` to `prot`.
// Returns 0 on success, guard_mprotect on failure (after logging errno).
static uint32_t
_protect_page(void *address, int prot)
{
  int rc = mprotect(address, GD_PAGE_SIZE, prot);
  if (rc == 0) {
    return 0;
  }

  fprintf(stderr, "guard: prot: mprotect error %d\r\n", errno);
  fprintf(stderr, "%s\r\n", strerror(errno));
  return guard_mprotect;
}
// Center the guard page.
//
// Recomputes the guard page as the page-aligned midpoint between the current
// stack and alloc pointers, marks it PROT_NONE, and unmarks the previous
// guard page (if any). Returns 0 on success, otherwise a guard_err:
// guard_null when a tracked pointer is NULL, guard_oom when the pointers have
// met or the midpoint no longer moves a full page (memory exhausted), or a
// propagated mprotect error from _protect_page.
static uint32_t
_focus_guard(GD_state *gd)
{
  uintptr_t stack_p = *(gd->stack_pp);
  uintptr_t alloc_p = *(gd->alloc_pp);
  uintptr_t old_guard_p = (gd->guard_p);
  uintptr_t new_guard_p;
  uint32_t err = 0;

  if (stack_p == 0 || alloc_p == 0) {
    fprintf(stderr, "guard: focus: stack or alloc pointer is null\r\n");
    // NOTE(review): guard_null is the first enumerator of guard_err (0 as
    // written in guard.h), so callers that test `if ((err = _focus_guard(...)))`
    // would treat this error as success — confirm the enum's values.
    return guard_null;
  } else if (stack_p == alloc_p) {
    return guard_oom;
  }

  // Compute new guard page.
  new_guard_p = GD_PAGE_ROUND_DOWN((stack_p + alloc_p) / 2);
  if (new_guard_p == old_guard_p) {
    // The midpoint did not move to a new page: the free region between the
    // pointers has shrunk to within one page.
    return guard_oom;
  }

  // Mark new guard page.
  if ((err = _protect_page((void *)new_guard_p, PROT_NONE))) {
    fprintf(stderr, "guard: focus: mark error\r\n");
    return err;
  }

  // Update guard page tracker.
  gd->guard_p = new_guard_p;

  // Unmark the old guard page if there is one.
  if (old_guard_p) {
    if ((err = _protect_page((void *)old_guard_p, PROT_READ | PROT_WRITE))) {
      fprintf(stderr, "guard: focus: unmark error\r\n");
      return err;
    }
  }

  return 0;
}
// SIGSEGV (SIGBUS on macOS) handler. Faults inside the guard page re-center
// it via _focus_guard; on failure we siglongjmp back into guard() carrying
// the guard_err. Faults outside the guard page are forwarded to whatever
// disposition was installed before ours.
static void
_signal_handler(int sig, siginfo_t *si, void *unused)
{
  uintptr_t sig_addr;
  uint32_t err = 0;

  assert(_gd_state.guard_p);

  if (sig != GD_SIGNAL) {
    fprintf(stderr, "guard: handler: invalid signal: %d\r\n", sig);
    assert(0);
  }

  sig_addr = (uintptr_t)si->si_addr;

  if (sig_addr >= _gd_state.guard_p &&
      sig_addr < _gd_state.guard_p + GD_PAGE_SIZE)
  {
    err = _focus_guard(&_gd_state);
    if (err) {
      siglongjmp(_gd_state.buffer_list->buffer, err);
    }
  }
  else {
    struct sigaction prev_sa = _gd_state.prev_sa;

    // sa_sigaction and sa_handler typically share storage (a union), so only
    // the member selected by SA_SIGINFO may be read. The previous code read
    // sa_sigaction unconditionally and compared sa_handler against NULL,
    // which misfires because SIG_DFL is commonly defined as 0: a fault with
    // the default disposition would hit assert(0) instead of default fatal
    // handling.
    if (prev_sa.sa_flags & SA_SIGINFO) {
      prev_sa.sa_sigaction(sig, si, unused);
    } else if (prev_sa.sa_handler == SIG_DFL) {
      // Restore the default disposition and re-raise so the fault receives
      // default handling once this handler returns; simply returning would
      // retry the faulting instruction and loop forever.
      sigaction(GD_SIGNAL, &prev_sa, NULL);
      raise(sig);
    } else if (prev_sa.sa_handler != SIG_IGN) {
      prev_sa.sa_handler(sig);
    }
    // SIG_IGN: deliberately ignore the fault.
  }
}
// Registers the handler function for GD_SIGNAL, saving the previous
// disposition into gd->prev_sa so it can be restored and chained to.
// Returns 0 on success, guard_sigaction on failure.
static uint32_t
_register_handler(GD_state *gd)
{
  struct sigaction sa;

  sa.sa_flags = SA_SIGINFO;
  sa.sa_sigaction = _signal_handler;
  // Previously sa_mask was left uninitialized, making the set of signals
  // blocked during handler execution undefined (stack garbage). Block
  // nothing extra explicitly.
  sigemptyset(&sa.sa_mask);

  if (sigaction(GD_SIGNAL, &sa, &(gd->prev_sa))) {
    fprintf(stderr, "guard: register: sigaction error\r\n");
    fprintf(stderr, "%s\r\n", strerror(errno));
    return guard_sigaction;
  }

  return 0;
}
// Run closure `f` under guard-page protection; see guard.h for the full
// contract. Control flow relies on label fallthrough: `skip` is reached when
// the jmp_buf malloc fails, `tidy` both by goto (handler-registration error)
// and by fallthrough from `skip` during outermost-frame teardown, and `exit`
// by fallthrough or by goto on initial-focus error.
uint32_t
guard(
  void *(*f)(void *),
  void *closure,
  const uintptr_t *const s_pp,
  const uintptr_t *const a_pp,
  void **ret
) {
  GD_buflistnode *new_buffer;
  uint32_t err = 0;
  uint32_t td_err = 0;

  // guard_p == 0 means no guard page is installed yet, i.e. this is the
  // outermost guard() call: record the pointer locations, install the guard
  // page, and register the signal handler.
  if (_gd_state.guard_p == 0) {
    assert(_gd_state.buffer_list == NULL);

    _gd_state.stack_pp = s_pp;
    _gd_state.alloc_pp = a_pp;

    // Initialize the guard page.
    if ((err = _focus_guard(&_gd_state))) {
      fprintf(stderr, "guard: initial focus error\r\n");
      goto exit;
    }

    // Register guard page signal handler.
    if ((err = _register_handler(&_gd_state))) {
      fprintf(stderr, "guard: registration error\r\n");
      goto tidy;
    }
  } else {
    // Nested call: setup was already done by an outer guard() frame.
    assert(_gd_state.buffer_list != NULL);
  }

  // Setup new longjmp buffer.
  new_buffer = (GD_buflistnode *)malloc(sizeof(GD_buflistnode));
  if (new_buffer == NULL) {
    fprintf(stderr, "guard: malloc error\r\n");
    fprintf(stderr, "%s\r\n", strerror(errno));
    err = guard_malloc;
    goto skip;
  }
  new_buffer->next = _gd_state.buffer_list;
  _gd_state.buffer_list = new_buffer;

  // Run given closure. sigsetjmp returns 0 on the initial call; a nonzero
  // return means the signal handler siglongjmp'd here with a guard_err.
  if (!(err = sigsetjmp(_gd_state.buffer_list->buffer, 1))) {
    *ret = f(closure);
  }

  // Restore previous longjmp buffer.
  _gd_state.buffer_list = _gd_state.buffer_list->next;
  free((void *)new_buffer);

skip:
  // Outermost frame unwound (or its jmp_buf never allocated): tear down by
  // restoring the previous signal handler, then fall through `tidy` to
  // unmark the guard page.
  if (_gd_state.buffer_list == NULL) {
    if (sigaction(GD_SIGNAL, &_gd_state.prev_sa, NULL)) {
      fprintf(stderr, "guard: error replacing sigsegv handler\r\n");
      fprintf(stderr, "%s\r\n", strerror(errno));
      td_err = guard_sigaction;
      if (!err) {
        err = td_err;
      }
    }
tidy:
    // Unmark guard page. Teardown errors only surface when no earlier error
    // is already pending.
    assert(_gd_state.guard_p != 0);
    td_err = _protect_page((void *)_gd_state.guard_p, PROT_READ | PROT_WRITE);
    if (td_err) {
      fprintf(stderr, "guard: unmark error\r\n");
      fprintf(stderr, "%s\r\n", strerror(errno));
      if (!err) {
        err = td_err;
      }
    }
    _gd_state.guard_p = 0;
  }

exit:
  return err;
}

View File

@ -1,73 +0,0 @@
#ifndef __GUARD_H__
#define __GUARD_H__
#include <setjmp.h>
#include <stdint.h>
/**
 * Error codes.
 *
 * `guard()` returns 0 to signal success, so error codes must start at 1:
 * with implicit enum numbering, `guard_null` was 0, making a null
 * stack/alloc-pointer error indistinguishable from success (e.g.
 * `if ((err = _focus_guard(...)))` never fired for it).
 */
typedef enum {
  guard_null = 1,   // null stack or alloc pointer
  guard_signal,     // invalid signal
  guard_oom,        // out of memory
  guard_malloc,     // malloc error
  guard_mprotect,   // mprotect error
  guard_sigaction,  // sigaction error
} guard_err;
/**
* @brief Executes the given callback function `f` within the memory arena
* between the stack and allocation pointers pointed to by `s_pp` and `a_pp`,
* with guard page protection. If `f`'s execution succeeds, its result is
* written to the return pointer `*ret`. If `f`'s execution triggers an
* out of memory error or any other `guard_err`, the `guard_err` is
* returned and `*ret` is left empty. In either case, cleanup is performed
* before returning.
*
* Definitions:
* - A guard page is marked `PROT_NONE`.
*
* Assumptions:
* - `NockStack` pages are marked `PROT_READ|PROT_WRITE` by default.
* - All memory access patterns are outside-in.
* - Callback functions are compatible with the C ABI.
* - `NockStack` stack and allocation pointer locations are fixed.
* - The caller is responsible for return value memory allocation.
* - The caller is responsible for managing any external state the callback
* function may mutate.
* - The callback function may be interrupted in the case of memory exhaustion
* or other `guard_err` error (failure to `mprotect`, `malloc`, etc.).
* - `SIGSEGV` (`SIGBUS` on macOS) signals are expected to be raised only on
* guard page accesses.
*
* Invariants:
* - A single guard page is installed and maintained in the approximate center
* until `crate::guard::call_with_guard` returns.
* - A return value is only written to `*ret` on successful callback execution.
* - A `guard_err` is returned.
*
* Enhancements:
* - Use only a single, static jump buffer variable instead of a linked list.
* We currently use a linked list of jump buffers because we don't have a
* function for preserving stack traces across `crate::interpreter::interpret`
* calls.
*
* @param f The callback function to execute.
* @param closure A pointer to the closure data for the callback function.
* @param s_pp A pointer to the stack pointer location.
* @param a_pp A pointer to the allocation pointer location.
* @param ret A pointer to a location where the callback's result can be stored.
*
* @return 0 on callback success; otherwise `guard_err` error code.
*/
uint32_t
guard(
void *(*f)(void *),
void *closure,
const uintptr_t *const s_pp,
const uintptr_t *const a_pp,
void **ret
);
#endif // __GUARD_H__

View File

@ -1 +0,0 @@
#include "guard.h"

View File

@ -1,12 +0,0 @@
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
// Pull in the raw FFI bindings that build.rs generates with bindgen from
// c-src/wrapper.h.
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
// Conventional Rust constant names for the C `guard_err` enum values, which
// bindgen exposes as `guard_err_guard_*` u32 globals.
pub const GUARD_NULL: u32 = guard_err_guard_null;
pub const GUARD_SIGNAL: u32 = guard_err_guard_signal;
pub const GUARD_OOM: u32 = guard_err_guard_oom;
pub const GUARD_MALLOC: u32 = guard_err_guard_malloc;
pub const GUARD_MPROTECT: u32 = guard_err_guard_mprotect;
pub const GUARD_SIGACTION: u32 = guard_err_guard_sigaction;