Merge branch 'as/serf-guard' into msl/guard

- manual
- compiles
- tests fail
- doesn't run
Alex Shelkovnykov 2024-02-14 12:50:37 +09:00
parent 42ffde2208
commit 1df75a2cab
15 changed files with 635 additions and 675 deletions

rust/ares/Cargo.lock (generated)
View File

@@ -62,7 +62,7 @@ dependencies = [
"ares_guard",
"ares_macros",
"ares_pma",
"assert_no_alloc 1.1.2",
"assert_no_alloc",
"autotools",
"bitvec",
"cc",
@@ -87,7 +87,7 @@ version = "0.1.0"
dependencies = [
"aes",
"aes-siv",
"assert_no_alloc 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"assert_no_alloc",
"curve25519-dalek",
"ed25519-dalek",
"ibig 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -124,12 +124,6 @@ dependencies = [
name = "assert_no_alloc"
version = "1.1.2"
[[package]]
name = "assert_no_alloc"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55ca83137a482d61d916ceb1eba52a684f98004f18e0cafea230fe5579c178a3"
[[package]]
name = "atty"
version = "0.2.14"

View File

@@ -18,8 +18,8 @@ ares_macros = { path = "../ares_macros" }
# ares_pma = { path = "../ares_pma", features=["debug_prints"] }
ares_pma = { path = "../ares_pma" }
# use this when debugging requires allocation (e.g. eprintln)
# assert_no_alloc = { path = "../assert-no-alloc", features=["warn_debug"] }
assert_no_alloc = { path = "../assert-no-alloc" }
# assert_no_alloc = { path = "../rust-assert-no-alloc", features=["warn_debug"] }
assert_no_alloc = { path = "../rust-assert-no-alloc" }
bitvec = "1.0.0"
criterion = "0.4"
either = "1.9.0"

View File

@@ -1,6 +1,7 @@
use crate::assert_acyclic;
use crate::assert_no_forwarding_pointers;
use crate::assert_no_junior_pointers;
use crate::guard::call_with_guard;
use crate::hamt::Hamt;
use crate::jets::cold;
use crate::jets::cold::Cold;
@@ -16,9 +17,9 @@ use crate::serf::TERMINATOR;
use crate::trace::{write_nock_trace, TraceInfo, TraceStack};
use crate::unifying_equality::unifying_equality;
use ares_macros::tas;
use assert_no_alloc::assert_no_alloc;
use assert_no_alloc::{assert_no_alloc, ensure_alloc_counters};
use bitvec::prelude::{BitSlice, Lsb0};
use either::Either::*;
use either::*;
use std::result;
use std::sync::atomic::Ordering;
use std::sync::Arc;
@@ -400,7 +401,11 @@ pub fn interpret(context: &mut Context, mut subject: Noun, formula: Noun) -> Res
// ```
//
// (See https://docs.rs/assert_no_alloc/latest/assert_no_alloc/#advanced-use)
let nock = assert_no_alloc(|| unsafe {
let nock = assert_no_alloc(|| {
ensure_alloc_counters(|| {
let stack_pp = context.stack.get_stack_pointer_pointer() as *const *const u64;
let alloc_pp = context.stack.get_alloc_pointer_pointer() as *const *const u64;
let work_f = &mut || unsafe {
push_formula(&mut context.stack, formula, true)?;
loop {
@@ -687,7 +692,9 @@ pub fn interpret(context: &mut Context, mut subject: Noun, formula: Noun) -> Res
// '.*', so we can assume it's never directly used to invoke
// jetted code.
if context.trace_info.is_some() {
if let Some(path) = context.cold.matches(stack, &mut res) {
if let Some(path) =
context.cold.matches(stack, &mut res)
{
append_trace(stack, path);
};
};
@@ -712,7 +719,9 @@ pub fn interpret(context: &mut Context, mut subject: Noun, formula: Noun) -> Res
// '.*', so we can assume it's never directly used to invoke
// jetted code.
if context.trace_info.is_some() {
if let Some(path) = context.cold.matches(stack, &mut res) {
if let Some(path) =
context.cold.matches(stack, &mut res)
{
append_trace(stack, path);
};
};
@@ -748,16 +757,21 @@ pub fn interpret(context: &mut Context, mut subject: Noun, formula: Noun) -> Res
push_formula(&mut context.stack, diet.patch, false)?;
}
Todo10::Edit => {
res = edit(&mut context.stack, diet.axis.as_bitslice(), res, diet.tree);
res = edit(
&mut context.stack,
diet.axis.as_bitslice(),
res,
diet.tree,
);
context.stack.pop::<NockWork>();
}
}
}
NockWork::Work11D(mut dint) => match dint.todo {
Todo11D::ComputeHint => {
if let Some(ret) =
hint::match_pre_hint(context, subject, dint.tag, dint.hint, dint.body)
{
if let Some(ret) = hint::match_pre_hint(
context, subject, dint.tag, dint.hint, dint.body,
) {
match ret {
Ok(found) => {
res = found;
@@ -817,9 +831,9 @@ pub fn interpret(context: &mut Context, mut subject: Noun, formula: Noun) -> Res
},
NockWork::Work11S(mut sint) => match sint.todo {
Todo11S::ComputeResult => {
if let Some(ret) =
hint::match_pre_nock(context, subject, sint.tag, None, sint.body)
{
if let Some(ret) = hint::match_pre_nock(
context, subject, sint.tag, None, sint.body,
) {
match ret {
Ok(found) => {
res = found;
@@ -840,9 +854,9 @@ pub fn interpret(context: &mut Context, mut subject: Noun, formula: Noun) -> Res
}
}
Todo11S::Done => {
if let Some(found) =
hint::match_post_nock(context, subject, sint.tag, None, sint.body, res)
{
if let Some(found) = hint::match_post_nock(
context, subject, sint.tag, None, sint.body, res,
) {
res = found;
}
context.stack.pop::<NockWork>();
@@ -877,7 +891,8 @@ pub fn interpret(context: &mut Context, mut subject: Noun, formula: Noun) -> Res
scry_gate.tail().as_cell()?.tail(),
],
);
let scry_form = T(&mut context.stack, &[D(9), D(2), D(1), scry_core]);
let scry_form =
T(&mut context.stack, &[D(9), D(2), D(1), scry_core]);
context.scry_stack = cell.tail();
// Alternately, we could use scry_core as the subject and [9 2 0 1] as
@@ -892,11 +907,18 @@ pub fn interpret(context: &mut Context, mut subject: Noun, formula: Noun) -> Res
break Err(Error::ScryCrashed(D(0)));
}
}
Right(cell) => match cell.tail().as_either_atom_cell() {
Right(cell) => {
match cell.tail().as_either_atom_cell() {
Left(_) => {
let stack = &mut context.stack;
let hunk =
T(stack, &[D(tas!(b"hunk")), scry.reff, scry.path]);
let hunk = T(
stack,
&[
D(tas!(b"hunk")),
scry.reff,
scry.path,
],
);
mean_push(stack, hunk);
break Err(Error::ScryCrashed(D(0)));
}
@@ -905,10 +927,12 @@ pub fn interpret(context: &mut Context, mut subject: Noun, formula: Noun) -> Res
context.scry_stack = scry_stack;
context.stack.pop::<NockWork>();
}
},
}
}
},
Err(error) => match error {
Error::Deterministic(_, trace) | Error::ScryCrashed(trace) => {
Error::Deterministic(_, trace)
| Error::ScryCrashed(trace) => {
break Err(Error::ScryCrashed(trace));
}
Error::NonDeterministic(_, _) => {
@@ -927,6 +951,10 @@ pub fn interpret(context: &mut Context, mut subject: Noun, formula: Noun) -> Res
},
};
}
};
call_with_guard(stack_pp, alloc_pp, work_f)
})
});
match nock {

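The interpreter.rs change above moves guard-page protection from serf.rs into interpret itself: the interpreter captures double pointers to the NockStack's stack and allocation pointers (double indirection, because both move as frames are pushed and popped), wraps the whole Nock loop in a work closure, and runs that closure through crate::guard::call_with_guard, all inside assert_no_alloc's new ensure_alloc_counters so a longjmp out of the guarded region cannot leave the allocation counters unbalanced. Below is a minimal, self-contained sketch of that call shape; the Stack type and the call_with_guard stub are simplified stand-ins for illustration, not the actual ares types or signatures.

// Stand-in for the NockStack: the guard only needs to read where the stack
// and allocation pointers currently are, hence the pointer-to-pointer view.
struct Stack {
    stack_p: *const u64,
    alloc_p: *const u64,
}

impl Stack {
    fn get_stack_pointer_pointer(&self) -> *const *const u64 {
        &self.stack_p
    }
    fn get_alloc_pointer_pointer(&self) -> *const *const u64 {
        &self.alloc_p
    }
}

// Hypothetical stand-in for crate::guard::call_with_guard: the real function
// installs a guard page between the two pointers and siglongjmps back out if
// the page cannot be re-centered; this stub just runs the closure.
fn call_with_guard<T, E>(
    _stack_pp: *const *const u64,
    _alloc_pp: *const *const u64,
    work: &mut dyn FnMut() -> Result<T, E>,
) -> Result<T, E> {
    work()
}

fn main() {
    let stack = Stack { stack_p: std::ptr::null(), alloc_p: std::ptr::null() };
    let stack_pp = stack.get_stack_pointer_pointer();
    let alloc_pp = stack.get_alloc_pointer_pointer();
    // In the real interpreter, `work_f` wraps the entire push_formula/loop body.
    let mut work_f = || -> Result<u64, ()> { Ok(42) };
    let nock = call_with_guard(stack_pp, alloc_pp, &mut work_f);
    assert_eq!(nock, Ok(42));
}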
View File

@@ -1,4 +1,3 @@
use crate::guard::call_with_guard;
use crate::hamt::Hamt;
use crate::interpreter;
use crate::interpreter::{inc, interpret, Error, Mote};
@@ -403,27 +402,7 @@ fn slam(context: &mut Context, axis: u64, ovo: Noun) -> Result<Noun, Error> {
let fol = T(stack, &[D(8), pul, D(9), D(2), D(10), sam, D(0), D(2)]);
let sub = T(stack, &[arvo, ovo]);
let frame_p = stack.get_frame_pointer();
let stack_pp = stack.get_stack_pointer_pointer();
let alloc_pp = stack.get_alloc_pointer_pointer();
let res = call_with_guard(
stack_pp as *const *const u64,
alloc_pp as *const *const u64,
&mut || interpret(&mut context.nock_context, sub, fol),
);
if let Err(Error::NonDeterministic(Mote::Meme, _)) = res {
unsafe {
let stack = &mut context.nock_context.stack;
assert_no_alloc::reset_counters();
while stack.get_frame_pointer() != frame_p {
stack.frame_pop();
}
}
}
res
interpret(&mut context.nock_context, sub, fol)
}
fn peek(context: &mut Context, ovo: Noun) -> Noun {

View File

@@ -7,8 +7,8 @@ edition = "2021"
[dependencies]
# use this when debugging requires allocation (e.g. eprintln)
# assert_no_alloc = {version="1.1.2", features=["warn_debug"]}
assert_no_alloc = "1.1.2"
# assert_no_alloc = { path = "../rust-assert-no-alloc", features=["warn_debug"] }
assert_no_alloc = { path = "../rust-assert-no-alloc" }
ibig = "0.3.6"
# ed25519

View File

@@ -15,19 +15,11 @@
#define GD_PAGE_MASK (GD_PAGE_SIZE - 1)
#define GD_PAGE_ROUND_DOWN(foo) (foo & (~GD_PAGE_MASK))
/**
* XX: documentation
*/
typedef struct _gs {
uintptr_t guard_p;
const uintptr_t *stack_pp;
const uintptr_t *alloc_pp;
jmp_buf env_buffer;
struct sigaction prev_sa;
} GuardState;
static GuardState *_guard_state = NULL;
static uintptr_t guard_p;
static const uintptr_t *stack_pp;
static const uintptr_t *alloc_pp;
static jmp_buf env_buffer;
static struct sigaction prev_sa;
static int32_t
_prot_page(void *address, int prot)
@@ -53,18 +45,21 @@ _unmark_page(void *address)
return _prot_page(address, PROT_READ | PROT_WRITE);
}
/**
* Center the guard page.
*/
// Center the guard page.
// XX: could be a false positive if the new frame results in exact same guard page
// solution: we only re-center from the signal handler
static int32_t
_focus_guard(
uintptr_t *guard_pp,
const uintptr_t stack_p,
const uintptr_t alloc_p
) {
// Check for strange situations.
_focus_guard()
{
uintptr_t stack_p = *stack_pp;
uintptr_t alloc_p = *alloc_pp;
uintptr_t old_guard_p = guard_p;
uintptr_t new_guard_p;
int32_t err = 0;
fprintf(stderr, "guard: focus: stack pointer at %p\r\n", (void *)stack_p);
fprintf(stderr, "guard: focus: alloc pointer at %p\r\n", (void *)alloc_p);
if (stack_p == 0 || alloc_p == 0) {
fprintf(stderr, "guard: focus: stack or alloc pointer is null\r\n");
return guard_null;
@@ -73,20 +68,8 @@ _focus_guard(
return guard_oom;
}
uintptr_t old_guard_p = *guard_pp;
uintptr_t new_guard_p;
int32_t err = 0;
fprintf(stderr, "guard: focus: old guard = %p\r\n", (void *)old_guard_p);
// Unmark the old guard page (if there is one)
if (old_guard_p) {
if ((err = _unmark_page((void *)old_guard_p))) {
fprintf(stderr, "guard: focus: unmark error\r\n");
return err;
}
}
// Compute new guard page
// XX: Should we also check for new_guard_p < min(stack_p, alloc_p)?
new_guard_p = GD_PAGE_ROUND_DOWN((stack_p + alloc_p) / 2);
@@ -104,45 +87,50 @@ _focus_guard(
// Update guard page tracker
fprintf(stderr, "guard: focus: installed guard page at %p\r\n", (void *)new_guard_p);
*guard_pp = new_guard_p;
guard_p = new_guard_p;
// Unmark the old guard page (if there is one)
if (old_guard_p) {
if ((err = _unmark_page((void *)old_guard_p))) {
fprintf(stderr, "guard: focus: unmark error\r\n");
return err;
}
}
return 0;
}
void
static void
_signal_handler(int sig, siginfo_t *si, void *unused)
{
uintptr_t sig_addr;
int32_t err = 0;
assert(guard_p);
fprintf(stderr, "guard: sig_handle: %d received\r\n", sig);
if (sig != SIGSEGV) {
fprintf(stderr, "guard: sig_handle: invalid signal\r\n");
siglongjmp(_guard_state->env_buffer, guard_signal);
// XX: do we even want to jump? if this is fatal error, maybe just die now
siglongjmp(env_buffer, guard_signal);
}
sig_addr = (uintptr_t)si->si_addr;
fprintf(stderr, "guard: SIGSEGV address = %p\r\n", (void *)sig_addr);
fprintf(stderr, "guard: sig_handle: %p \r\n", _guard_state);
if (
sig_addr >= _guard_state->guard_p &&
sig_addr < (_guard_state->guard_p + GD_PAGE_SIZE))
if (sig_addr >= guard_p &&
sig_addr < guard_p + GD_PAGE_SIZE)
{
fprintf(stderr, "guard: page at %p hit\r\n", (void *)_guard_state->guard_p);
err = _focus_guard(
&(_guard_state->guard_p),
*(_guard_state->stack_pp),
*(_guard_state->alloc_pp));
fprintf(stderr, "guard: hit: %p\r\n", si->si_addr);
err = _focus_guard();
if (err) {
fprintf(stderr, "guard: sig_handle: focus error\r\n");
siglongjmp(_guard_state->env_buffer, err);
siglongjmp(env_buffer, err);
}
} else {
struct sigaction prev_sa = _guard_state->prev_sa;
fprintf(stderr, "guard: page at %p miss\r\n", (void *)_guard_state->guard_p);
}
else {
fprintf(stderr, "guard: page at %p miss\r\n", (void *)guard_p);
if (prev_sa.sa_sigaction != NULL) {
prev_sa.sa_sigaction(sig, si, unused);
@@ -155,8 +143,8 @@ _signal_handler(int sig, siginfo_t *si, void *unused)
}
}
int32_t
_register_handler(struct sigaction *prev_sa)
static int32_t
_register_handler()
{
struct sigaction sa;
@@ -171,8 +159,8 @@ _register_handler(struct sigaction *prev_sa)
// sigemptyset(&sa.sa_mask);
// sigaddset(&(sa.sa_mask), SIGSEGV);
// Set the new SIGSEGV handler, and save the old SIGSEGV handler (if any)
if (sigaction(SIGSEGV, &sa, prev_sa)) {
// XX: should assert that prev_sa doesn't have a handler in it, but it's not a pointer so non-trivial
if (sigaction(SIGSEGV, &sa, &prev_sa)) {
fprintf(stderr, "guard: register: sigaction error\r\n");
fprintf(stderr, "%s\r\n", strerror(errno));
return guard_sigaction | errno;
@@ -181,107 +169,59 @@ _register_handler(struct sigaction *prev_sa)
return 0;
}
int32_t
_setup(
GuardState **gs_p,
const uintptr_t *const stack_pp,
const uintptr_t *const alloc_pp
) {
GuardState *gs;
int32_t err = 0;
assert(*gs_p == NULL);
fprintf(stderr, "guard: setup: stack pointer at %p\r\n", (void *)(*stack_pp));
fprintf(stderr, "guard: setup: alloc pointer at %p\r\n", (void *)(*alloc_pp));
// Setup guard page state
*gs_p = (GuardState *)malloc(sizeof(GuardState));
gs = *gs_p;
if (gs == NULL) {
fprintf(stderr, "guard: malloc error\r\n");
fprintf(stderr, "%s\r\n", strerror(errno));
return guard_malloc | errno;
}
fprintf(stderr, "guard: state allocated to %p \r\n", gs);
gs->guard_p = 0;
gs->stack_pp = stack_pp;
gs->alloc_pp = alloc_pp;
// Initialize the guard page
if ((err = _focus_guard(&(gs->guard_p), *stack_pp, *alloc_pp))) {
fprintf(stderr, "guard: setup: _focus_guard error\r\n");
return err;
}
// Register guard page signal handler
if ((err = _register_handler(&(gs->prev_sa)))) {
fprintf(stderr, "guard: setup: _register_handler error\r\n");
return err;
}
return 0;
}
int32_t
_teardown(GuardState** gs_p)
{
int32_t err = 0;
if (*gs_p != NULL) {
GuardState *gs = *gs_p;
if (gs->guard_p != 0) {
err = _unmark_page((void *)gs->guard_p);
}
if (sigaction(SIGSEGV, &(gs->prev_sa), NULL)) {
fprintf(stderr, "guard: teardown: sigaction error\r\n");
fprintf(stderr, "%s\r\n", strerror(errno));
err = guard_sigaction | errno;
}
free(gs);
*gs_p = NULL;
}
return err;
}
int32_t
guard(
callback f,
void *closure,
const uintptr_t *const stack_pp,
const uintptr_t *const alloc_pp,
const uintptr_t *const s_pp,
const uintptr_t *const a_pp,
void ** ret
) {
int32_t err = 0;
int32_t td_err;
// Setup the guard page
fprintf(stderr, "guard: setup\r\n");
if ((err = _setup(&_guard_state, stack_pp, alloc_pp))) {
assert(guard_p == 0);
fprintf(stderr, "guard: setup: stack pointer at %p\r\n", (void *)(*stack_pp));
fprintf(stderr, "guard: setup: alloc pointer at %p\r\n", (void *)(*alloc_pp));
guard_p = 0;
stack_pp = s_pp;
alloc_pp = a_pp;
// Initialize the guard page
if ((err = _focus_guard())) {
fprintf(stderr, "guard: setup: _focus_guard error\r\n");
goto done;
}
// Register guard page signal handler
if ((err = _register_handler())) {
fprintf(stderr, "guard: setup: _register_handler error\r\n");
goto done;
}
// Run given closure
fprintf(stderr, "guard: run\r\n");
if (!(err = sigsetjmp(_guard_state->env_buffer, 1))) {
if (!(err = sigsetjmp(env_buffer, 1))) {
*ret = f(closure);
}
// Clean up
fprintf(stderr, "guard: teardown\r\n");
err = _teardown(&_guard_state);
fprintf(stderr, "guard: return\r\n");
return err;
} else {
done:
// Clean up
fprintf(stderr, "guard: teardown\r\n");
_teardown(&_guard_state);
if (guard_p != 0) {
td_err = _unmark_page((void *)guard_p);
}
if (sigaction(SIGSEGV, &prev_sa, NULL)) {
fprintf(stderr, "guard: teardown: sigaction error\r\n");
fprintf(stderr, "%s\r\n", strerror(errno));
td_err = guard_sigaction | errno;
}
if (!err) {
err = td_err;
}
fprintf(stderr, "guard: return\r\n");
return err;
}
}

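In guard.c above, the per-call GuardState struct is replaced by static state, and _focus_guard now reads the live stack and allocation pointers through the saved double pointers each time it re-centers the guard page: the page lands on the page boundary at roughly the midpoint of the remaining free region, and the SIGSEGV handler re-centers (rather than terminating) when that page is hit. The following is a small sketch of the placement arithmetic, written in Rust for consistency with the other notes here; the 16 KiB page size and the explicit range check are illustrative assumptions (GD_PAGE_SIZE is defined elsewhere in guard.c, and the C code simply rounds down (stack_p + alloc_p) / 2, as its XX comment about checking against min(stack_p, alloc_p) acknowledges).

// Editor's sketch of guard-page placement. PAGE_SIZE is an assumption; the
// real GD_PAGE_SIZE is defined near the top of guard.c (not shown in this diff).
const PAGE_SIZE: usize = 1 << 14;
const PAGE_MASK: usize = PAGE_SIZE - 1;

// Equivalent of GD_PAGE_ROUND_DOWN(foo) == foo & ~GD_PAGE_MASK.
fn page_round_down(addr: usize) -> usize {
    addr & !PAGE_MASK
}

// Place the guard page on the page boundary nearest the midpoint between the
// allocation pointer and the stack pointer. Returns None where the C code
// would report guard_null (null pointer) or guard_oom (no room left); the
// exact oom condition is not visible in this hunk, so the check below is
// illustrative only.
fn center_guard_page(stack_p: usize, alloc_p: usize) -> Option<usize> {
    if stack_p == 0 || alloc_p == 0 {
        return None; // guard_null
    }
    let (lo, hi) = if stack_p < alloc_p {
        (stack_p, alloc_p)
    } else {
        (alloc_p, stack_p)
    };
    let guard_p = page_round_down(lo + (hi - lo) / 2);
    if guard_p <= lo || guard_p + PAGE_SIZE > hi {
        return None; // guard_oom: the pointers have converged onto the guard page
    }
    Some(guard_p)
}

fn main() {
    // Stack grows down from 0x7000_0000, allocations grow up from 0x6000_0000.
    let guard_p = center_guard_page(0x7000_0000, 0x6000_0000).unwrap();
    assert_eq!(guard_p % PAGE_SIZE, 0);
    println!("guard page at {:#x}", guard_p);
}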
View File

@@ -51,9 +51,9 @@ int32_t
guard(
callback f,
void *closure,
const uintptr_t *const stack_pp,
const uintptr_t *const alloc_pp,
void ** ret
const uintptr_t *const s_pp,
const uintptr_t *const a_pp,
void **ret
);
#endif // __GUARD_H__

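The new guard.h signature above renames the pointer parameters to s_pp/a_pp while keeping the same callback-plus-closure shape. As a rough illustration of how the Rust side (crate::guard) would bind to it, a declaration sketch follows; the callback typedef is not shown in this diff, so its shape here is an assumption, and the crate's actual bindings (likely generated) may differ.

use std::ffi::c_void;

// Assumed shape of the `callback` typedef from guard.h: the C side calls
// f(closure) exactly once inside the sigsetjmp region and stores the result
// through `ret`.
type Callback = unsafe extern "C" fn(closure: *mut c_void) -> *mut c_void;

extern "C" {
    // Mirrors the declaration above:
    //   int32_t guard(callback f, void *closure,
    //                 const uintptr_t *const s_pp,
    //                 const uintptr_t *const a_pp,
    //                 void **ret);
    fn guard(
        f: Callback,
        closure: *mut c_void,
        s_pp: *const usize,
        a_pp: *const usize,
        ret: *mut *mut c_void,
    ) -> i32;
}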
View File

@@ -1567,6 +1567,7 @@ _flist_grow(BT_state *state, size_t pages)
/* grows the backing file by PMA_GROW_SIZE_p and appends this freespace to the
flist */
{
exit(1);
/* grow the backing file by at least PMA_GROW_SIZE_p */
pages = MAX(pages, PMA_GROW_SIZE_p);
off_t bytes = P2BYTES(pages);

View File

@@ -91,6 +91,23 @@ pub fn assert_no_alloc<T, F: FnOnce() -> T> (func: F) -> T {
return ret;
}
#[cfg(not(all(feature = "disable_release", not(debug_assertions))))] // if not disabled
/// Calls the `func` closure, but ensures that the forbid and permit counters
/// are maintained accurately even if a longjmp originates and terminates
/// within the closure. If you longjmp over this function, we can't fix
/// anything about it.
pub fn ensure_alloc_counters<T, F: FnOnce() -> T> (func: F) -> T {
let forbid_counter = ALLOC_FORBID_COUNT.with(|c| c.get());
let permit_counter = ALLOC_PERMIT_COUNT.with(|c| c.get());
let ret = func();
ALLOC_FORBID_COUNT.with(|c| c.set(forbid_counter));
ALLOC_PERMIT_COUNT.with(|c| c.set(permit_counter));
return ret;
}
#[cfg(not(all(feature = "disable_release", not(debug_assertions))))] // if not disabled
/// Calls the `func` closure. Allocations are temporarily allowed, even if this
/// code runs inside of assert_no_alloc.
@@ -142,6 +159,7 @@ pub fn reset_counters() {
#[cfg(not(all(feature = "disable_release", not(debug_assertions))))] // if not disabled
/// The custom allocator that handles the checking.
///
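The rust-assert-no-alloc change above adds ensure_alloc_counters because a siglongjmp out of the guarded Nock loop skips the Drop impls that normally decrement the forbid/permit counters; snapshotting the counters before the closure and restoring them afterwards keeps the bookkeeping consistent for longjmps that both start and end inside the closure (as the doc comment notes, a longjmp over the function itself cannot be repaired). Below is a self-contained sketch of the same save/restore pattern with stand-in thread-local counters; the real ALLOC_FORBID_COUNT and ALLOC_PERMIT_COUNT are internal to assert_no_alloc.

use std::cell::Cell;

thread_local! {
    // Stand-ins for assert_no_alloc's internal counters.
    static FORBID: Cell<u32> = Cell::new(0);
    static PERMIT: Cell<u32> = Cell::new(0);
}

// Same idea as ensure_alloc_counters: snapshot, run, restore, so a non-local
// exit inside `func` that skipped some decrements cannot leave the counters
// permanently unbalanced.
fn ensure_counters<T, F: FnOnce() -> T>(func: F) -> T {
    let forbid = FORBID.with(|c| c.get());
    let permit = PERMIT.with(|c| c.get());
    let ret = func();
    FORBID.with(|c| c.set(forbid));
    PERMIT.with(|c| c.set(permit));
    ret
}

fn main() {
    let out = ensure_counters(|| {
        // Simulate a region that bumps the forbid counter and then exits
        // without ever decrementing it, the way a longjmp would.
        FORBID.with(|c| c.set(c.get() + 1));
        "done"
    });
    assert_eq!(out, "done");
    // Restored despite the missing decrement inside the closure.
    assert_eq!(FORBID.with(|c| c.get()), 0);
}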