Merge branch 'status' into parse

This commit is contained in:
Alex Shelkovnykov 2023-12-19 14:39:15 -03:00
commit 9d65f6ecbc
14 changed files with 198 additions and 491 deletions

16
.github/scripts/arch/get-runner-arch vendored Executable file
View File

@ -0,0 +1,16 @@
#!/usr/bin/env bash
# Map a GitHub Actions runner (arch, os) pair to the corresponding nix
# system string. Usage: get-runner-arch <runner.arch> <runner.os>
# Prints "nix-arch=<system>" and exits 0 on a known pair; otherwise prints
# an error and exits 1.
# This could be improved by separately looking up OS and architecture
# but it will do for now.
case "${1}-${2}" in
    X64-Linux)
        echo "nix-arch=x86_64-linux"
        exit 0
        ;;
    X64-macOS)
        echo "nix-arch=x86_64-darwin"
        exit 0
        ;;
    ARM64-macOS)
        echo "nix-arch=aarch64-darwin"
        exit 0
        ;;
esac
echo "Unknown Github runner arch-os pair: ${1}-${2}"
exit 1

View File

@ -15,12 +15,48 @@ jobs:
steps:
- uses: actions/checkout@v3
# Determine proper nix platform attribute for runner
- name: Determine Nix platform
id: set-nix-platform
working-directory: .
run:
.github/scripts/arch/get-runner-arch ${{ runner.arch }} ${{ runner.os }} > $GITHUB_OUTPUT
# Install nix, required by nix-shell-action
- uses: cachix/install-nix-action@v23
name: Install Nix
with:
extra_nix_config: "extra-experimental-features = nix-command flakes"
- name: Set cache key for dev env
id: set-cache-key
run: |
echo "Determining devshell path for platform ${{ steps.set-nix-platform.outputs.nix-arch }}"
echo "nix-devshell-path=$(nix eval --raw ".#devShells.${{ steps.set-nix-platform.outputs.nix-arch }}.default.outPath")" >> $GITHUB_OUTPUT
- name: Cache nix build artifacts
id: nix-cache
uses: actions/cache@v3
with:
path: |
nix-devshell.nixstore
key: nix-${{ steps.set-cache-key.outputs.nix-devshell-path }}
- name: Restore cached nix artifacts
if: steps.nix-cache.outputs.cache-hit == 'true'
working-directory: .
run: |
pwd
ls
nix-store --import < nix-devshell.nixstore
- name: Cache rust build artifacts
id: cache_rust
uses: Swatinem/rust-cache@v2
with:
key: rust-${{ steps.set-cache-key.outputs.nix-devshell-path }}
workspaces: "rust/ares -> target"
# Check formatting
- name: Format
run: |
@ -39,10 +75,19 @@ jobs:
# Build Ares
- name: Build
run: |
nix develop --command bash -c "cargo build --release --verbose --features check_all"
nix develop --command bash -c "cargo build --release --verbose"
# Run tests
- name: Test
run: |
nix develop --command bash -c "cargo test --verbose -- --test-threads=1"
- name: Build nix cache export
if: steps.nix-cache.outputs.cache-hit != 'true'
run: |
pwd
nix eval --json ".#devShells.${{ steps.set-nix-platform.outputs.nix-arch }}.default.buildInputs" 2>/dev/null
nix eval --json ".#devShells.${{ steps.set-nix-platform.outputs.nix-arch }}.default.buildInputs" 2>/dev/null | jq -r '.[]' | xargs nix-store --query --requisites
nix-store --export $(nix eval --json ".#devShells.${{ steps.set-nix-platform.outputs.nix-arch }}.default.buildInputs" 2>/dev/null | jq -r '.[]' | xargs nix-store --query --requisites) > ../../nix-devshell.nixstore
ls -lh

View File

@ -12,9 +12,9 @@ edition = "2018"
# Please keep these alphabetized
[dependencies]
ares_macros = { path = "../ares_macros" }
assert_no_alloc = "1.1.2"
# assert_no_alloc = "1.1.2"
# use this when debugging requires allocation (e.g. eprintln)
# assert_no_alloc = {version="1.1.2", features=["warn_debug"]}
assert_no_alloc = {version="1.1.2", features=["warn_debug"]}
bitvec = "1.0.0"
criterion = "0.4"
either = "1.9.0"
@ -47,8 +47,10 @@ opt-level = 3
# run with e.g. 'cargo build --features check_forwarding,check_acyclic'
[features]
# FOR DEBUGGING MEMORY ISSUES ONLY
check_all = [ "check_acyclic", "check_forwarding", "check_junior" ]
check_acyclic = []
check_forwarding = []
check_junior = []
sham_hints = []
stop_for_debug = []

View File

@ -1,66 +0,0 @@
fn main() {
    use std::env;
    // Re-run this build script whenever it or the PMA C sources change.
    println!("cargo:rerun-if-changed=build.rs");
    println!("cargo:rerun-if-changed=./src/pma");
    // Pick C compiler settings for the PMA allocator based on the cargo profile.
    let profile = env::var("PROFILE").unwrap();
    match profile.as_str() {
        "debug" => debug(),
        "release" => release(),
        _ => {
            println!("unknown profile: {}", profile);
            std::process::exit(-1);
        }
    }
    // Unit-test harness for the PMA allocator: unoptimized, with debug info,
    // and with warnings suppressed (test-only C code).
    cc::Build::new()
        .file("./src/pma/test/malloc.c")
        .opt_level(0)
        .flag("-g3")
        .flag("-Wno-int-conversion")
        .flag("-w")
        .compile("test_pma_malloc_unit");
}
/// Compile the PMA allocator for debug builds: no optimization, full debug
/// info, and a strict warning set (warnings are not errors here).
fn debug() {
    const WARNING_FLAGS: &[&str] = &[
        "-Wall",
        "-Wextra",
        "-Wpedantic",
        "-Wformat=2",
        "-Wno-unused-parameter",
        "-Wshadow",
        "-Wwrite-strings",
        "-Wstrict-prototypes",
        "-Wold-style-definition",
        "-Wredundant-decls",
        "-Wnested-externs",
        "-Wmissing-include-dirs",
    ];
    let mut build = cc::Build::new();
    build
        .file("./src/pma/malloc.c")
        .file("./src/pma/includes/checksum.c")
        .opt_level(0)
        .flag("-g3");
    for flag in WARNING_FLAGS {
        build.flag(flag);
    }
    build.compile("pma_malloc");
}
/// Compile the PMA allocator for release builds: full optimization, with the
/// same strict warning set as debug but promoted to errors.
fn release() {
    const WARNING_FLAGS: &[&str] = &[
        "-Wall",
        "-Wextra",
        "-Wpedantic",
        "-Wformat=2",
        "-Wno-unused-parameter",
        "-Wshadow",
        "-Wwrite-strings",
        "-Wstrict-prototypes",
        "-Wold-style-definition",
        "-Wredundant-decls",
        "-Wnested-externs",
        "-Wmissing-include-dirs",
    ];
    let mut build = cc::Build::new();
    build
        .file("./src/pma/malloc.c")
        .file("./src/pma/includes/checksum.c")
        .warnings_into_errors(true)
        .opt_level(3);
    for flag in WARNING_FLAGS {
        build.flag(flag);
    }
    build.compile("pma_malloc");
}

View File

@ -317,6 +317,7 @@ impl<T: Copy + Preserve> Hamt<T> {
let chunk = mug & 0x1F; // 5 bits
mug >>= 5;
match stem.entry(chunk) {
// No entry found at mug chunk index; add Leaf to current Stem
None => {
let new_leaf_buffer = stack.struct_alloc(1);
*new_leaf_buffer = (*n, t);
@ -341,6 +342,7 @@ impl<T: Copy + Preserve> Hamt<T> {
};
break Hamt(stem_ret);
}
// Stem found at mug chunk index; insert into found Stem
Some((Left(next_stem), idx)) => {
let new_buffer = stack.struct_alloc(stem.size());
copy_nonoverlapping(stem.buffer, new_buffer, stem.size());
@ -354,7 +356,9 @@ impl<T: Copy + Preserve> Hamt<T> {
depth += 1;
continue;
}
// Leaf found at mug chunk index
Some((Right(leaf), idx)) => {
// Override existing value for key, if one exists
for (ldx, pair) in leaf.to_mut_slice().iter_mut().enumerate() {
if unifying_equality(stack, n, &mut pair.0) {
let new_leaf_buffer = stack.struct_alloc(leaf.len);
@ -376,6 +380,8 @@ impl<T: Copy + Preserve> Hamt<T> {
break 'insert Hamt(stem_ret);
}
}
// No existing pair in this Leaf matches the key, and we've maxed out the
// Hamt depth; add the key-value pair to the list of pairs for this Leaf
if depth >= 5 {
let new_leaf_buffer = stack.struct_alloc(leaf.len + 1);
copy_nonoverlapping(leaf.buffer, new_leaf_buffer, leaf.len);
@ -394,15 +400,19 @@ impl<T: Copy + Preserve> Hamt<T> {
buffer: new_buffer,
};
break 'insert Hamt(stem_ret);
// No existing pair in this Leaf matches the key, but we haven't maxed out
// the Hamt depth yet. If we haven't hit the depth limit yet, we shouldn't
// be making a linked list of pairs. Turn the Leaf into a Stem and insert
// the new pair into the new Stem (also insert the pair in the existing
// Leaf, too).
} else {
// if we haven't hit depth limit yet we shouldn't be chaining
// we'll make a fake node pointing to the old leaf and "insert into" that
// Make a fake node pointing to the old leaf and "insert into it" the
// next time around
assert!(leaf.len == 1);
let fake_buffer = stack.struct_alloc(1);
*fake_buffer = Entry { leaf };
// get the mug chunk for the noun at *the next level* so
// we can build a fake stem for it
// Get the mug chunk for the Noun at the *next* level so that we can
// build a fake stem for it
let fake_mug = mug_u32(stack, (*leaf.buffer).0);
let fake_chunk = (fake_mug >> ((depth + 1) * 5)) & 0x1F;
let next_stem = Stem {

View File

@ -2,7 +2,7 @@ use crate::jets::*;
use crate::noun::{Atom, DirectAtom, IndirectAtom, Noun, D, T};
use ares_macros::tas;
use either::Either::{self, Left, Right};
use std::ptr::null_mut;
use std::ptr::{copy_nonoverlapping, null_mut};
/** Root for Hoon %k.139
*/
@ -909,3 +909,27 @@ struct HotMem {
jet: Jet,
next: Hot,
}
// Hot state is a linked list of HotMem nodes (see HotMem above); both
// Preserve methods walk that list via the `next` field.
impl Preserve for Hot {
// Copy list nodes allocated in the current frame into the previous frame,
// rewriting links as we go. Stops at the first null or senior node, since
// everything past it already lives outside the dying frame.
unsafe fn preserve(&mut self, stack: &mut NockStack) {
let mut it = self;
while !it.0.is_null() && stack.is_in_frame(it.0) {
// Relocate this node to the previous frame and re-point the link at it.
let dest_mem = stack.struct_alloc_in_previous_frame(1);
copy_nonoverlapping(it.0, dest_mem, 1);
it.0 = dest_mem;
// Preserve the noun fields of the relocated node as well.
(*it.0).a_path.preserve(stack);
(*it.0).axis.preserve(stack);
it = &mut (*it.0).next;
}
}
// Debug assertion: every node in the list, and its noun fields, must live
// somewhere in the NockStack (checks the whole list, not just one frame).
unsafe fn assert_in_stack(&self, stack: &NockStack) {
let mut it = self;
while !it.0.is_null() {
stack.assert_struct_is_in(it.0, 1);
(*it.0).a_path.assert_in_stack(stack);
(*it.0).axis.assert_in_stack(stack);
it = &mut (*it.0).next;
}
}
}

View File

@ -13,7 +13,6 @@ pub mod noun;
pub mod serf;
//pub mod bytecode;
pub mod serialization;
pub mod snapshot;
pub mod trace;
/** Introduce useful functions for debugging

View File

@ -5,10 +5,13 @@ use std::io;
fn main() -> io::Result<()> {
// debug
// eprintln!("serf: pid {}", std::process::id());
// if unsafe { libc::kill(std::process::id() as i32, libc::SIGSTOP) } != 0 {
// panic!("Could not stop ourselves.");
// };
#[cfg(feature = "stop_for_debug")]
{
eprintln!("serf: pid {}", std::process::id());
if unsafe { libc::kill(std::process::id() as i32, libc::SIGSTOP) } != 0 {
panic!("Could not stop ourselves.");
};
}
let filename = env::args().nth(1).expect("Must provide input filename");
@ -26,9 +29,6 @@ fn main() -> io::Result<()> {
ares::noun::use_gdb();
ares::serf::use_gdb();
ares::serialization::use_gdb();
ares::snapshot::use_gdb();
ares::snapshot::double_jam::use_gdb();
ares::snapshot::pma::use_gdb();
}
if filename == "serf" {

View File

@ -2,7 +2,6 @@ use crate::assert_acyclic;
use crate::assert_no_forwarding_pointers;
use crate::assert_no_junior_pointers;
use crate::noun::{Atom, Cell, CellMemory, IndirectAtom, Noun, NounAllocator};
use crate::snapshot::pma::{pma_in_arena, pma_malloc_w};
use assert_no_alloc::permit_alloc;
use either::Either::{self, Left, Right};
use ibig::Stack;
@ -75,7 +74,7 @@ impl NockStack {
unsafe {
*frame_pointer.sub(FRAME + 1) = ptr::null::<u64>() as u64; // "frame pointer" from "previous" frame
*frame_pointer.sub(STACK + 1) = ptr::null::<u64>() as u64; // "stack pointer" from "previous" frame
*frame_pointer.sub(ALLOC + 1) = ptr::null::<u64>() as u64; // "alloc pointer" from "previous" frame
*frame_pointer.sub(ALLOC + 1) = start as u64; // "alloc pointer" from "previous" frame
};
NockStack {
start,
@ -88,7 +87,43 @@ impl NockStack {
}
}
/** Resets the NockStack. */
/** Resets the NockStack by flipping the top-frame polarity and unsetting PC. Sets the alloc
* pointer to the "previous" alloc pointer stored in the top frame to keep things "preserved"
* from the top frame. This allows us to do a copying GC on the top frame without erroneously
* "popping" the top frame.
*/
pub unsafe fn flip_top_frame(&mut self, top_slots: usize) {
// Assert that we are at the top: only the root frame has null saved
// previous-frame and previous-stack pointers.
assert!((*self.prev_frame_pointer_pointer()).is_null());
assert!((*self.prev_stack_pointer_pointer()).is_null());
// Keep allocations made in the old top frame live: the flipped frame's
// alloc pointer is the old frame's saved "previous" alloc pointer.
let new_alloc_pointer = *(self.prev_alloc_pointer_pointer());
if self.is_west() {
// new top frame will be east: frame slots at the high end of the arena
let new_frame_pointer = self.start.add(self.size).sub(RESERVED + top_slots) as *mut u64;
*new_frame_pointer.add(FRAME) = ptr::null::<u64>() as u64;
*new_frame_pointer.add(STACK) = ptr::null::<u64>() as u64;
// Saved "previous" alloc pointer for an east top frame is the arena top.
*new_frame_pointer.add(ALLOC) = self.start.add(self.size) as u64;
self.frame_pointer = new_frame_pointer;
self.stack_pointer = new_frame_pointer;
self.alloc_pointer = new_alloc_pointer;
self.pc = false;
assert!(!self.is_west());
} else {
// new top frame will be west: frame slots at the low end of the arena
let new_frame_pointer = self.start.add(RESERVED + top_slots) as *mut u64;
*new_frame_pointer.sub(FRAME + 1) = ptr::null::<u64>() as u64;
*new_frame_pointer.sub(STACK + 1) = ptr::null::<u64>() as u64;
// Saved "previous" alloc pointer for a west top frame is the arena bottom.
*new_frame_pointer.sub(ALLOC + 1) = self.start as u64;
self.frame_pointer = new_frame_pointer;
self.stack_pointer = new_frame_pointer;
self.alloc_pointer = new_alloc_pointer;
self.pc = false;
assert!(self.is_west());
}
}
/** Resets the NockStack. The top frame is west as in the initial creation of the NockStack. */
pub fn reset(&mut self, top_slots: usize) {
self.frame_pointer = unsafe { self.start.add(RESERVED + top_slots) } as *mut u64;
self.stack_pointer = self.frame_pointer;
@ -97,7 +132,8 @@ impl NockStack {
unsafe {
*self.frame_pointer.sub(FRAME + 1) = ptr::null::<u64>() as u64; // "frame pointer" from "previous" frame
*self.frame_pointer.sub(STACK + 1) = ptr::null::<u64>() as u64; // "stack pointer" from "previous" frame
*self.frame_pointer.sub(ALLOC + 1) = ptr::null::<u64>() as u64; // "alloc pointer" from "previous" frame
*self.frame_pointer.sub(ALLOC + 1) = self.start as u64; // "alloc pointer" from "previous" frame
assert!(self.is_west());
};
}
@ -123,7 +159,17 @@ impl NockStack {
let ptr_u64 = ptr as *const u64;
let prev = *self.prev_stack_pointer_pointer();
if self.is_west() {
ptr_u64 >= self.alloc_pointer && ptr_u64 < prev
// If we are in a top/west frame, the stack pointer will be null, so our allocation
// arena was the alloc pointer to the top of the NockStack arena
if prev.is_null() {
ptr_u64 >= self.alloc_pointer && ptr_u64 < self.start.add(self.size)
} else {
ptr_u64 >= self.alloc_pointer && ptr_u64 < prev
}
// If we are in a top/east frame, the stack pointer will be null, so our allocation arena
// was the alloc pointer to the bottom of the NockStack arena
} else if prev.is_null() {
ptr_u64 >= self.start && ptr_u64 < self.alloc_pointer
} else {
ptr_u64 >= prev && ptr_u64 < self.alloc_pointer
}
@ -526,79 +572,6 @@ impl NockStack {
}
}
// Deep-copy `noun` out of the NockStack into the persistent memory arena
// (PMA), leaving forwarding pointers behind in the stack-resident originals
// so shared substructure is copied only once.
pub unsafe fn copy_pma(&mut self, noun: &mut Noun) {
// copy_pma() should only be called when there is a single stack
// frame; these asserts assure that.
assert!(
self.is_west()
&& (*(self.prev_stack_pointer_pointer())).is_null()
&& (*(self.prev_frame_pointer_pointer())).is_null()
);
assert!(self.stack_is_empty());
// Use the (empty) stack as a worklist of (destination slot, noun) pairs,
// seeded with the root noun and the caller's slot for it.
let noun_ptr = noun as *mut Noun;
*(self.push::<Noun>()) = *noun;
*(self.push::<*mut Noun>()) = noun_ptr;
loop {
if self.stack_is_empty() {
break;
}
// Pop the next destination/noun pair off the worklist.
let next_dest = *(self.top::<*mut Noun>());
self.pop::<*mut Noun>();
let next_noun = *(self.top::<Noun>());
self.pop::<Noun>();
match next_noun.as_either_direct_allocated() {
// Direct atoms are immediate values: write them straight through.
Either::Left(_direct) => {
*next_dest = next_noun;
}
Either::Right(allocated) => match allocated.forwarding_pointer() {
// Already copied (structural sharing): reference the new copy.
Option::Some(new_allocated) => {
*next_dest = new_allocated.as_noun();
}
Option::None => {
// Already resident in the PMA: nothing to copy.
if pma_in_arena(allocated.to_raw_pointer()) {
*next_dest = allocated.as_noun();
} else {
match allocated.as_either() {
// Indirect atom: copy its words into a fresh PMA
// allocation and mark the original as forwarded.
Either::Left(mut indirect) => {
let new_indirect_alloc =
pma_malloc_w(indirect_raw_size(indirect));
copy_nonoverlapping(
indirect.to_raw_pointer(),
new_indirect_alloc,
indirect_raw_size(indirect),
);
indirect.set_forwarding_pointer(new_indirect_alloc);
*next_dest = IndirectAtom::from_raw_pointer(new_indirect_alloc)
.as_noun();
}
// Cell: allocate its memory in the PMA now, then queue
// head and tail so their copies land in the new cell.
Either::Right(mut cell) => {
let new_cell_alloc: *mut CellMemory =
pma_malloc_w(word_size_of::<CellMemory>());
(*new_cell_alloc).metadata = (*cell.to_raw_pointer()).metadata;
*(self.push::<Noun>()) = cell.tail();
*(self.push::<*mut Noun>()) = &mut (*new_cell_alloc).tail;
*(self.push::<Noun>()) = cell.head();
*(self.push::<*mut Noun>()) = &mut (*new_cell_alloc).head;
cell.set_forwarding_pointer(new_cell_alloc);
*next_dest = Cell::from_raw_pointer(new_cell_alloc).as_noun();
}
}
}
}
},
}
}
}
pub unsafe fn frame_pop(&mut self) {
let prev_frame_ptr = *self.prev_frame_pointer_pointer();
let prev_stack_ptr = *self.prev_stack_pointer_pointer();

View File

@ -31,6 +31,24 @@ const CELL_TAG: u64 = u64::MAX & INDIRECT_MASK;
/** Tag mask for a cell. */
const CELL_MASK: u64 = !(u64::MAX >> 3);
/* A note on forwarding pointers:
*
* Forwarding pointers are only used temporarily during copies between NockStack frames and between
* the NockStack and the PMA. Since unifying equality checks can create structural sharing between
* Noun objects, forwarding pointers act as a signal that a Noun has already been copied to the
* "to" space. The old Noun object in the "from" space is given a forwarding pointer so that any
* future references to the same structure know that it has already been copied and that they should
* retain the structural sharing relationship by referencing the new copy in the "to" copy space.
*
* The Nouns in the "from" space marked with forwarding pointers are dangling pointers after a copy
* operation. No code outside of the copying code checks for forwarding pointers. This invariant
* must be enforced in two ways:
* 1. The current frame must be immediately popped after preserving data, when
* copying from a junior NockStack frame to a senior NockStack frame.
* 2. All persistent derived state (e.g. Hot state, Warm state) must be preserved
* and the root NockStack frame flipped after saving data to the PMA.
*/
/** Tag for a forwarding pointer */
const FORWARDING_TAG: u64 = u64::MAX & CELL_MASK;

View File

@ -9,8 +9,6 @@ use crate::mem::NockStack;
use crate::mug::*;
use crate::newt::Newt;
use crate::noun::{Atom, Cell, DirectAtom, Noun, Slots, D, T};
use crate::snapshot::double_jam::DoubleJam;
use crate::snapshot::Snapshot;
use crate::trace::*;
use crate::interpreter;
use ares_macros::tas;
@ -18,7 +16,7 @@ use signal_hook;
use signal_hook::consts::SIGINT;
use std::fs::create_dir_all;
use std::io;
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use std::result::Result;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
@ -31,7 +29,6 @@ const FLAG_TRACE: u32 = 1 << 8;
struct Context {
epoch: u64,
event_num: u64,
snapshot: DoubleJam,
arvo: Noun,
mug: u32,
nock_context: interpreter::Context,
@ -39,19 +36,17 @@ struct Context {
impl Context {
pub fn new(
snap_path: &PathBuf,
_snap_path: &Path,
trace_info: Option<TraceInfo>,
constant_hot_state: &[HotEntry],
) -> Self {
// TODO: switch to Pma when ready
// let snap = &mut snapshot::pma::Pma::new(snap_path);
let mut snapshot = DoubleJam::new(snap_path);
let mut stack = NockStack::new(512 << 10 << 10, 0);
let cold = Cold::new(&mut stack);
let hot = Hot::init(&mut stack, constant_hot_state);
let (epoch, event_num, arvo) = snapshot.load(&mut stack).unwrap_or((0, 0, D(0)));
let (epoch, event_num, arvo) = (0, 0, D(0));
let mug = mug_u32(&mut stack, arvo);
let nock_context = interpreter::Context {
@ -68,7 +63,6 @@ impl Context {
Context {
epoch,
event_num,
snapshot,
arvo,
mug,
nock_context,
@ -83,8 +77,6 @@ impl Context {
// XX: assert event numbers are continuous
self.arvo = new_arvo;
self.event_num = new_event_num;
self.snapshot
.save(&mut self.nock_context.stack, &mut self.arvo);
self.mug = mug_u32(&mut self.nock_context.stack, self.arvo);
}
@ -93,8 +85,8 @@ impl Context {
//
pub fn sync(&mut self) {
self.snapshot
.sync(&mut self.nock_context.stack, self.epoch, self.event_num);
// TODO actually sync
eprintln!("serf: TODO sync");
}
//
@ -224,22 +216,24 @@ pub fn serf(constant_hot_state: &[HotEntry]) -> io::Result<()> {
// Reset the local cache and scry handler stack
context.nock_context.cache = Hamt::<Noun>::new();
context.nock_context.scry_stack = D(0);
context.nock_context.stack.frame_push(0);
let tag = slot(writ, 2)?.as_direct().unwrap();
match tag.data() {
tas!(b"live") => {
let inner = slot(writ, 6)?.as_direct().unwrap();
match inner.data() {
tas!(b"cram") => eprintln!("cram"),
tas!(b"exit") => eprintln!("exit"),
tas!(b"cram") => eprintln!("\r %cram: not implemented"),
tas!(b"exit") => {
eprintln!("\r %exit");
std::process::exit(0);
}
tas!(b"save") => {
// XX what is eve for?
eprintln!("save");
eprintln!("\r %save");
context.sync();
}
tas!(b"meld") => eprintln!("meld"),
tas!(b"pack") => eprintln!("pack"),
tas!(b"meld") => eprintln!("\r %meld: not implemented"),
tas!(b"pack") => eprintln!("\r %pack: not implemented"),
_ => eprintln!("unknown live"),
}
context.live();
@ -269,13 +263,15 @@ pub fn serf(constant_hot_state: &[HotEntry]) -> io::Result<()> {
clear_interrupt();
// Persist data that should survive between events
// XX: Such data should go in the PMA once that's available
// XX: Such data should go in the PMA once that's available, except
// the warm and hot state which should survive between events but not interpreter runs
unsafe {
let stack = &mut context.nock_context.stack;
stack.preserve(&mut context.arvo);
stack.preserve(&mut context.nock_context.cold);
stack.preserve(&mut context.nock_context.warm);
stack.frame_pop();
stack.preserve(&mut context.nock_context.hot);
stack.flip_top_frame(0);
}
}

View File

@ -1,13 +0,0 @@
use crate::mem::NockStack;
use crate::noun::Noun;
pub mod double_jam;
pub mod pma;
crate::gdb!();
/// Interface for persisting and restoring the Arvo state noun.
pub trait Snapshot {
/// Load the most recent snapshot, returning (epoch, event number, state noun).
fn load(&mut self, stack: &mut NockStack) -> std::io::Result<(u64, u64, Noun)>;
/// Record `noun` as the state to be written out by the next `sync`.
fn save(&mut self, stack: &mut NockStack, noun: &mut Noun);
/// Durably write the saved state for the given epoch and event number.
fn sync(&mut self, stack: &mut NockStack, epoch: u64, event: u64);
}

View File

@ -1,172 +0,0 @@
/** Jam-based snapshotting
*
* This is a simple checkpoint system that should be safe but has (very) poor performance. This is
* intended as a working placeholder until the real PMA is hooked up.
*
* This keeps two files, .urb/chk/snapshot-0.jam and .urbit/chk/snapshot-1.jam. Each of these
* contains 64 bits for a mug checksum, then 64 bits for the event number, then a jam of the state.
* We alternate between writing these two files, so that at least one is always valid.
*
* When we start up, we read both files and pick the one with the higher event number. If either
* is corrupted, we use the other.
*/
use super::Snapshot;
use crate::mem::NockStack;
use crate::mug::mug_u32;
use crate::noun::{IndirectAtom, Noun, D};
use crate::serialization::{cue, jam};
use either::Either;
use memmap::Mmap;
use memmap::MmapMut;
use std::fs::{File, OpenOptions};
use std::io;
use std::mem;
use std::path::{Path, PathBuf};
use std::ptr::copy_nonoverlapping;
use std::ptr::write_bytes;
crate::gdb!();
pub struct DoubleJam {
// Directory containing snapshot-0.jam and snapshot-1.jam.
path: PathBuf,
// State noun recorded by `save`, written out by `sync`.
noun: Noun,
}
impl DoubleJam {
/// Create a snapshot handle for the directory at `path`.
pub fn new<P: AsRef<Path>>(path: P) -> Self {
Self {
path: path.as_ref().to_path_buf(),
noun: D(0),
}
}
/// Read both snapshot files and return (file number, event number, state)
/// for the valid snapshot with the higher event number.
fn latest_snapshot(&self, stack: &mut NockStack) -> io::Result<(u8, u64, IndirectAtom)> {
let res0 = self.load_snapshot(stack, 0);
let res1 = self.load_snapshot(stack, 1);
match (res0, res1) {
// Both files valid: prefer the one with the newer event number.
(Ok((event_number_0, state_0)), Ok((event_number_1, state_1))) => {
if event_number_0 > event_number_1 {
Ok((0, event_number_0, state_0))
} else {
Ok((1, event_number_1, state_1))
}
}
// Exactly one valid file: use it.
(Ok((event_number_0, state_0)), Err(_)) => Ok((0, event_number_0, state_0)),
(Err(_), Ok((event_number_1, state_1))) => Ok((1, event_number_1, state_1)),
(Err(_), Err(_)) => Err(io::Error::new(
io::ErrorKind::NotFound,
"no valid snapshot found",
)),
}
}
/// Load snapshot file `number`, returning (event number, state atom).
/// Errors if the file is missing or its mug checksum does not match.
fn load_snapshot(&self, stack: &mut NockStack, number: u8) -> io::Result<(u64, IndirectAtom)> {
let path = self.path.join(format!("snapshot-{}.jam", number));
// XX: need NockStack allocated string interpolation
// eprintln!("\rload: snapshot at {:?}", path);
let f = File::open(path)?;
// File layout: 8-byte header (mug checksum in the first 4 bytes), then
// the payload: event-number word followed by the jammed state.
let in_len = f.metadata().unwrap().len() - 8;
let word_len = (in_len + 7) >> 3;
let (event_number, state) = unsafe {
let in_map = Mmap::map(&f).unwrap();
let in_ptr = in_map.as_ptr();
let (mut state, dest) = IndirectAtom::new_raw_mut(stack, word_len as usize);
// Mug checksum is stored little-endian in the first four bytes.
let mugged = (*in_ptr.add(0) as u32)
| ((*in_ptr.add(1) as u32) << 8)
| ((*in_ptr.add(2) as u32) << 16)
| ((*in_ptr.add(3) as u32) << 24);
// Zero the final word first so the byte copy below leaves no
// garbage in the word's unused high bytes.
write_bytes(dest.add(word_len as usize - 1), 0, 1);
copy_nonoverlapping(in_ptr.add(8), dest as *mut u8, in_len as usize);
mem::drop(in_map);
state.normalize(); // know it's not direct because first word is event number
// Reject files whose contents do not hash to the stored mug.
if mug_u32(stack, state.as_noun()) != mugged {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"snapshot checksum mismatch",
));
}
// First payload word is the event number.
(*state.data_pointer(), state)
};
Ok((event_number, state))
}
}
impl Snapshot for DoubleJam {
/// Load the newer valid snapshot and cue the jammed state back into a noun.
fn load(&mut self, stack: &mut NockStack) -> io::Result<(u64, u64, Noun)> {
let (_num, event_number, state) = self.latest_snapshot(stack)?;
// Skip the leading event-number word; the rest is the jammed arvo state.
let jammed_arvo =
unsafe { IndirectAtom::new_raw(stack, state.size() - 1, state.data_pointer().add(1)) };
let arvo = cue(stack, jammed_arvo.as_atom());
Ok((0, event_number, arvo))
}
/// Record the noun to be written out by the next `sync`.
fn save(&mut self, _stack: &mut NockStack, noun: &mut Noun) {
// XX: I don't think this needs to be mut
self.noun = *noun;
}
/// Jam the saved noun and write it to whichever snapshot file does NOT
/// hold the latest valid snapshot, so one valid file always remains.
fn sync(&mut self, stack: &mut NockStack, _epoch: u64, event_number: u64) {
// Find the latest valid snapshot, and write to the other file.
let prev_snap = if let Ok((prev_snap, _, _)) = self.latest_snapshot(stack) {
prev_snap
} else {
0
};
let snap_number = if prev_snap == 0 { 1 } else { 0 };
let path = self.path.join(format!("snapshot-{}.jam", snap_number));
let jammed_arvo = jam(stack, self.noun);
// Build the payload atom: event-number word followed by the jam words.
let state = unsafe {
let (mut state, dest) = IndirectAtom::new_raw_mut(stack, jammed_arvo.size() + 1);
dest.write(event_number);
match jammed_arvo.as_either() {
Either::Left(direct) => {
copy_nonoverlapping(&direct.data() as *const u64, dest.add(1), 1);
}
Either::Right(indirect) => {
copy_nonoverlapping(indirect.data_pointer(), dest.add(1), jammed_arvo.size());
}
};
state.normalize_as_atom()
};
// Checksum the payload so `load` can detect corrupt or torn writes.
let mugged = mug_u32(stack, state.as_noun());
let f = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(path)
.unwrap();
// 8-byte header plus the payload words.
f.set_len(((state.size() + 1) << 3) as u64).unwrap();
unsafe {
let mut out_map = MmapMut::map_mut(&f).unwrap();
let out_ptr = out_map.as_mut_ptr();
// Mug checksum, little-endian, in the first four bytes.
out_ptr.add(0).write(mugged as u8);
out_ptr.add(1).write((mugged >> 8) as u8);
out_ptr.add(2).write((mugged >> 16) as u8);
out_ptr.add(3).write((mugged >> 24) as u8);
copy_nonoverlapping(
state.data_pointer() as *mut u8,
out_ptr.add(8),
state.size() << 3,
);
out_map.flush().unwrap();
// This appears to match c3/portable.h: fdatasync for linux, fcntl with F_FULLFSYNC for
// macos, and fsync for some other platforms.
f.sync_data().unwrap();
};
}
}

View File

@ -1,125 +0,0 @@
use super::Snapshot;
use crate::mem::NockStack;
use crate::mug::mug_u32;
use crate::noun::{Noun, D};
use libc::{c_char, c_int, c_void, size_t};
use std::ffi::CString;
use std::path::{Path, PathBuf};
crate::gdb!();
// Raw FFI bindings to the C PMA allocator; prefer the wrapper functions below.
mod raw {
use super::*;
// Mirrors the C struct returned by pma_load: snapshot metadata plus the
// root noun as a raw u64.
#[repr(C)]
pub struct RootState {
pub epoch: u64,
pub event: u64,
pub root: u64,
}
#[link(name = "pma_malloc", kind = "static")]
extern "C" {
pub(super) fn pma_init(path: *const c_char) -> c_int;
pub(super) fn pma_load(path: *const c_char) -> RootState;
pub(super) fn pma_close(epoch: u64, event: u64, root: u64) -> c_int;
pub(super) fn pma_malloc(size: size_t) -> *mut c_void;
pub(super) fn pma_free(ptr: *mut c_void) -> c_int;
pub(super) fn pma_sync(epoch: u64, event: u64, root: u64) -> c_int;
pub(super) fn pma_in_arena(ptr: *const c_void) -> bool;
}
// Test-only entry point from the PMA unit-test harness library.
#[link(name = "test_pma_malloc_unit", kind = "static")]
extern "C" {
pub(super) fn test_pma(path: *const c_char) -> c_void;
}
}
/// Initialize a new PMA under `path`; returns the C status code.
unsafe fn pma_init<P: AsRef<Path>>(path: P) -> i32 {
let path = CString::new(path.as_ref().to_str().unwrap()).unwrap();
raw::pma_init(path.as_ptr())
}
/// Load an existing PMA under `path`, returning (epoch, event, root noun).
unsafe fn pma_load<P: AsRef<Path>>(path: P) -> (u64, u64, Noun) {
let path = CString::new(path.as_ref().to_str().unwrap()).unwrap();
let rs = raw::pma_load(path.as_ptr());
(rs.epoch, rs.event, Noun::from_raw(rs.root))
}
/// Close the PMA, recording the final (epoch, event, root); returns the C
/// status code.
#[allow(dead_code)]
unsafe fn pma_close(epoch: u64, event: u64, root: Noun) -> i32 {
raw::pma_close(epoch, event, root.as_raw())
}
/// Allocate `size` *bytes* from the persistent memory arena
/// (see `pma_malloc_w` for the word-sized variant).
pub fn pma_malloc<T>(size: usize) -> *mut T {
unsafe { raw::pma_malloc(size as size_t) as *mut T }
}
/** Allocate a block of memory from the persistent memory arena.
 *
 * Size is in *words*, unlike the underlying pma_malloc.
 */
pub fn pma_malloc_w<T>(size: usize) -> *mut T {
    // One word is 8 bytes. Parenthesized so the cast clearly applies to the
    // byte count: the previous `size << 3 as size_t` parsed as
    // `size << (3 as size_t)` — same value, but misleading to read.
    unsafe { raw::pma_malloc((size << 3) as size_t) as *mut T }
}
/// Free a PMA allocation; returns the C status code.
#[allow(dead_code)]
unsafe fn pma_free<T>(ptr: *mut T) -> i32 {
raw::pma_free(ptr as *mut c_void)
}
/// Durably sync the PMA, recording (epoch, event, root); returns the C
/// status code.
unsafe fn pma_sync(epoch: u64, event: u64, root: Noun) -> i32 {
raw::pma_sync(epoch, event, root.as_raw())
}
/// True if `ptr` points into the persistent memory arena.
pub fn pma_in_arena<T>(ptr: *const T) -> bool {
unsafe { raw::pma_in_arena(ptr as *const c_void) }
}
/// Run the C PMA unit-test harness against a PMA rooted at `path`.
#[allow(dead_code)]
unsafe fn test_pma<P: AsRef<Path>>(path: P) {
let path = CString::new(path.as_ref().to_str().unwrap()).unwrap();
raw::test_pma(path.as_ptr());
}
pub struct Pma {
// Directory that holds (or will hold) the PMA backing files.
path: PathBuf,
// Root noun most recently saved into the PMA.
noun: Noun,
}
impl Pma {
pub fn new<P: AsRef<Path>>(path: P) -> Self {
let path = path.as_ref().to_path_buf();
Self { path, noun: D(0) }
}
}
impl Snapshot for Pma {
/// Copy the noun into the PMA and remember it as the current root.
fn save(&mut self, stack: &mut NockStack, noun: &mut Noun) {
// Required so everything in the PMA has a cached mug, otherwise we would try to write
let _mug = mug_u32(stack, *noun);
unsafe { stack.copy_pma(noun) };
self.noun = *noun;
}
/// Persist the current root for (epoch, event) via pma_sync.
fn sync(&mut self, _stack: &mut NockStack, epoch: u64, event: u64) {
unsafe {
pma_sync(epoch, event, self.noun);
}
}
/// Open an existing PMA at `path` if its backing file exists; otherwise
/// initialize a fresh one and return the empty initial state.
fn load(&mut self, _stack: &mut NockStack) -> std::io::Result<(u64, u64, Noun)> {
let path = self.path.join(".bin/page.bin");
if path.is_file() {
// XX: need NockStack allocated string interpolation
// eprintln!("\rload: found snapshot at {:?}", path);
unsafe { Ok(pma_load(&self.path)) }
} else {
// XX: need NockStack allocated string interpolation
// eprintln!("\rload: creating snapshot at {:?}", path);
unsafe { pma_init(&self.path) };
Ok((0, 0, D(0)))
}
}
}