[ares] hook up pma_malloc to event lifecycle

All tests pass except the azimuth pill, which requires too many dirty
pages in one event.
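(For scale: PMA_DIRTY_PAGE_LIMIT below is 163 pages of 4096 bytes each, so a single event can dirty at most about 163 * 4096 = 667,648 bytes — the "~650KB" noted in serf.rs — and the azimuth pill blows past that.)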
Philip Monk 2023-02-27 22:43:29 -07:00
parent 7c22096723
commit ffff0657b7
12 changed files with 317 additions and 133 deletions

View File

@ -32,7 +32,7 @@ name = "ares"
path = "src/main.rs"
[profile.dev]
opt-level = 0
opt-level = 3
[profile.dev.package."*"]
opt-level = 3

View File

@ -8,7 +8,6 @@ pub mod mem;
pub mod mug;
pub mod newt;
pub mod noun;
pub mod pma;
pub mod serf;
//pub mod bytecode;
pub mod hamt;

View File

@ -28,6 +28,7 @@ fn main() -> io::Result<()> {
ares::serialization::use_gdb();
ares::snapshot::use_gdb();
ares::snapshot::double_jam::use_gdb();
ares::snapshot::pma::use_gdb();
}
if filename == "serf" {

View File

@ -1,5 +1,6 @@
use crate::assert_acyclic;
use crate::noun::{Atom, Cell, CellMemory, IndirectAtom, Noun, NounAllocator};
use crate::snapshot::pma::{pma_in_arena, pma_malloc};
use either::Either::{self, Left, Right};
use ibig::Stack;
use libc::{c_void, memcmp};
@ -23,7 +24,7 @@ fn indirect_raw_size(atom: IndirectAtom) -> usize {
}
/** Which side of the two opposing stacks are we working on? */
#[derive(Copy, Clone)]
#[derive(Copy, Clone, PartialEq)]
pub enum Polarity {
/** Stack growing down from high memory */
East,
@ -585,6 +586,80 @@ impl NockStack {
assert_acyclic!(*noun);
}
/** Copy out to the PMA
*
* See copy_east/west for inline comments
*/
pub unsafe fn copy_pma(&mut self, noun: &mut Noun) {
assert!(self.polarity == Polarity::West);
let work_start = self.stack_pointer;
self.stack_pointer = self.stack_pointer.add(2);
*(self.stack_pointer.sub(2) as *mut Noun) = *noun;
*(self.stack_pointer.sub(1) as *mut *mut Noun) = noun as *mut Noun;
loop {
if self.stack_pointer == work_start {
break;
}
let next_noun = *(self.stack_pointer.sub(2) as *const Noun);
let next_dest = *(self.stack_pointer.sub(1) as *const *mut Noun);
self.stack_pointer = self.stack_pointer.sub(2);
match next_noun.as_either_direct_allocated() {
Either::Left(_direct) => {
*next_dest = next_noun;
}
Either::Right(allocated) => match allocated.forwarding_pointer() {
Option::Some(new_allocated) => {
*next_dest = new_allocated.as_noun();
}
Option::None => {
if pma_in_arena(allocated.to_raw_pointer()) {
*next_dest = allocated.as_noun();
} else {
match allocated.as_either() {
Either::Left(mut indirect) => {
let new_indirect_alloc =
pma_malloc(indirect_raw_size(indirect));
copy_nonoverlapping(
indirect.to_raw_pointer(),
new_indirect_alloc,
indirect_raw_size(indirect),
);
indirect.set_forwarding_pointer(new_indirect_alloc);
*next_dest = IndirectAtom::from_raw_pointer(new_indirect_alloc)
.as_noun();
}
Either::Right(mut cell) => {
let new_cell_alloc: *mut CellMemory =
pma_malloc(word_size_of::<CellMemory>());
(*new_cell_alloc).metadata = (*cell.to_raw_pointer()).metadata;
*(self.stack_pointer as *mut Noun) = cell.tail();
*(self.stack_pointer.add(1) as *mut *mut Noun) =
&mut (*new_cell_alloc).tail;
*(self.stack_pointer.add(2) as *mut Noun) = cell.head();
*(self.stack_pointer.add(3) as *mut *mut Noun) =
&mut (*new_cell_alloc).head;
self.stack_pointer = self.stack_pointer.add(4);
cell.set_forwarding_pointer(new_cell_alloc);
*next_dest = Cell::from_raw_pointer(new_cell_alloc).as_noun();
}
}
}
}
},
}
}
assert_acyclic!(*noun);
}
pub fn frame_size(&self) -> usize {
match self.polarity {
Polarity::East => self.frame_pointer as usize - self.stack_pointer as usize,
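copy_pma above reuses the copy_east/west pattern: an explicit work stack of (noun, destination) pairs, forwarding pointers to preserve sharing, and a pma_in_arena check to skip nouns already persisted. A minimal self-contained sketch of just the work-stack pattern, using a hypothetical Tree type rather than the Ares noun types (no forwarding pointers, so shared subtrees would be duplicated):

#[derive(Debug)]
enum Tree {
    Leaf(u64),
    Node(Box<Tree>, Box<Tree>),
}

fn deep_copy(root: &Tree) -> Tree {
    let mut result = Tree::Leaf(0); // placeholder, overwritten by the first pop
    // Each entry pairs a source subtree with the destination slot that will
    // receive its copy, like copy_pma's (Noun, *mut Noun) pairs.
    let mut work: Vec<(*const Tree, *mut Tree)> =
        vec![(root as *const Tree, &mut result as *mut Tree)];
    while let Some((src, dst)) = work.pop() {
        unsafe {
            match &*src {
                Tree::Leaf(n) => *dst = Tree::Leaf(*n),
                Tree::Node(l, r) => {
                    // Allocate the destination node first, then push the children
                    // tail-first so the head is copied first, as copy_pma does.
                    *dst = Tree::Node(Box::new(Tree::Leaf(0)), Box::new(Tree::Leaf(0)));
                    if let Tree::Node(nl, nr) = &mut *dst {
                        work.push((&**r as *const Tree, &mut **nr as *mut Tree));
                        work.push((&**l as *const Tree, &mut **nl as *mut Tree));
                    }
                }
            }
        }
    }
    result
}

fn main() {
    let t = Tree::Node(Box::new(Tree::Leaf(1)), Box::new(Tree::Leaf(2)));
    println!("{:?}", deep_copy(&t));
}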

View File

@ -842,6 +842,10 @@ impl Noun {
self.raw == other.raw
}
pub unsafe fn as_raw(&self) -> u64 {
self.raw
}
pub unsafe fn from_raw(raw: u64) -> Noun {
Noun { raw: raw }
}

View File

@ -1,89 +0,0 @@
use libc::{c_char, c_int, c_void, size_t};
use std::ffi::CString;
use std::path::Path;
mod raw {
use super::*;
#[link(name = "pma_malloc", kind = "static")]
extern "C" {
pub(super) fn pma_init(path: *const c_char) -> c_int;
pub(super) fn pma_load(path: *const c_char) -> c_int;
pub(super) fn pma_close(epoch: u64, event: u64) -> c_int;
pub(super) fn pma_malloc(size: size_t) -> *mut c_void;
pub(super) fn pma_free(ptr: *mut c_void) -> c_int;
pub(super) fn pma_sync(epoch: u64, event: u64) -> c_int;
}
}
pub unsafe fn pma_init<P: AsRef<Path>>(path: P) -> i32 {
let path = CString::new(path.as_ref().to_str().unwrap()).unwrap();
raw::pma_init(path.as_ptr()) as i32
}
pub unsafe fn pma_load<P: AsRef<Path>>(path: P) -> i32 {
let path = CString::new(path.as_ref().to_str().unwrap()).unwrap();
raw::pma_load(path.as_ptr()) as i32
}
pub unsafe fn pma_close(epoch: u64, event: u64) -> i32 {
raw::pma_close(epoch, event) as i32
}
pub unsafe fn pma_malloc(size: usize) -> *mut c_void {
raw::pma_malloc(size as size_t)
}
pub unsafe fn pma_free(ptr: *mut c_void) -> i32 {
raw::pma_free(ptr) as i32
}
pub unsafe fn pma_sync(epoch: u64, event: u64) -> i32 {
raw::pma_sync(epoch, event) as i32
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_pma() {
let path = "/tmp/ares_pma_test";
if let Err(err) = std::fs::remove_dir_all(path) {
if err.kind() != std::io::ErrorKind::NotFound {
panic!("failed to remove dir: {}", err);
}
}
unsafe {
pma_init(path);
let eight = pma_malloc(8) as *mut u64;
*eight = 0xdeadbeef;
assert!(0 == pma_close(10, 12));
pma_load(path);
assert_eq!(0, pma_sync(13, 15));
let _ = pma_malloc(8) as *mut u64;
assert_eq!(0, pma_sync(14, 16));
let _ = pma_malloc(8) as *mut u64;
assert_eq!(0, pma_sync(15, 16));
let twenty = pma_malloc(8) as *mut u64;
pma_free(twenty as *mut c_void);
assert_eq!(0, pma_sync(16, 15));
let _ = pma_malloc(8) as *mut u64;
assert_eq!(0, pma_sync(17, 15));
let _ = pma_malloc(8) as *mut u64;
assert_eq!(0, pma_sync(18, 15));
let _ = pma_malloc(8) as *mut u64;
let twenty = pma_malloc(8) as *mut u64;
*twenty = 0xcafebabe;
pma_free(twenty as *mut c_void);
pma_close(123, 124);
}
if let Err(err) = std::fs::remove_dir_all(path) {
if err.kind() != std::io::ErrorKind::NotFound {
panic!("failed to remove dir: {}", err);
}
}
}
}

View File

@ -3,12 +3,13 @@ use crate::mem::NockStack;
use crate::mug::mug_u32;
use crate::newt::Newt;
use crate::noun::{Noun, D, T};
use crate::snapshot::double_jam::DoubleJam;
use crate::snapshot::Snapshot;
use crate::snapshot::{self, Snapshot};
use ares_macros::tas;
use std::fs::create_dir_all;
use std::io;
use std::path::PathBuf;
use std::thread::sleep;
use std::time;
crate::gdb!();
@ -24,6 +25,7 @@ const WISH_AXIS: u64 = 10;
* u3_lord_init in vere to point at this binary and start vere like normal.
*/
pub fn serf() -> io::Result<()> {
sleep(time::Duration::from_secs(0));
let snap_path_string = std::env::args()
.nth(2)
.ok_or(io::Error::new(io::ErrorKind::Other, "no pier path"))?;
@ -31,7 +33,9 @@ pub fn serf() -> io::Result<()> {
snap_path.push(".urb");
snap_path.push("chk");
create_dir_all(&snap_path)?;
let ref mut snap = DoubleJam::new(snap_path);
// PMA is currently limited to ~650KB, use DoubleJam for anything bigger
// let ref mut snap = snapshot::double_jam::DoubleJam::new(snap_path);
let ref mut snap = snapshot::pma::Pma::new(snap_path);
let ref mut stack = NockStack::new(96 << 10 << 10, 0);
let ref mut newt = Newt::new();
@ -102,6 +106,8 @@ pub fn serf() -> io::Result<()> {
break;
}
}
snap.save(stack, &mut arvo);
newt.play_done(stack, 0);
}
tas!(b"work") => {
@ -109,7 +115,7 @@ pub fn serf() -> io::Result<()> {
let res = slam(stack, newt, arvo, POKE_AXIS, ovo).as_cell().unwrap();
let fec = res.head();
arvo = res.tail();
snap.save(stack, arvo);
snap.save(stack, &mut arvo);
event_number += 1;
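Net effect on the event lifecycle (a schematic of this commit's wiring against the Snapshot trait, not the verbatim serf loop — error handling and the play path are elided, and the exact sync placement is an assumption):

let ref mut snap = snapshot::pma::Pma::new(snap_path);
let (_epoch, mut event_number, mut arvo) = snap.load(stack)?; // pma_load an existing snapshot, or pma_init a fresh one
loop {
    // ... receive an event ovo, slam it against arvo ...
    snap.save(stack, &mut arvo);       // mug the state, then copy_pma any new nouns into the arena
    event_number += 1;
    snap.sync(stack, 0, event_number); // pma_sync: flush dirty pages and record the new root
}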

View File

@ -2,11 +2,12 @@ use crate::mem::NockStack;
use crate::noun::Noun;
pub mod double_jam;
pub mod pma;
crate::gdb!();
pub trait Snapshot {
fn save(&mut self, stack: &mut NockStack, noun: Noun);
fn sync(&mut self, stack: &mut NockStack, epoch: u64, event: u64);
fn load(&mut self, stack: &mut NockStack) -> std::io::Result<(u64, u64, Noun)>;
fn save(&mut self, stack: &mut NockStack, noun: &mut Noun);
fn sync(&mut self, stack: &mut NockStack, epoch: u64, event: u64);
}
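(Reading the hunk above with the removed lines dropped — load is unchanged context, and the old save/sync before it are replaced by the pair after it — the trait after this commit is:)

pub trait Snapshot {
    fn load(&mut self, stack: &mut NockStack) -> std::io::Result<(u64, u64, Noun)>;
    fn save(&mut self, stack: &mut NockStack, noun: &mut Noun);
    fn sync(&mut self, stack: &mut NockStack, epoch: u64, event: u64);
}

save now takes &mut Noun because copying into the PMA can move the root, and the caller's pointer must be rewritten to the copied noun.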

View File

@ -21,8 +21,7 @@ use memmap::MmapMut;
use std::fs::{File, OpenOptions};
use std::io;
use std::mem;
use std::path::Path;
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use std::ptr::copy_nonoverlapping;
use std::ptr::write_bytes;
@ -99,8 +98,19 @@ impl DoubleJam {
}
impl Snapshot for DoubleJam {
fn save(&mut self, _stack: &mut NockStack, noun: Noun) {
self.noun = noun;
fn load(&mut self, stack: &mut NockStack) -> io::Result<(u64, u64, Noun)> {
let (_num, event_number, state) = self.latest_snapshot(stack)?;
let jammed_arvo =
unsafe { IndirectAtom::new_raw(stack, state.size() - 1, state.data_pointer().add(1)) };
let arvo = cue(stack, jammed_arvo.as_atom());
Ok((0, event_number, arvo))
}
fn save(&mut self, _stack: &mut NockStack, noun: &mut Noun) {
self.noun = *noun;
}
fn sync(&mut self, stack: &mut NockStack, _epoch: u64, event_number: u64) {
@ -157,15 +167,4 @@ impl Snapshot for DoubleJam {
f.sync_data().unwrap();
};
}
fn load(&mut self, stack: &mut NockStack) -> io::Result<(u64, u64, Noun)> {
let (_num, event_number, state) = self.latest_snapshot(stack)?;
let jammed_arvo =
unsafe { IndirectAtom::new_raw(stack, state.size() - 1, state.data_pointer().add(1)) };
let arvo = cue(stack, jammed_arvo.as_atom());
Ok((0, event_number, arvo))
}
}

View File

@ -0,0 +1,157 @@
use super::Snapshot;
use crate::mem::NockStack;
use crate::mug::mug_u32;
use crate::noun::{Noun, D};
use libc::{c_char, c_int, c_void, size_t};
use std::ffi::CString;
use std::path::{Path, PathBuf};
crate::gdb!();
mod raw {
use super::*;
#[repr(C)]
pub struct RootState {
pub epoch: u64,
pub event: u64,
pub root: u64,
}
#[link(name = "pma_malloc", kind = "static")]
extern "C" {
pub(super) fn pma_init(path: *const c_char) -> c_int;
pub(super) fn pma_load(path: *const c_char) -> RootState;
pub(super) fn pma_close(epoch: u64, event: u64, root: u64) -> c_int;
pub(super) fn pma_malloc(size: size_t) -> *mut c_void;
pub(super) fn pma_free(ptr: *mut c_void) -> c_int;
pub(super) fn pma_sync(epoch: u64, event: u64, root: u64) -> c_int;
pub(super) fn pma_in_arena(ptr: *const c_void) -> bool;
}
}
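(The #[repr(C)] RootState here must match _pma_root_state_t in pma_malloc.h below field-for-field, since pma_load now returns the struct by value across the FFI boundary.)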
unsafe fn pma_init<P: AsRef<Path>>(path: P) -> i32 {
let path = CString::new(path.as_ref().to_str().unwrap()).unwrap();
raw::pma_init(path.as_ptr()) as i32
}
unsafe fn pma_load<P: AsRef<Path>>(path: P) -> (u64, u64, Noun) {
let path = CString::new(path.as_ref().to_str().unwrap()).unwrap();
let rs = raw::pma_load(path.as_ptr());
(rs.epoch, rs.event, Noun::from_raw(rs.root as u64))
}
#[allow(dead_code)]
unsafe fn pma_close(epoch: u64, event: u64, root: Noun) -> i32 {
raw::pma_close(epoch, event, root.as_raw()) as i32
}
/** Allocate a block of memory from the persistent memory arena.
*
* Size is in *words*, unlike the underlying pma_malloc.
*/
pub fn pma_malloc<T>(size: usize) -> *mut T {
unsafe { raw::pma_malloc((size << 3) as size_t) as *mut T } // words -> bytes
}
#[allow(dead_code)]
unsafe fn pma_free<T>(ptr: *mut T) -> i32 {
raw::pma_free(ptr as *mut c_void) as i32
}
unsafe fn pma_sync(epoch: u64, event: u64, root: Noun) -> i32 {
raw::pma_sync(epoch, event, root.as_raw()) as i32
}
pub fn pma_in_arena<T>(ptr: *const T) -> bool {
unsafe { raw::pma_in_arena(ptr as *const c_void) }
}
pub struct Pma {
path: PathBuf,
noun: Noun,
}
impl Pma {
pub fn new<P: AsRef<Path>>(path: P) -> Self {
let path = path.as_ref().to_path_buf();
Self { path, noun: D(0) }
}
}
impl Snapshot for Pma {
fn save(&mut self, stack: &mut NockStack, noun: &mut Noun) {
// Required so everything in the PMA has a cached mug; otherwise we would try to write
// mugs into already-persisted PMA pages after the copy.
let _mug = mug_u32(stack, *noun);
unsafe { stack.copy_pma(noun) };
self.noun = *noun;
}
fn sync(&mut self, _stack: &mut NockStack, epoch: u64, event: u64) {
unsafe {
pma_sync(epoch, event, self.noun);
}
}
fn load(&mut self, _stack: &mut NockStack) -> std::io::Result<(u64, u64, Noun)> {
let path = self.path.join(".bin/page.bin");
if path.is_file() {
eprintln!("\rload: found snapshot at {:?}", path);
unsafe { Ok(pma_load(&self.path)) }
} else {
eprintln!("\rload: creating snapshot at {:?}", path);
unsafe { pma_init(&self.path) };
Ok((0, 0, D(0)))
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::noun::IndirectAtom;
#[test]
fn test_pma() {
let path = "/tmp/ares_pma_test";
if let Err(err) = std::fs::remove_dir_all(path) {
if err.kind() != std::io::ErrorKind::NotFound {
panic!("failed to remove dir: {}", err);
}
}
unsafe {
pma_init(path);
let ref mut stack = NockStack::new(8 << 10 << 10, 0);
let root = IndirectAtom::new_raw(stack, 1, &0xffff_ffff_ffff_ffff).as_noun();
let eight = pma_malloc(8) as *mut u64;
*eight = 0xdeadbeef;
assert!(0 == pma_close(10, 12, root));
pma_load(path);
assert_eq!(0, pma_sync(13, 15, root));
let _ = pma_malloc(8) as *mut u64;
assert_eq!(0, pma_sync(14, 16, root));
let _ = pma_malloc(8) as *mut u64;
assert_eq!(0, pma_sync(15, 16, root));
let twenty = pma_malloc(8) as *mut u64;
pma_free(twenty as *mut c_void);
assert_eq!(0, pma_sync(16, 15, root));
let _ = pma_malloc(8) as *mut u64;
assert_eq!(0, pma_sync(17, 15, root));
let _ = pma_malloc(8) as *mut u64;
assert_eq!(0, pma_sync(18, 15, root));
let _ = pma_malloc(8) as *mut u64;
let twenty = pma_malloc(8) as *mut u64;
*twenty = 0xcafebabe;
pma_free(twenty as *mut c_void);
pma_close(123, 124, root);
}
if let Err(err) = std::fs::remove_dir_all(path) {
if err.kind() != std::io::ErrorKind::NotFound {
panic!("failed to remove dir: {}", err);
}
}
}
}

View File

@ -168,11 +168,11 @@
/**
*
* (4096 - (20 * 8)) / 24 = 164
* (4096 - (21 * 8)) / 24 = 163
*
* TODO
*/
#define PMA_DIRTY_PAGE_LIMIT 164
#define PMA_DIRTY_PAGE_LIMIT 163
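(Arithmetic check: the new root field makes the fixed metadata header 21 u64s, each DirtyPageEntry is 24 bytes, and sizeof(Metadata) must stay exactly one 4096-byte page — hence the two-word padding added below: 21*8 + 163*24 + 2*8 = 168 + 3912 + 16 = 4096.)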
/**
* Default settings for new PMA backing files
@ -359,6 +359,7 @@ typedef struct _pma_metadata_t {
uint32_t version; // Version of Vere (New Mars?) used to produce the backing file
uint64_t epoch; // Epoch ID of the most recently processed event
uint64_t event; // ID of the most recently processed event
uint64_t root; // Root after most recent event
void *arena_start; // Beginning of mapped address space
void *arena_end; // End of mapped address space (first address beyond mapped range)
SharedPageHeader *shared_pages[PMA_MAX_SHARED_SHIFT]; // Shared allocation pages
@ -366,7 +367,8 @@ typedef struct _pma_metadata_t {
uint64_t snapshot_size; // TODO
uint64_t next_offset; // TODO
uint8_t num_dirty_pages; // TODO
DirtyPageEntry dirty_pages[PMA_DIRTY_PAGE_LIMIT]; // Array of changes not yet synced to page directory (exactly 164)
uint64_t padding[2]; // sizeof(metadata) must be PMA_PAGE_SIZE
DirtyPageEntry dirty_pages[PMA_DIRTY_PAGE_LIMIT]; // Array of changes not yet synced to page directory (exactly 163)
} Metadata;
/**
@ -571,6 +573,7 @@ pma_init(const char *path) {
_pma_state->metadata.version = PMA_DATA_VERSION;
_pma_state->metadata.epoch = 0;
_pma_state->metadata.event = 0;
_pma_state->metadata.root = 0;
// Initialize shared pages stacks
for(uint8_t i = 0; i < PMA_MAX_SHARED_SHIFT; ++i) {
@ -622,6 +625,7 @@ pma_init(const char *path) {
// First page used by dpage cache
_pma_state->page_directory.entries[0].status = FIRST;
_pma_state->page_directory.entries[0].offset = meta_bytes;
_pma_state->metadata.next_offset += PMA_PAGE_SIZE;
//
// Setup transient state
@ -700,7 +704,7 @@ init_error:
exit(err);
}
int
RootState
pma_load(const char *path) {
Metadata *newer_page;
Metadata *older_page;
@ -720,7 +724,7 @@ pma_load(const char *path) {
// Only init once
if (_pma_state != NULL) {
return 0;
return (RootState){0, 0, 0};
}
// Precompute metadata and page directory sizes in bytes
@ -867,7 +871,7 @@ pma_load(const char *path) {
(PMA_PAGE_SIZE * count),
PROT_READ,
MAP_SHARED | MAP_FIXED_NOREPLACE,
page_dir_fd,
snapshot_fd,
_pma_state->page_directory.entries[index - count].offset);
continue;
@ -879,7 +883,7 @@ pma_load(const char *path) {
PMA_PAGE_SIZE,
PROT_READ,
MAP_SHARED | MAP_FIXED_NOREPLACE,
page_dir_fd,
snapshot_fd,
_pma_state->page_directory.entries[index].offset);
if (address == MAP_FAILED) LOAD_ERROR;
@ -904,7 +908,7 @@ pma_load(const char *path) {
(count * PMA_PAGE_SIZE),
PROT_READ,
MAP_SHARED | MAP_FIXED_NOREPLACE,
page_dir_fd,
snapshot_fd,
_pma_state->page_directory.entries[index - count].offset);
if (address == MAP_FAILED) LOAD_ERROR;
@ -933,7 +937,11 @@ pma_load(const char *path) {
munmap(meta_pages, meta_bytes);
free((void*)filepath);
return 0;
return (RootState){
_pma_state->metadata.epoch,
_pma_state->metadata.event,
_pma_state->metadata.root
};
load_error:
err = errno;
@ -950,9 +958,9 @@ load_error:
}
int
pma_close(uint64_t epoch, uint64_t event) {
pma_close(uint64_t epoch, uint64_t event, uint64_t root) {
// Sync changes to disk
if (pma_sync(epoch, event)) {
if (pma_sync(epoch, event, root)) {
return -1;
}
@ -1018,7 +1026,7 @@ pma_free(void *address) {
}
int
pma_sync(uint64_t epoch, uint64_t event) {
pma_sync(uint64_t epoch, uint64_t event, uint64_t root) {
DPageCache *dpage_cache = _pma_state->metadata.dpage_cache;
ssize_t bytes_out;
int err;
@ -1056,6 +1064,7 @@ pma_sync(uint64_t epoch, uint64_t event) {
// Compute checksum
_pma_state->metadata.epoch = epoch;
_pma_state->metadata.event = event;
_pma_state->metadata.root = root;
_pma_state->metadata.checksum = 0;
_pma_state->metadata.checksum = crc_32(
(const unsigned char *)(&(_pma_state->metadata)),
@ -1092,6 +1101,12 @@ sync_error:
return -1;
}
bool
pma_in_arena(void* address) {
return (address >= _pma_state->metadata.arena_start)
&& (address < _pma_state->metadata.arena_end);
}
//==============================================================================
// PRIVATE FUNCTIONS
//==============================================================================
@ -1327,7 +1342,7 @@ _pma_malloc_shared_page(uint8_t bucket)
// Initialize header for shared page
shared_page->dirty = 1;
shared_page->size = (bucket + 1);
shared_page->free = ((PMA_PAGE_SIZE - sizeof(SharedPageHeader)) / (1 << bucket));
shared_page->free = ((PMA_PAGE_SIZE - sizeof(SharedPageHeader)) / (1 << (bucket + 1)));
for (uint8_t i = 0; i < PMA_BITMAP_SIZE; ++i) {
shared_page->bits[i] = PMA_EMPTY_BITMAP;
}
@ -1681,6 +1696,7 @@ _pma_get_cached_dpage(void) {
// Pop page off queue
uint16_t head = _pma_state->metadata.dpage_cache->head;
offset = _pma_state->metadata.dpage_cache->queue[head];
assert(offset != 0);
_pma_state->metadata.dpage_cache->size -= 1;
_pma_state->metadata.dpage_cache->head = ((head + 1) % PMA_DPAGE_CACHE_SIZE);
@ -1696,7 +1712,6 @@ _pma_copy_dpage_cache(void) {
uint64_t offset;
uint16_t dirty = _pma_state->metadata.dpage_cache->dirty;
uint16_t size = _pma_state->metadata.dpage_cache->size;
uint16_t head = _pma_state->metadata.dpage_cache->head;
// Sanity check
// TODO: throw warning?
@ -1707,14 +1722,14 @@ _pma_copy_dpage_cache(void) {
// If pages available in cache...
if (size) {
// Use a page from the cache and record that it was used afterwards
uint16_t head = _pma_state->metadata.dpage_cache->head;
offset = _pma_state->metadata.dpage_cache->queue[head];
assert(offset != 0);
_pma_copy_page(address, offset, FIRST, _pma_state->snapshot_fd);
_pma_state->metadata.dpage_cache->size -= 1;
_pma_state->metadata.dpage_cache->head = ((head + 1) % PMA_DPAGE_CACHE_SIZE);
_pma_state->metadata.dpage_cache->head = (_pma_state->metadata.dpage_cache->head + 1) % PMA_DPAGE_CACHE_SIZE;
} else {
// Otherwise, get a brand new page from disk
offset = _pma_get_disk_dpage();
@ -1785,7 +1800,7 @@ void
_pma_mark_page_dirty(uint64_t index, uint64_t offset, PageStatus status, uint32_t num_pages) {
DirtyPageEntry *dirty_page = (DirtyPageEntry *)_pma_state->metadata.dirty_pages;
// TODO: check for dirty page overflow
assert(_pma_state->metadata.num_dirty_pages < PMA_DIRTY_PAGE_LIMIT);
dirty_page += _pma_state->metadata.num_dirty_pages++;
dirty_page->index = index;

View File

@ -4,6 +4,7 @@
#pragma once
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
@ -11,6 +12,15 @@
// PROTOTYPES
//==============================================================================
/**
* Struct returned from pma_load()
*/
typedef struct _pma_root_state_t {
uint64_t epoch; // Epoch ID of the most recently processed event
uint64_t event; // ID of the most recently processed event
uint64_t root; // Root after most recent event
} RootState;
/**
* Initialize a brand new PMA environment and event snapshot
*
@ -25,14 +35,14 @@ pma_init(const char *path);
/**
* TODO
*/
int
RootState
pma_load(const char *path);
/**
* TODO
*/
int
pma_close(uint64_t epoch, uint64_t event);
pma_close(uint64_t epoch, uint64_t event, uint64_t root);
/**
* Allocate a new block of memory in the PMA
@ -59,4 +69,10 @@ pma_free(void *address);
* TODO
*/
int
pma_sync(uint64_t epoch, uint64_t event);
pma_sync(uint64_t epoch, uint64_t event, uint64_t root);
/**
* True if the address is in the PMA
*/
bool
pma_in_arena(void *address);