philip's pma changes inter alia

barter-simsum 2023-05-09 11:31:59 -04:00
parent 69a0cca983
commit 15c4af135c
11 changed files with 365 additions and 74 deletions

rust/ares/Cargo.lock generated

@ -15,6 +15,7 @@ dependencies = [
"ares_macros",
"assert_no_alloc",
"bitvec",
"cc",
"criterion",
"either",
"ibig",
@ -88,6 +89,12 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cc"
version = "1.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
[[package]]
name = "cfg-if"
version = "1.0.0"

rust/ares/Cargo.toml

@ -24,6 +24,9 @@ static_assertions = "1.1.0"
ibig = "0.3.6"
assert_no_alloc = "1.1.2"
[build-dependencies]
cc = "1.0.79"
[[bin]]
name = "ares"
path = "src/main.rs"

rust/ares/build.rs Normal file

@ -0,0 +1,9 @@
fn main() {
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-changed=./src/pma");
cc::Build::new()
.file("./src/pma/malloc.c")
.file("./src/pma/includes/checksum.c")
.opt_level(3)
.compile("pma_malloc");
}
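The cc crate archives these objects as libpma_malloc.a, which the `#[link(name = "pma_malloc", kind = "static")]` extern block in the new src/snapshot/pma.rs (below) links against.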

rust/ares/src/main.rs

@ -28,6 +28,7 @@ fn main() -> io::Result<()> {
ares::serialization::use_gdb();
ares::snapshot::use_gdb();
ares::snapshot::double_jam::use_gdb();
ares::snapshot::pma::use_gdb();
}
if filename == "serf" {

rust/ares/src/mem.rs

@ -1,5 +1,6 @@
use crate::assert_acyclic;
use crate::noun::{Atom, Cell, CellMemory, IndirectAtom, Noun, NounAllocator};
use crate::snapshot::pma::{pma_in_arena, pma_malloc};
use either::Either::{self, Left, Right};
use ibig::Stack;
use libc::{c_void, memcmp};
@ -585,6 +586,80 @@ impl NockStack {
assert_acyclic!(*noun);
}
/** Copy out to the PMA
*
* See copy_east/west for inline comments
*/
pub unsafe fn copy_pma(&mut self, noun: &mut Noun) {
assert!(self.polarity == Polarity::West);
let work_start = self.stack_pointer;
self.stack_pointer = self.stack_pointer.add(2);
*(self.stack_pointer.sub(2) as *mut Noun) = *noun;
*(self.stack_pointer.sub(1) as *mut *mut Noun) = noun as *mut Noun;
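// The two slots just pushed form a (noun, destination) work item; the loop
// below pops such pairs until the stack drains back to work_start.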
loop {
if self.stack_pointer == work_start {
break;
}
let next_noun = *(self.stack_pointer.sub(2) as *const Noun);
let next_dest = *(self.stack_pointer.sub(1) as *const *mut Noun);
self.stack_pointer = self.stack_pointer.sub(2);
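// A direct atom is stored by value; an allocated noun may already have been
// copied (forwarding pointer set), may already live in the PMA, or must be
// copied now.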
match next_noun.as_either_direct_allocated() {
Either::Left(_direct) => {
*next_dest = next_noun;
}
Either::Right(allocated) => match allocated.forwarding_pointer() {
Option::Some(new_allocated) => {
*next_dest = new_allocated.as_noun();
}
Option::None => {
if pma_in_arena(allocated.to_raw_pointer()) {
*next_dest = allocated.as_noun();
} else {
match allocated.as_either() {
Either::Left(mut indirect) => {
let new_indirect_alloc =
pma_malloc(indirect_raw_size(indirect));
copy_nonoverlapping(
indirect.to_raw_pointer(),
new_indirect_alloc,
indirect_raw_size(indirect),
);
indirect.set_forwarding_pointer(new_indirect_alloc);
*next_dest = IndirectAtom::from_raw_pointer(new_indirect_alloc)
.as_noun();
}
Either::Right(mut cell) => {
let new_cell_alloc: *mut CellMemory =
pma_malloc(word_size_of::<CellMemory>());
(*new_cell_alloc).metadata = (*cell.to_raw_pointer()).metadata;
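// Push the tail and head (each with its destination slot in the new
// cell) so they are copied on later iterations of the loop.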
*(self.stack_pointer as *mut Noun) = cell.tail();
*(self.stack_pointer.add(1) as *mut *mut Noun) =
&mut (*new_cell_alloc).tail;
*(self.stack_pointer.add(2) as *mut Noun) = cell.head();
*(self.stack_pointer.add(3) as *mut *mut Noun) =
&mut (*new_cell_alloc).head;
self.stack_pointer = self.stack_pointer.add(4);
cell.set_forwarding_pointer(new_cell_alloc);
*next_dest = Cell::from_raw_pointer(new_cell_alloc).as_noun();
}
}
}
}
},
}
}
assert_acyclic!(*noun);
}
pub fn frame_size(&self) -> usize {
match self.polarity {
Polarity::East => self.frame_pointer as usize - self.stack_pointer as usize,

rust/ares/src/pma/makefile

@ -8,7 +8,7 @@ LIB_CFLAGS := -D_GNU_SOURCE
DEV_CFLAGS := -Wall -Wextra -Wpedantic -Wformat=2 -Wno-unused-parameter \
-Wshadow -Wwrite-strings -Wstrict-prototypes \
-Wold-style-definition -Wredundant-decls -Wnested-externs \
-Wmissing-include-dirs -Og
-Wmissing-include-dirs -Og -g3
# Core sources
SRC_DIR := ./
@ -58,7 +58,7 @@ default : help
# target: help - Display all targets in makefile
#
help :
@egrep "^# target:" makefile
@grep -E "^# target:" makefile
# Run sanity check
#

rust/ares/src/pma/malloc.c

@ -100,12 +100,12 @@
/**
* Convert pointer to index in page directory
*/
#define PTR_TO_INDEX(foo) ((((uint64_t)foo) - ((uint64_t)_pma_state->metadata->arena_start)) >> PMA_PAGE_SHIFT)
#define PTR_TO_INDEX(foo) ((((uint64_t)(foo)) - ((uint64_t)_pma_state->metadata->arena_start)) >> PMA_PAGE_SHIFT)
/**
* Convert index in page directory to pointer
*/
#define INDEX_TO_PTR(foo) (void *)((char *)_pma_state->metadata->arena_start + (foo * PMA_PAGE_SIZE))
#define INDEX_TO_PTR(foo) (void *)((char *)_pma_state->metadata->arena_start + ((foo) * PMA_PAGE_SIZE))
/**
* Flags to use for all mmap operations, excluding initial metadata page mapping
@ -148,7 +148,7 @@
*
* 511 for 4 KiB page
*/
#define PMA_DPAGE_CACHE_SIZE ((PMA_PAGE_SIZE - sizeof(DPageCache)) / sizeof(uint64_t))
#define PMA_DPAGE_CACHE_SIZE ((PMA_PAGE_SIZE - sizeof(struct _pma_free_dpage_cache_t)) / sizeof(uint64_t))
/**
* Max number of dirty page entries that can be stored in the extra space of the
@ -158,7 +158,8 @@
*
* 164 for 4 KiB page
*/
#define PMA_DIRTY_PAGE_LIMIT ((PMA_PAGE_SIZE - sizeof(Metadata)) / sizeof(DirtyPageEntry))
/* #define PMA_DIRTY_PAGE_LIMIT ((PMA_PAGE_SIZE - sizeof(struct _pma_metadata_t)) / sizeof(DirtyPageEntry)) */
#define PMA_DIRTY_PAGE_LIMIT 163
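The limit is hard-coded because dirty_pages is now a fixed-size member of Metadata itself, so deriving the count from sizeof(Metadata) would be circular; the static_assert on sizeof(Metadata) below keeps the constant honest.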
/**
* Default settings for new PMA backing files
@ -207,6 +208,8 @@
// HELPER MACROS
//==============================================================================
/* TODO: these should just be function-like macros. The "save line" and goto
are unnecessary */
/**
* Log error and return failure during new PMA bootstrap
*/
@ -380,6 +383,7 @@ typedef struct _pma_metadata_t {
uint32_t version; // Version of Vere (New Mars?) used to produce the backing file
uint64_t epoch; // Epoch ID of the most recently processed event
uint64_t event; // ID of the most recently processed event
uint64_t root; // Root after most recent event
void *arena_start; // Beginning of mapped address space
void *arena_end; // End of mapped address space (first address beyond mapped range)
SharedPageHeader *shared_pages[PMA_MAX_SHARED_SHIFT]; // Shared allocation pages
@ -387,8 +391,10 @@ typedef struct _pma_metadata_t {
uint64_t snapshot_size; // Size of the backing file
uint64_t next_offset; // Next open dpage in the backing file
uint8_t num_dirty_pages; // Counter of dirty page entries
DirtyPageEntry dirty_pages[]; // Queue of changes not yet synced to page directory
uint64_t padding[2]; // sizeof(Metadata) must be PMA_PAGE_SIZE
DirtyPageEntry dirty_pages[PMA_DIRTY_PAGE_LIMIT]; // Queue of changes not yet synced to page directory
} Metadata;
static_assert(sizeof(Metadata) == PMA_PAGE_SIZE);
/**
* Struct containing global data used by PMA
@ -455,8 +461,8 @@ int
pma_init(const char *path) {
DIR *dir;
char *filepath;
void *meta_pages;
void *page_dir;
void *meta_pages = NULL;
void *page_dir = NULL;
uint64_t meta_bytes;
int err;
int err_line;
@ -601,6 +607,7 @@ pma_init(const char *path) {
_pma_state->metadata->version = PMA_DATA_VERSION;
_pma_state->metadata->epoch = 0;
_pma_state->metadata->event = 0;
_pma_state->metadata->root = 0;
// Initialize shared pages stacks
for(uint8_t i = 0; i < PMA_MAX_SHARED_SHIFT; ++i) {
@ -651,6 +658,8 @@ pma_init(const char *path) {
// First page used by dpage cache
_pma_state->page_directory.entries[0].status = FIRST;
_pma_state->page_directory.entries[0].offset = meta_bytes;
_pma_state->metadata->next_offset += PMA_PAGE_SIZE;
//
// Setup transient state
@ -679,10 +688,7 @@ pma_init(const char *path) {
if (err) INIT_ERROR;
// Sync page directory
err = msync(
(void *)_pma_state->page_directory.entries,
PMA_PAGE_SIZE,
MS_SYNC);
err = msync(_pma_state->page_directory.entries, PMA_PAGE_SIZE, MS_SYNC);
if (err) INIT_ERROR;
// Compute checksum for metadata
@ -695,10 +701,7 @@ pma_init(const char *path) {
meta_pages,
(const void *)(&(_pma_state->metadata)),
PMA_PAGE_SIZE);
memcpy(
(void *)((Metadata*)meta_pages + 1),
(const void *)(&(_pma_state->metadata)),
PMA_PAGE_SIZE);
memcpy((Metadata*)meta_pages + 1, &_pma_state->metadata, PMA_PAGE_SIZE);
if (msync(meta_pages, meta_bytes, MS_SYNC)) INIT_ERROR;
// Remove PROT_WRITE permissions from snapshot and page directory
@ -711,7 +714,7 @@ pma_init(const char *path) {
//
// Clean up
free((void*)filepath);
free(filepath);
munmap(meta_pages, meta_bytes);
return 0;
@ -719,23 +722,23 @@ pma_init(const char *path) {
init_error:
fprintf(stderr, "(L%d) PMA initialization error: %s\n", err_line, strerror(errno));
munmap(meta_pages, meta_bytes);
munmap(page_dir, PMA_INIT_DIR_SIZE);
if (meta_pages) munmap(meta_pages, meta_bytes);
if (page_dir) munmap(page_dir, PMA_INIT_DIR_SIZE);
if (snapshot_fd) close(snapshot_fd);
if (page_dir_fd) close(page_dir_fd);
free((void*)filepath);
free((void*)_pma_state);
free(filepath);
free(_pma_state);
return -1;
}
int
RootState
pma_load(const char *path) {
Metadata *newer_page;
Metadata *older_page;
char *filepath;
void *address;
void *meta_pages;
void *meta_pages = NULL;
uint64_t index;
uint64_t meta_bytes;
int err;
@ -749,7 +752,7 @@ pma_load(const char *path) {
// Only init once
if (_pma_state != NULL) {
return 0;
return (RootState){0, 0, 0};
}
// Precompute metadata and page directory sizes in bytes
@ -772,18 +775,20 @@ pma_load(const char *path) {
sprintf(filepath, "%s/%s/%s", path, PMA_DEFAULT_DIR_NAME, PMA_SNAPSHOT_FILENAME);
snapshot_fd = open(filepath, PMA_FILE_FLAGS, PMA_FILE_PERMISSIONS);
if (snapshot_fd == -1) LOAD_ERROR;
_pma_state->snapshot_fd = snapshot_fd;
// Open backing file for page directory
sprintf(filepath, "%s/%s/%s", path, PMA_DEFAULT_DIR_NAME, PMA_PAGE_DIR_FILENAME);
page_dir_fd = open(filepath, PMA_FILE_FLAGS, PMA_FILE_PERMISSIONS);
if (page_dir_fd == -1) LOAD_ERROR;
_pma_state->page_dir_fd = page_dir_fd;
//
// Verify file can be loaded
//
// Read magic code
err = read(snapshot_fd, (void*)(&_pma_state->metadata->magic_code), sizeof(uint64_t));
err = read(snapshot_fd, &_pma_state->metadata->magic_code, sizeof(uint64_t));
if ((err == -1) || (_pma_state->metadata->magic_code != PMA_MAGIC_CODE)) {
errno = EILSEQ;
LOAD_ERROR;
@ -803,8 +808,8 @@ pma_load(const char *path) {
newer_page = (Metadata*)meta_pages;
older_page = (Metadata*)((char*)meta_pages + PMA_PAGE_SIZE);
if (
(newer_page->epoch > older_page->epoch) ||
((newer_page->epoch == older_page->epoch) && (newer_page->event > older_page->event))) {
(newer_page->epoch < older_page->epoch) ||
((newer_page->epoch == older_page->epoch) && (newer_page->event < older_page->event))) {
newer_page = older_page;
older_page = (Metadata*)meta_pages;
}
@ -846,9 +851,14 @@ pma_load(const char *path) {
// Map pages and compute free page caches
//
index = 0;
while (1) {
// get total number of indices
struct stat st;
fstat(page_dir_fd, &st);
_pma_state->page_directory.size = (st.st_size / sizeof(PageDirEntry)) - 1;
index = 0;
while (index < _pma_state->page_directory.size) {
uint64_t count = 1;
switch (_pma_state->page_directory.entries[index].status) {
@ -868,7 +878,7 @@ pma_load(const char *path) {
// Add to appropriate free page cache
if (count == 1) {
SinglePageCache *free_page = (SinglePageCache *)malloc(sizeof(SinglePageCache));
SinglePageCache *free_page = malloc(sizeof(SinglePageCache));
// Add it to the single-page cache
free_page->next = _pma_state->free_pages;
@ -876,7 +886,7 @@ pma_load(const char *path) {
_pma_state->free_pages = free_page;
} else {
PageRunCache *page_run = (PageRunCache *)malloc(sizeof(SinglePageCache));
PageRunCache *page_run = malloc(sizeof(PageRunCache));
page_run->next = _pma_state->free_page_runs;
page_run->page = INDEX_TO_PTR(index - count);
@ -890,7 +900,7 @@ pma_load(const char *path) {
(PMA_PAGE_SIZE * count),
PROT_READ,
MAP_SHARED | MAP_FIXED_NOREPLACE,
page_dir_fd,
snapshot_fd,
_pma_state->page_directory.entries[index - count].offset);
continue;
@ -902,7 +912,7 @@ pma_load(const char *path) {
PMA_PAGE_SIZE,
PROT_READ,
MAP_SHARED | MAP_FIXED_NOREPLACE,
page_dir_fd,
snapshot_fd,
_pma_state->page_directory.entries[index].offset);
if (address == MAP_FAILED) LOAD_ERROR;
@ -926,7 +936,7 @@ pma_load(const char *path) {
(count * PMA_PAGE_SIZE),
PROT_READ,
MAP_SHARED | MAP_FIXED_NOREPLACE,
page_dir_fd,
snapshot_fd,
_pma_state->page_directory.entries[index - count].offset);
if (address == MAP_FAILED) LOAD_ERROR;
@ -939,17 +949,11 @@ pma_load(const char *path) {
errno = EINVAL;
LOAD_ERROR;
}
}
// Get next free index
_pma_state->page_directory.next_index = index;
// Get total number of indices
fstat(page_dir_fd, &st);
_pma_state->page_directory.size = ((st.st_size / sizeof(PageDirEntry)) - 1);
break;
}
//
// Done
//
@ -958,28 +962,32 @@ pma_load(const char *path) {
// Clean up
munmap(meta_pages, meta_bytes);
free((void*)filepath);
free(filepath);
return 0;
return (RootState){
.epoch = _pma_state->metadata->epoch,
.event = _pma_state->metadata->event,
.root = _pma_state->metadata->root,
};
load_error:
fprintf(stderr, "(L%d) Error loading PMA from %s: %s\n", err_line, path, strerror(errno));
munmap(meta_pages, meta_bytes);
if (meta_pages) munmap(meta_pages, meta_bytes);
munmap(_pma_state->page_directory.entries, PMA_MAXIMUM_DIR_SIZE);
munmap(_pma_state->metadata->arena_start, ((uint64_t)_pma_state->metadata->arena_end - (uint64_t)_pma_state->metadata->arena_start));
if (snapshot_fd) close(snapshot_fd);
if (page_dir_fd) close(page_dir_fd);
free((void*)filepath);
free((void*)_pma_state);
free(filepath);
free(_pma_state);
return -1;
return (RootState){0};
}
int
pma_close(uint64_t epoch, uint64_t event) {
pma_close(uint64_t epoch, uint64_t event, uint64_t root) {
// Sync changes to disk
if (pma_sync(epoch, event)) {
if (pma_sync(epoch, event, root)) {
return -1;
}
@ -987,14 +995,17 @@ pma_close(uint64_t epoch, uint64_t event) {
munmap(_pma_state->page_directory.entries, PMA_MAXIMUM_DIR_SIZE);
// Unmap snapshot
// XX should just be end - start?
munmap(_pma_state->metadata->arena_start, _pma_state->metadata->snapshot_size);
// Close file descriptors
close(_pma_state->page_dir_fd);
close(_pma_state->snapshot_fd);
// Free PMA state
free((void*)_pma_state);
free(_pma_state);
_pma_state = NULL;
return 0;
}
@ -1075,7 +1086,7 @@ pma_free(void *address) {
}
int
pma_sync(uint64_t epoch, uint64_t event) {
pma_sync(uint64_t epoch, uint64_t event, uint64_t root) {
DPageCache *dpage_cache = _pma_state->metadata->dpage_cache;
ssize_t bytes_out;
int err;
@ -1114,6 +1125,7 @@ pma_sync(uint64_t epoch, uint64_t event) {
// Compute checksum
_pma_state->metadata->epoch = epoch;
_pma_state->metadata->event = event;
_pma_state->metadata->root = root;
_pma_state->metadata->checksum = 0;
_pma_state->metadata->checksum = crc_32(
(const unsigned char *)(&(_pma_state->metadata)),
@ -1151,6 +1163,12 @@ sync_error:
return -1;
}
bool
pma_in_arena(void *address) {
return (address >= _pma_state->metadata->arena_start)
&& (address < _pma_state->metadata->arena_end);
}
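This is the predicate NockStack::copy_pma (above) uses to skip nouns whose backing memory is already persistent.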
//==============================================================================
// PRIVATE FUNCTIONS
//==============================================================================
@ -1172,10 +1190,7 @@ _pma_verify_checksum(Metadata *meta_page) {
// Copy metadata in advance of using it, since: 1) we expect the checksum to
// be valid; 2) we need to set the value of the checksum in the metadata to 0.
memcpy(
(void*)(&(_pma_state->metadata)),
(const void *)meta_page,
PMA_PAGE_SIZE);
memcpy(&_pma_state->metadata, meta_page, PMA_PAGE_SIZE);
// Since we're computing the checksum on the object which itself includes the
// checksum, we treat the checksum as 0.
@ -1187,7 +1202,7 @@ _pma_verify_checksum(Metadata *meta_page) {
PMA_PAGE_SIZE);
// Compare checksums
return (checksum == _pma_state->metadata->checksum);
return (checksum == meta_page->checksum);
}
/**
@ -1327,7 +1342,7 @@ _pma_update_free_pages(uint8_t num_dirty_pages, DirtyPageEntry *dirty_pages) {
if (dirty_pages[i].status != FREE) continue;
if (dirty_pages[i].num_pages > 1) {
page_run = (PageRunCache *)malloc(sizeof(PageRunCache));
page_run = malloc(sizeof(PageRunCache));
if (page_run == NULL) return -1;
page_run->next = _pma_state->free_page_runs;
@ -1336,7 +1351,7 @@ _pma_update_free_pages(uint8_t num_dirty_pages, DirtyPageEntry *dirty_pages) {
_pma_state->free_page_runs = page_run;
} else {
free_page = (SinglePageCache *)malloc(sizeof(SinglePageCache));
free_page = malloc(sizeof(SinglePageCache));
if (free_page == NULL) return -1;
free_page->next = _pma_state->free_pages;
@ -1391,7 +1406,7 @@ _pma_malloc_bytes(size_t size)
shared_page = _pma_state->metadata->shared_pages[bucket];
} else {
if (_pma_copy_shared_page((void *)shared_page)) {
if (_pma_copy_shared_page(shared_page)) {
return NULL;
}
}
@ -1416,10 +1431,9 @@ _pma_malloc_bytes(size_t size)
--(shared_page->free);
// Return slot
return (void *)(
(char *)shared_page +
return (char *)shared_page +
(sizeof(SharedPageHeader)) +
(slot_size * ((PMA_BITMAP_BITS * byte) + bit)));
(slot_size * ((PMA_BITMAP_BITS * byte) + bit));
}
/**
@ -1508,7 +1522,7 @@ _pma_malloc_single_page(PageStatus status) {
if (free_page != NULL) {
address = free_page->page;
_pma_state->free_pages = free_page->next;
free((void *)free_page);
free(free_page);
// Make the page writeable
mprotect(address, PMA_PAGE_SIZE, (PROT_READ | PROT_WRITE));
@ -1612,7 +1626,7 @@ _pma_get_cached_pages(uint64_t num_pages) {
_pma_state->free_pages = trailing_page;
}
free((void *)valid_page_run);
free(valid_page_run);
}
// Make pages writeable
@ -1773,7 +1787,7 @@ _pma_free_bytes(void *address) {
uint8_t bit = slot % PMA_BITMAP_BITS;
// Copy-on-write
_pma_copy_shared_page((void *)header);
_pma_copy_shared_page(header);
if (header->bits[byte] & (1 << bit)) {
WARNING("bucketized address already free");
@ -1844,6 +1858,8 @@ _pma_get_single_dpage(void) {
offset = _pma_get_cached_dpage();
if (!offset) {
// Otherwise, get a new dpage from disk
//
// XX returns 0 on failure, should assert
offset = _pma_get_disk_dpage();
}
@ -1882,6 +1898,7 @@ _pma_get_cached_dpage(void) {
// TODO: macros for dealing with cache?
// Pop page off queue
offset = _pma_state->metadata->dpage_cache->queue[head];
assert(offset != 0);
_pma_state->metadata->dpage_cache->size -= 1;
_pma_state->metadata->dpage_cache->head = ((head + 1) % PMA_DPAGE_CACHE_SIZE);
@ -1909,12 +1926,13 @@ _pma_copy_dpage_cache(void) {
assert(!dirty);
address = (void *)_pma_state->metadata->dpage_cache;
address = _pma_state->metadata->dpage_cache;
// If pages available in cache...
if (size) {
// Use a page from the cache and record that it was used afterwards
offset = _pma_state->metadata->dpage_cache->queue[head];
assert(offset != 0);
_pma_copy_page(address, offset, FIRST, _pma_state->snapshot_fd);
@ -1992,8 +2010,10 @@ _pma_copy_page(void *address, uint64_t offset, PageStatus status, int fd) {
// Add previous dpage to cache
// Note: the dpage cache should always be writeable here, either because the dpage cache is the page we just copied,
// or because it was made writeable in advance by _pma_copy_shared_page
assert(_pma_state->page_directory.entries[index].offset != 0);
_pma_state->metadata->dpage_cache->queue[tail] = _pma_state->page_directory.entries[index].offset;
_pma_state->metadata->dpage_cache->tail = ((tail + 1) % PMA_DPAGE_CACHE_SIZE);
_pma_state->metadata->dpage_cache->size += 1;
// Add page to dirty page list
_pma_mark_page_dirty(index, offset, status, 1);

rust/ares/src/pma/malloc.h

@ -4,6 +4,7 @@
#pragma once
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
@ -11,6 +12,15 @@
// PROTOTYPES
//==============================================================================
/**
* Struct returned from pma_load()
*/
typedef struct _pma_root_state_t {
uint64_t epoch; // Epoch ID of the most recently processed event
uint64_t event; // ID of the most recently processed event
uint64_t root; // Root after most recent event
} RootState;
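This layout must stay in sync with the #[repr(C)] RootState struct on the Rust side (src/snapshot/pma.rs, below), since pma_load() returns it by value across the FFI boundary.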
/**
* Initialize a brand new PMA environment and event snapshot
*
@ -32,7 +42,7 @@ pma_init(const char *path);
* @return RootState {epoch, event, root} on success
* @return zeroed RootState on failure; errno set to error code
*/
int
RootState
pma_load(const char *path);
/**
@ -46,7 +56,7 @@ pma_load(const char *path);
* @return -1 failure; errno set to error code
*/
int
pma_close(uint64_t epoch, uint64_t event);
pma_close(uint64_t epoch, uint64_t event, uint64_t root);
/**
* Allocate a new block of memory in the PMA
@ -81,4 +91,10 @@ pma_free(void *address);
* @return -1 failure; errno set to error code
*/
int
pma_sync(uint64_t epoch, uint64_t event);
pma_sync(uint64_t epoch, uint64_t event, uint64_t root);
/**
* True if the address is in the PMA
*/
bool
pma_in_arena(void *address);

rust/ares/src/serf.rs

@ -33,7 +33,9 @@ pub fn serf() -> io::Result<()> {
snap_path.push(".urb");
snap_path.push("chk");
create_dir_all(&snap_path)?;
let snap = &mut snapshot::double_jam::DoubleJam::new(snap_path);
// PMA is currently limited to ~650KB; use DoubleJam for anything bigger
// let ref mut snap = snapshot::double_jam::DoubleJam::new(snap_path);
let ref mut snap = snapshot::pma::Pma::new(snap_path);
let stack = &mut NockStack::new(96 << 10 << 10, 0);
let newt = &mut Newt::new();

rust/ares/src/snapshot.rs

@ -2,6 +2,7 @@ use crate::mem::NockStack;
use crate::noun::Noun;
pub mod double_jam;
pub mod pma;
crate::gdb!();

rust/ares/src/snapshot/pma.rs Normal file

@ -0,0 +1,157 @@
use super::Snapshot;
use crate::mem::NockStack;
use crate::mug::mug_u32;
use crate::noun::{Noun, D};
use libc::{c_char, c_int, c_void, size_t};
use std::ffi::CString;
use std::path::{Path, PathBuf};
crate::gdb!();
mod raw {
use super::*;
#[repr(C)]
pub struct RootState {
pub epoch: u64,
pub event: u64,
pub root: u64,
}
#[link(name = "pma_malloc", kind = "static")]
extern "C" {
pub(super) fn pma_init(path: *const c_char) -> c_int;
pub(super) fn pma_load(path: *const c_char) -> RootState;
pub(super) fn pma_close(epoch: u64, event: u64, root: u64) -> c_int;
pub(super) fn pma_malloc(size: size_t) -> *mut c_void;
pub(super) fn pma_free(ptr: *mut c_void) -> c_int;
pub(super) fn pma_sync(epoch: u64, event: u64, root: u64) -> c_int;
pub(super) fn pma_in_arena(ptr: *const c_void) -> bool;
}
}
unsafe fn pma_init<P: AsRef<Path>>(path: P) -> i32 {
let path = CString::new(path.as_ref().to_str().unwrap()).unwrap();
raw::pma_init(path.as_ptr()) as i32
}
unsafe fn pma_load<P: AsRef<Path>>(path: P) -> (u64, u64, Noun) {
let path = CString::new(path.as_ref().to_str().unwrap()).unwrap();
let rs = raw::pma_load(path.as_ptr());
(rs.epoch, rs.event, Noun::from_raw(rs.root))
}
#[allow(dead_code)]
unsafe fn pma_close(epoch: u64, event: u64, root: Noun) -> i32 {
raw::pma_close(epoch, event, root.as_raw()) as i32
}
/** Allocate a block of memory from the persistent memory arena.
*
* Size is in *words*, unlike the underlying pma_malloc.
*/
pub fn pma_malloc<T>(size: usize) -> *mut T {
unsafe { raw::pma_malloc((size << 3) as size_t) as *mut T }
}
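The shift converts 8-byte words to the byte count the underlying C allocator expects; the cell copy in mem.rs, for example, requests word_size_of::<CellMemory>() words.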
#[allow(dead_code)]
unsafe fn pma_free<T>(ptr: *mut T) -> i32 {
raw::pma_free(ptr as *mut c_void) as i32
}
unsafe fn pma_sync(epoch: u64, event: u64, root: Noun) -> i32 {
raw::pma_sync(epoch, event, root.as_raw()) as i32
}
pub fn pma_in_arena<T>(ptr: *const T) -> bool {
unsafe { raw::pma_in_arena(ptr as *const c_void) }
}
pub struct Pma {
path: PathBuf,
noun: Noun,
}
impl Pma {
pub fn new<P: AsRef<Path>>(path: P) -> Self {
let path = path.as_ref().to_path_buf();
Self { path, noun: D(0) }
}
}
impl Snapshot for Pma {
fn save(&mut self, stack: &mut NockStack, noun: &mut Noun) {
// Required so everything in the PMA has a cached mug; otherwise mugging a
// noun later would try to write into the (read-only) PMA
let _mug = mug_u32(stack, *noun);
unsafe { stack.copy_pma(noun) };
self.noun = *noun;
}
fn sync(&mut self, _stack: &mut NockStack, epoch: u64, event: u64) {
unsafe {
pma_sync(epoch, event, self.noun);
}
}
fn load(&mut self, _stack: &mut NockStack) -> std::io::Result<(u64, u64, Noun)> {
let path = self.path.join(".bin/page.bin");
if path.is_file() {
eprintln!("\rload: found snapshot at {:?}", path);
unsafe { Ok(pma_load(&self.path)) }
} else {
eprintln!("\rload: creating snapshot at {:?}", path);
unsafe { pma_init(&self.path) };
Ok((0, 0, D(0)))
}
}
}
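For orientation, a minimal sketch of how a caller might drive this implementation (the snap_path, stack, noun, epoch, and event bindings here are hypothetical; serf.rs wires up the real ones):

// Sketch only: persist `noun`, then record it as the state after `event`.
let snap = &mut Pma::new(snap_path);   // hypothetical path
snap.save(stack, &mut noun);           // mugs the noun, then copies it into the PMA
snap.sync(stack, epoch, event);        // pma_sync with the saved root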
#[cfg(test)]
mod tests {
use super::*;
use crate::noun::IndirectAtom;
#[test]
fn test_pma() {
let path = "/tmp/ares_pma_test";
if let Err(err) = std::fs::remove_dir_all(path) {
if err.kind() != std::io::ErrorKind::NotFound {
panic!("failed to remove dir: {}", err);
}
}
unsafe {
pma_init(path);
let ref mut stack = NockStack::new(8 << 10 << 10, 0);
let root = IndirectAtom::new_raw(stack, 1, &0xffff_ffff_ffff_ffff).as_noun();
let eight = pma_malloc(8) as *mut u64;
*eight = 0xdeadbeef;
assert!(0 == pma_close(10, 12, root));
pma_load(path);
assert_eq!(0, pma_sync(13, 15, root));
let _ = pma_malloc(8) as *mut u64;
assert_eq!(0, pma_sync(14, 16, root));
let _ = pma_malloc(8) as *mut u64;
assert_eq!(0, pma_sync(15, 16, root));
let twenty = pma_malloc(8) as *mut u64;
pma_free(twenty as *mut c_void);
assert_eq!(0, pma_sync(16, 15, root));
let _ = pma_malloc(8) as *mut u64;
assert_eq!(0, pma_sync(17, 15, root));
let _ = pma_malloc(8) as *mut u64;
assert_eq!(0, pma_sync(18, 15, root));
let _ = pma_malloc(8) as *mut u64;
let twenty = pma_malloc(8) as *mut u64;
*twenty = 0xcafebabe;
pma_free(twenty as *mut c_void);
pma_close(123, 124, root);
}
if let Err(err) = std::fs::remove_dir_all(path) {
if err.kind() != std::io::ErrorKind::NotFound {
panic!("failed to remove dir: {}", err);
}
}
}
}