u3: removes obsolete portable snapshot implementation

This commit is contained in:
Joe Bryan 2020-09-01 13:59:20 -07:00
parent 7d541b5a1f
commit 2897feeea3
6 changed files with 0 additions and 599 deletions

View File

@ -78,27 +78,6 @@
c3_w
u3e_dirty(void);
/* u3e_hold(): backup memory images
*/
c3_o
u3e_hold(void);
/* u3e_drop(): remove backed-up memory images
*/
c3_o
u3e_drop(void);
/* u3e_fall(): restore memory images
*/
c3_o
u3e_fall(void);
/* u3e_wipe(): discard memory images
*/
c3_o
u3e_wipe(void);
/* u3e_yolo(): disable dirty page tracking, read/write whole loom.
*/
c3_o

View File

@ -145,23 +145,3 @@
*/
c3_w
u3m_pack(void);
/* u3m_rock_stay(): jam state into [dir_c] at [evt_d]
*/
c3_o
u3m_rock_stay(c3_c* dir_c, c3_d evt_d);
/* u3m_rock_load(): load state from [dir_c] at [evt_d]
*/
c3_o
u3m_rock_load(c3_c* dir_c, c3_d evt_d);
/* u3m_rock_drop(): delete saved state from [dir_c] at [evt_d]
*/
c3_o
u3m_rock_drop(c3_c* dir_c, c3_d evt_d);
/* u3m_wipe(): purge and reinitialize loom, with checkpointing
*/
void
u3m_wipe(void);

View File

@ -18,24 +18,6 @@
c3_w*
u3s_jam_fib(u3_noun a, c3_w* bit_w);
/* u3s_jam_met(): measure a noun for jam, calculating backrefs
*/
c3_d
u3s_jam_met(u3_noun a, u3p(u3h_root)* bak_p);
/* u3s_jam_buf(): jam [a] into [buf_w], without allocation
**
** using backrefs in [bak_p], as computed by u3s_jam_met
** can only encode up to c3_w bits
*/
void
u3s_jam_buf(u3_noun a, u3p(u3h_root) bak_p, c3_w* buf_w);
/* u3s_jam_file(): jam [a] into a file, overwriting
*/
c3_o
u3s_jam_file(u3_noun a, c3_c* pas_c);
/* u3s_jam_xeno(): jam with off-loom buffer (re-)allocation.
*/
c3_d

View File

@ -882,120 +882,6 @@ u3e_live(c3_o nuu_o, c3_c* dir_c)
return nuu_o;
}
/* _ce_image_move(): rename an image file to ([bak_o] == c3y) or from
**                   (c3n) its ".bak" backup name.
*/
static c3_o
_ce_image_move(u3e_image* img_u, c3_o bak_o)
{
  //  [new_c] is [old_c] plus the 4-byte ".bak" suffix;
  //  bounds are tied to the declarations via sizeof (were magic numbers)
  //
  c3_c old_c[8193];
  c3_c new_c[8197];
  snprintf(old_c, sizeof(old_c), "%s/.urb/chk/%s.bin", u3P.dir_c, img_u->nam_c);
  snprintf(new_c, sizeof(new_c), "%s.bak", old_c);

  c3_i ret_i;

  if ( c3y == bak_o ) {
    ret_i = rename(old_c, new_c);
  }
  else {
    ret_i = rename(new_c, old_c);
  }

  if ( 0 != ret_i ) {
    u3l_log("loom: %s %s failed: %s\r\n", ( c3y == bak_o ) ? "hold" : "fall",
            img_u->nam_c, strerror(errno));
    return c3n;
  }

  return c3y;
}
/* u3e_hold(): backup memory images
*/
c3_o
u3e_hold(void)
{
  //  back up the north image first, then the south;
  //  stop at the first failure
  //
  if ( c3n == _ce_image_move(&u3P.nor_u, c3y) ) {
    return c3n;
  }
  if ( c3n == _ce_image_move(&u3P.sou_u, c3y) ) {
    return c3n;
  }

  //  XX sync directory
  //
  return c3y;
}
/* _ce_image_drop(): remove the ".bak" backup copy of [img_u]'s image file.
*/
static c3_o
_ce_image_drop(u3e_image* img_u)
{
  c3_c pat_c[8193];

  //  fix: bound was a hardcoded 8192, one short of the buffer
  //
  snprintf(pat_c, sizeof(pat_c), "%s/.urb/chk/%s.bin.bak", u3P.dir_c, img_u->nam_c);

  if ( 0 != unlink(pat_c) ) {
    u3l_log("loom: drop %s failed: %s\r\n", img_u->nam_c, strerror(errno));
    return c3n;
  }

  return c3y;
}
/* u3e_drop(): remove backed-up memory images
*/
c3_o
u3e_drop(void)
{
if ( (c3n == _ce_image_drop(&u3P.nor_u)) ||
(c3n == _ce_image_drop(&u3P.sou_u)) )
{
return c3n;
}
return c3y;
}
/* u3e_fall(): restore memory images
*/
c3_o
u3e_fall(void)
{
  //  restore the north image, then the south; bail on first failure
  //
  if ( c3n == _ce_image_move(&u3P.nor_u, c3n) ) {
    return c3n;
  }
  if ( c3n == _ce_image_move(&u3P.sou_u, c3n) ) {
    return c3n;
  }

  //  XX sync directory
  //
  return c3y;
}
/* u3e_wipe(): discard memory images
**
**   truncates both image files to zero, syncs, then closes the fds.
*/
c3_o
u3e_wipe(void)
{
  //  XX ensure no patch files are present
  //
  if ( 0 != ftruncate(u3P.nor_u.fid_i, 0) ) {
    u3l_log("loom: wipe %s failed: %s\r\n", u3P.nor_u.nam_c, strerror(errno));
    return c3n;
  }

  if ( 0 != ftruncate(u3P.sou_u.fid_i, 0) ) {
    u3l_log("loom: wipe %s failed: %s\r\n", u3P.sou_u.nam_c, strerror(errno));
    return c3n;
  }

  //  flush the truncation to disk before closing
  //
  c3_sync(u3P.nor_u.fid_i);
  c3_sync(u3P.sou_u.fid_i);

  //  NOTE(review): close() return values are ignored here — confirm
  //  that's acceptable after the explicit syncs above
  //
  close(u3P.nor_u.fid_i);
  close(u3P.sou_u.fid_i);

  //  XX sync directory
  //
  return c3y;
}
/* u3e_yolo(): disable dirty page tracking, read/write whole loom.
*/
c3_o

View File

@ -1729,121 +1729,6 @@ u3m_boot_lite(void)
return 0;
}
/* u3m_rock_stay(): jam state into [dir_c] at [evt_d]
**
**   writes [dir_c]/.urb/roc/[evt_d].jam; returns c3y on success.
*/
c3_o
u3m_rock_stay(c3_c* dir_c, c3_d evt_d)
{
  c3_c nam_c[8193];

  //  create each directory level in turn; mkdir failures (e.g. EEXIST)
  //  are deliberately ignored — the open in u3s_jam_file() will report
  //  a missing path. bounds now use sizeof (were hardcoded 8192,
  //  one short of the buffer).
  //
  snprintf(nam_c, sizeof(nam_c), "%s", dir_c);
  mkdir(nam_c, 0700);

  snprintf(nam_c, sizeof(nam_c), "%s/.urb", dir_c);
  mkdir(nam_c, 0700);

  snprintf(nam_c, sizeof(nam_c), "%s/.urb/roc", dir_c);
  mkdir(nam_c, 0700);

  snprintf(nam_c, sizeof(nam_c), "%s/.urb/roc/%" PRIu64 ".jam", dir_c, evt_d);

  {
    //  jam the kernel and the jet state together, tagged %fast
    //
    u3_noun dat   = u3nt(c3__fast, u3k(u3A->roc), u3j_stay());
    c3_o    ret_o = u3s_jam_file(dat, nam_c);
    u3z(dat);
    return ret_o;
  }
}
/* u3m_rock_load(): load state from [dir_c] at [evt_d]
**
**   reads and cues [dir_c]/.urb/roc/[evt_d].jam, installs the kernel
**   and jet state, and rebuilds caches. returns c3n on cue/shape failure.
*/
c3_o
u3m_rock_load(c3_c* dir_c, c3_d evt_d)
{
  c3_c nam_c[8193];
  snprintf(nam_c, 8192, "%s/.urb/roc/%" PRIu64 ".jam", dir_c, evt_d);

  {
    u3_noun dat;

    {
      //  XX u3m_file bails, but we'd prefer to return errors
      //
      u3_noun fil = u3m_file(nam_c);
      u3a_print_memory(stderr, "rock: load", u3r_met(5, fil));

      //  cue in a soft (error-trapping) context; a crash during
      //  deserialization is reported here rather than propagated
      //
      u3_noun pro = u3m_soft(0, u3ke_cue, fil);

      if ( u3_blip != u3h(pro) ) {
        fprintf(stderr, "rock: unable to cue %s\r\n", nam_c);
        u3z(pro);
        return c3n;
      }
      else {
        dat = u3k(u3t(pro));
        u3z(pro);
      }
    }

    {
      u3_noun roc, rel;

      //  NOTE(review): a nonzero (c3n) return from u3r_pq() means [dat]
      //  did not match the [%fast roc rel] shape — confirm against u3r docs
      //
      if ( u3r_pq(dat, c3__fast, &roc, &rel) ) {
        u3z(dat);
        return c3n;
      }

      u3A->roc = u3k(roc);
      u3j_load(u3k(rel));
    }

    u3z(dat);
  }

  u3A->ent_d = evt_d;

  //  rebuild jet and bytecode caches against the loaded state
  //
  u3j_ream();
  u3n_ream();

  return c3y;
}
/* u3m_rock_drop(): delete saved state from [dir_c] at [evt_d]
**
**   unlinks [dir_c]/.urb/roc/[evt_d].jam; returns c3n on failure.
*/
c3_o
u3m_rock_drop(c3_c* dir_c, c3_d evt_d)
{
  c3_c nam_c[8193];

  //  fix: bound was a hardcoded 8192, one short of the buffer
  //
  snprintf(nam_c, sizeof(nam_c), "%s/.urb/roc/%" PRIu64 ".jam", dir_c, evt_d);

  if ( 0 != unlink(nam_c) ) {
    u3l_log("rock: drop %s failed: %s\r\n", nam_c, strerror(errno));
    return c3n;
  }

  return c3y;
}
/* u3m_wipe(): purge and reinitialize loom, with checkpointing
*/
void
u3m_wipe(void)
{
  //  clear page flags
  //
  //  NOTE(review): assumes one dirty bit per loom page, packed eight
  //  to a byte (hence the >> 3) — confirm against the u3e page tracker
  //
  memset((void*)u3P.dit_w, 0, u3a_pages >> 3);

  //  reinitialize checkpoint system
  //
  //  NB: callers must first u3e_hold() or u3e_wipe()
  //
  u3e_live(c3n, u3P.dir_c);

  //  reinitialize loom
  //
  u3m_pave(c3y, c3n);

  //  reinitialize jets
  //
  u3j_boot(c3y);
}
/* u3m_reclaim: clear persistent caches to reclaim memory
*/
void

View File

@ -191,317 +191,6 @@ u3s_jam_fib(u3_noun a, c3_w* bit_w)
return fib_u.buf_w;
}
/* _cs_jam_met_mat(): the jam bitwidth of an atom of bitwidth [wid_w]
**
**   equivalent to (head (rub a))
*/
static c3_d
_cs_jam_met_mat(c3_w wid_w)
{
  //  a zero-width atom mats to a single bit
  //
  if ( 0 == wid_w ) {
    return 1ULL;
  }

  //  otherwise: the payload bits plus twice the width-of-width
  //
  c3_d met_d = (c3_d)_cs_met0_w(wid_w);
  return (c3_d)wid_w + (2ULL * met_d);
}
/* _cs_jam_met: struct for tracking the jam bitwidth of a noun
*/
struct _cs_jam_met {
  u3p(u3h_root) har_p;   //  noun -> bit-cursor of its first occurrence
  u3p(u3h_root) bak_p;   //  noun -> backref cursor, for repeats worth encoding
  c3_d len_d;            //  running bitwidth of the whole encoding
};
/* _cs_jam_met_atom_cb(): bitwidth of atom or backref encoding for [a]
**
**   accumulates into met_u->len_d; the +1/+2 terms are the jam tag
**   bits (1 for an atom, 2 for a backref — cf. _cs_jam_buf_atom_cb()).
*/
static void
_cs_jam_met_atom_cb(u3_atom a, void* ptr_v)
{
  struct _cs_jam_met* met_u = ptr_v;
  c3_w a_w = u3r_met(0, a);
  u3_weak b = u3h_git(met_u->har_p, a);

  //  if we haven't seen [a], put cursor into [har_p]
  //
  if ( u3_none == b ) {
    u3h_put(met_u->har_p, a, u3i_chubs(1, &(met_u->len_d)));
    met_u->len_d += 1ULL + _cs_jam_met_mat(a_w);
  }
  else {
    c3_w b_w = u3r_met(0, b);

    //  if [a] is smaller than a backref, use directly
    //
    if ( a_w <= b_w ) {
      met_u->len_d += 1ULL + _cs_jam_met_mat(a_w);
    }
    //  otherwise, save backref
    //
    else {
      u3h_put(met_u->bak_p, a, u3k(b));
      met_u->len_d += 2ULL + _cs_jam_met_mat(b_w);
    }
  }
}
/* _cs_jam_met_cell_cb(): bitwidth of cell or backref encoding for [a]
**
**   returns c3y to recurse into [a]'s head and tail,
**   c3n to shortcircuit (a backref stands in for the subtree).
*/
static c3_o
_cs_jam_met_cell_cb(u3_noun a, void* ptr_v)
{
  struct _cs_jam_met* met_u = ptr_v;
  u3_weak b = u3h_git(met_u->har_p, a);

  //  if we haven't seen [a], put cursor into [har_p]
  //  (2 bits: the cell tag)
  //
  if ( u3_none == b ) {
    u3h_put(met_u->har_p, a, u3i_chubs(1, &(met_u->len_d)));
    met_u->len_d += 2ULL;
    return c3y;
  }
  //  otherwise, save backref and shortcircuit traversal
  //  (2 tag bits plus the mat-encoded cursor)
  //
  else {
    c3_w b_w = u3r_met(0, b);
    u3h_put(met_u->bak_p, a, u3k(b));
    met_u->len_d += 2ULL + _cs_jam_met_mat(b_w);
    return c3n;
  }
}
/* u3s_jam_met(): measure a noun for jam, calculating backrefs
**
**   returns the jam bitwidth of [a]; on return, *[bak_p] holds a map
**   from nouns to backref cursors (the caller must u3h_free() it —
**   cf. u3s_jam_file()).
*/
c3_d
u3s_jam_met(u3_noun a, u3p(u3h_root)* bak_p)
{
  struct _cs_jam_met met_u;
  met_u.har_p = u3h_new();
  met_u.bak_p = u3h_new();
  met_u.len_d = 0ULL;

  u3a_walk_fore(a, &met_u, _cs_jam_met_atom_cb,
                _cs_jam_met_cell_cb);

  //  the first-occurrence cursor map is internal; only backrefs escape
  //
  u3h_free(met_u.har_p);
  *bak_p = met_u.bak_p;
  return met_u.len_d;
}
/* _cs_jam_buf: struct for tracking the pre-measured jam of a noun
*/
struct _cs_jam_buf {
  u3p(u3h_root) bak_p;   //  backrefs, as computed by u3s_jam_met()
  c3_w bit_w;            //  bit-cursor into [buf_w]
  c3_w* buf_w;           //  output buffer, pre-allocated by the caller
};
/* _cs_jam_buf_chop(): chop [met_w] bits of [a] into [buf_u]
**
**   copies the low [met_w] bits of [a] into the buffer at the current
**   bit-cursor, then advances the cursor.
*/
static void
_cs_jam_buf_chop(struct _cs_jam_buf* buf_u, c3_w met_w, u3_noun a)
{
  u3r_chop(0, 0, met_w, buf_u->bit_w, buf_u->buf_w, a);
  buf_u->bit_w += met_w;
}
/* _cs_jam_buf_mat(): length-prefixed encode (mat) [a] into [buf_u]
*/
static void
_cs_jam_buf_mat(struct _cs_jam_buf* buf_u, u3_atom a)
{
  if ( 0 == a ) {
    //  zero encodes as a single 1 bit
    //
    _cs_jam_buf_chop(buf_u, 1, 1);
  }
  else {
    c3_w a_w = u3r_met(0, a);
    c3_w b_w = _cs_met0_w(a_w);

    //  length prefix: [b_w] zero bits, then a 1
    //
    //  NOTE(review): `1 << b_w` is UB if b_w == 32 (atoms of 2^31 bits
    //  or more) — presumably unreachable in practice; confirm
    //
    _cs_jam_buf_chop(buf_u, b_w+1, 1 << b_w);

    //  the bitwidth itself, minus its (implied) high bit
    //
    _cs_jam_buf_chop(buf_u, b_w-1, a_w & ((1 << (b_w-1)) - 1));

    //  the atom's payload bits
    //
    _cs_jam_buf_chop(buf_u, a_w, a);
  }
}
/* _cs_jam_buf_atom_cb(): encode atom or backref
*/
static void
_cs_jam_buf_atom_cb(u3_atom a, void* ptr_v)
{
  struct _cs_jam_buf* buf_u = ptr_v;
  u3_weak b = u3h_git(buf_u->bak_p, a);

  //  if [a] has no backref (or this is the referent), encode atom:
  //  a 1-bit atom tag (0), then the mat-encoded atom
  //
  if ( (u3_none == b) ||
       (u3r_word(0, b) == buf_u->bit_w) )
  {
    _cs_jam_buf_chop(buf_u, 1, 0);
    _cs_jam_buf_mat(buf_u, a);
  }
  else {
    c3_w a_w = u3r_met(0, a);
    c3_w b_w = u3r_met(0, b);

    //  if [a] is smaller than the backref, encode atom
    //
    if ( a_w <= b_w ) {
      _cs_jam_buf_chop(buf_u, 1, 0);
      _cs_jam_buf_mat(buf_u, a);
    }
    //  otherwise, encode backref: a 2-bit tag (3), then the cursor
    //
    else {
      _cs_jam_buf_chop(buf_u, 2, 3);
      _cs_jam_buf_mat(buf_u, b);
    }
  }
}
/* _cs_jam_buf_cell_cb(): encode cell or backref
**
**   returns c3y to continue into [a]'s head and tail,
**   c3n to shortcircuit traversal.
*/
static c3_o
_cs_jam_buf_cell_cb(u3_noun a, void* ptr_v)
{
  struct _cs_jam_buf* buf_u = ptr_v;
  u3_weak b = u3h_git(buf_u->bak_p, a);

  //  if [a] has no backref (or this is the referent), encode cell:
  //  a 2-bit cell tag (1); head and tail follow via the traversal
  //
  if ( (u3_none == b) ||
       (u3r_word(0, b) == buf_u->bit_w) )
  {
    _cs_jam_buf_chop(buf_u, 2, 1);
    return c3y;
  }
  //  otherwise, encode backref and shortcircuit traversal
  //
  else {
    _cs_jam_buf_chop(buf_u, 2, 3);
    _cs_jam_buf_mat(buf_u, b);
    return c3n;
  }
}
/* u3s_jam_buf(): jam [a] into pre-allocated [buf_w], without allocation
**
**   using backrefs in [bak_p], as computed by u3s_jam_met()
**   NB [buf_w] must be pre-allocated with sufficient space
**
**   XX can only encode up to c3_w bits, due to use of chop
*/
void
u3s_jam_buf(u3_noun a, u3p(u3h_root) bak_p, c3_w* buf_w)
{
  struct _cs_jam_buf buf_u;
  buf_u.bak_p = bak_p;
  buf_u.buf_w = buf_w;
  //  bit-cursor starts at the front of the buffer
  //
  buf_u.bit_w = 0;

  //  this is in fact safe under normal usage, as
  //  the stack will have been checked in u3s_jam_met()
  //
  u3a_walk_fore_unsafe(a, &buf_u, _cs_jam_buf_atom_cb,
                       _cs_jam_buf_cell_cb);
}
/* u3s_jam_file(): jam [a] into a file at [pas_c], overwriting
**
**   measures the jam, sizes the file, mmaps it, serializes in place,
**   syncs, then trims padding. returns c3n (removing any partial file)
**   on failure.
*/
c3_o
u3s_jam_file(u3_noun a, c3_c* pas_c)
{
  u3p(u3h_root) bak_p;
  c3_i fid_i = open(pas_c, O_RDWR | O_CREAT | O_TRUNC, 0644);
  c3_w byt_w, wor_w, len_w;

  if ( fid_i < 0 ) {
    fprintf(stderr, "jam: open %s: %s\r\n", pas_c, strerror(errno));
    return c3n;
  }

  //  measure the jam to size the file, collecting backrefs in [bak_p]
  //
  {
    c3_d len_d = u3s_jam_met(a, &bak_p);

    if ( len_d > 0xffffffffULL ) {
      fprintf(stderr, "jam: overflow c3_w: %" PRIu64 "\r\n", len_d);
      //  fix: this path previously returned without close(fid_i),
      //  leaking the fd and leaving an empty file behind; [error]
      //  closes, unlinks, and frees [bak_p]
      //
      goto error;
    }

    //  length in bytes a la u3i_bytes
    //
    byt_w = (c3_w)(len_d >> 3ULL);
    if ( len_d > (c3_d)(byt_w << 3) ) {
      byt_w++;
    }

    //  length in words
    //
    wor_w = (c3_w)(len_d >> 5ULL);
    if ( len_d > (c3_d)(wor_w << 5) ) {
      wor_w++;
    }

    //  byte-length of word-length
    //
    len_w = 4 * wor_w;
  }

  //  grow [fid_i] to [len_w]
  //
  if ( 0 != ftruncate(fid_i, len_w) ) {
    fprintf(stderr, "jam: ftruncate grow %s: %s\r\n", pas_c, strerror(errno));
    goto error;
  }

  //  mmap [fid_i], jam into it, sync, and unmap
  //
  {
    c3_w* buf_w;
    void* ptr_v = mmap(0, len_w, PROT_READ|PROT_WRITE, MAP_SHARED, fid_i, 0);

    if ( MAP_FAILED == ptr_v ) {
      fprintf(stderr, "jam: mmap %s: %s\r\n", pas_c, strerror(errno));
      goto error;
    }

    buf_w = ptr_v;
    u3s_jam_buf(a, bak_p, buf_w);

    if ( 0 != msync(ptr_v, len_w, MS_SYNC) ) {
      fprintf(stderr, "jam: msync %s: %s\r\n", pas_c, strerror(errno));
      //  XX ignore return?
      //
      munmap(ptr_v, len_w);
      goto error;
    }

    if ( 0 != munmap(ptr_v, len_w) ) {
      fprintf(stderr, "jam: munmap %s: %s\r\n", pas_c, strerror(errno));
      //  XX fatal error?
      //
      goto error;
    }
  }

  //  shrink [fid_i] to [byt_w], trimming the word-padding past the
  //  final byte of the jam
  //
  if ( 0 != ftruncate(fid_i, byt_w) ) {
    fprintf(stderr, "jam: ftruncate shrink %s: %s\r\n", pas_c, strerror(errno));
    goto error;
  }

  {
    close(fid_i);
    u3h_free(bak_p);
    return c3y;
  }

  error: {
    close(fid_i);
    unlink(pas_c);
    u3h_free(bak_p);
    return c3n;
  }
}
typedef struct _jam_xeno_s {
u3p(u3h_root) har_p;
ur_bsw_t rit_u;