Merge remote-tracking branch 'origin/jam-cue-rock' into rc

This commit is contained in:
Philip Monk 2019-12-02 02:08:37 -08:00
commit 0431c3c073
No known key found for this signature in database
GPG Key ID: B66E1F02604E44EC
25 changed files with 1478 additions and 610 deletions

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1129a8962ad6990d24478fbdd734cffe6761b1903106098bf626e40070ce358a
size 10212672
oid sha256:b2b9bacfbc5d66cc383c2504684b6662913b816c279376917a1e4a68eb04e476
size 10213849

View File

@ -43,6 +43,10 @@ herb ./ship -d '+test, =seed `@uvI`(shaz %reproducible)' |
herb ./ship -p hood -d '+hood/mass'
herb ./ship -d '~& ~ ~& %start-pack ~'
herb ./ship -p hood -d '+hood/pack'
herb ./ship -d '~& ~ ~& %finish-pack ~'
shutdown
# Collect output

View File

@ -87,9 +87,9 @@
==
::
=^ cards-1=(list card) old-state
%- (slog leaf+"upgrading eth-watcher from %1" ~)
?. ?=(%1 -.old-state)
`old-state
%- (slog leaf+"upgrading eth-watcher from %1" ~)
:_ old-state(- %2)
%+ turn ~(tap by dogs.old-state)
|= [=path dog=watchdog]

View File

@ -0,0 +1,13 @@
:: Helm: compact memory
::
:::: /hoon/pack/hood/gen
::
/? 310
::
::::
::
::  %say generator taking no arguments; produces a %helm-pack
::  poke for the hood agent, asking the runtime to compact memory
::
:- %say
|= $: {now/@da eny/@uvJ bec/beak}
{arg/~ ~}
==
[%helm-pack ~]

View File

@ -78,7 +78,7 @@
::
++ poke-mass
|= ~ =< abet
(flog %crud %hax-heft ~)
(emit %pass /heft %arvo %d %flog %heft ~)
::
++ poke-automass
|= recur=@dr
@ -90,6 +90,10 @@
|= ~
abet:(emit %pass way.mass-timer.sez %arvo %b %rest nex.mass-timer.sez)
::
++ poke-pack
|= ~ =< abet
(emit %pass /pack %arvo %d %flog %pack ~)
::
++ take-wake-automass
|= [way=wire error=(unit tang)]
?^ error
@ -189,6 +193,7 @@
?+ mark ~|([%poke-helm-bad-mark mark] !!)
%helm-hi =;(f (f !<(_+<.f vase)) poke-hi)
%helm-mass =;(f (f !<(_+<.f vase)) poke-mass)
%helm-pack =;(f (f !<(_+<.f vase)) poke-pack)
%helm-reload =;(f (f !<(_+<.f vase)) poke-reload)
%helm-reload-desk =;(f (f !<(_+<.f vase)) poke-reload-desk)
%helm-reset =;(f (f !<(_+<.f vase)) poke-reset)

View File

@ -200,6 +200,7 @@
$blew (send %rez p.p.kyz q.p.kyz)
$heft heft
$lyra (dump kyz)
$pack (dump kyz)
$veer (dump kyz)
$verb (dump kyz)
==

View File

@ -710,6 +710,7 @@
{$logo ~} :: logout
{$lyra p/@t q/@t} :: upgrade kernel
{$mass p/mass} :: memory usage
{$pack ~} :: compact memory
{$veer p/@ta q/path r/@t} :: install vane
{$verb ~} :: verbose mode
== ::
@ -728,6 +729,7 @@
$>(%init vane-task) :: after gall ready
{$lyra p/@t q/@t} :: upgrade kernel
{$noop ~} :: no operation
{$pack ~} :: compact memory
{$talk p/tank} ::
{$text p/tape} ::
{$veer p/@ta q/path r/@t} :: install vane
@ -792,6 +794,7 @@
$% {$crud p/@tas q/(list tank)} ::
{$heft ~} ::
{$lyra p/@t q/@t} :: upgrade kernel
{$pack ~} :: compact memory
{$text p/tape} ::
{$veer p/@ta q/path r/@t} :: install vane
{$verb ~} :: verbose mode

View File

@ -24,6 +24,7 @@
# include "noun/nock.h" // u3n: nock execution
# include "noun/options.h" // u3o: config options
# include "noun/retrieve.h" // u3r: noun access (error returns)
# include "noun/serial.h" // u3s: serialization
# include "noun/trace.h" // u3t: profiling / tracing
# include "noun/xtract.h" // u3x: noun access (error crashes)
# include "noun/vortex.h" // u3v: arvo kernel

View File

@ -803,6 +803,7 @@
# define c3__outd c3_s4('o','u','t','d')
# define c3__ov c3_s2('o','v')
# define c3__over c3_s4('o','v','e','r')
# define c3__pack c3_s4('p','a','c','k')
# define c3__pair c3_s4('p','a','i','r')
# define c3__palm c3_s4('p','a','l','m')
# define c3__palq c3_s4('p','a','l','q')

View File

@ -505,4 +505,21 @@
u3_noun
u3a_mint(c3_w* sal_w, c3_w len_w);
/* u3a_walk_fore(): preorder traversal, visits every limb of a noun.
**
** cells are visited *before* their heads and tails
** and can shortcircuit traversal by returning [c3n]
*/
void
u3a_walk_fore(u3_noun a,
void* ptr_v,
void (*pat_f)(u3_atom, void*),
c3_o (*cel_f)(u3_noun, void*));
/* u3a_walk_fore_unsafe(): u3a_walk_fore(), without overflow checks
*/
void
u3a_walk_fore_unsafe(u3_noun a,
void* ptr_v,
void (*pat_f)(u3_atom, void*),
c3_o (*cel_f)(u3_noun, void*));

View File

@ -73,13 +73,27 @@
c3_o
u3e_live(c3_o nuu_o, c3_c* dir_c);
/* u3e_live_new(): start the persistence system.
*/
c3_o
u3e_live_new(c3_c* dir_c);
/* u3e_dirty(): count dirty pages.
*/
c3_w
u3e_dirty(void);
/* u3e_hold(): backup memory images
*/
c3_o
u3e_hold(void);
/* u3e_drop(): remove backed-up memory images
*/
c3_o
u3e_drop(void);
/* u3e_fall(): restore memory images
*/
c3_o
u3e_fall(void);
/* u3e_wipe(): discard memory images
*/
c3_o
u3e_wipe(void);

View File

@ -190,6 +190,16 @@
void
u3j_ream(void);
/* u3j_stay(): extract cold state
*/
u3_noun
u3j_stay(void);
/* u3j_load(): inject cold state
*/
void
u3j_load(u3_noun rel);
/* u3j_reap(): promote jet state.
*/
void

View File

@ -140,3 +140,23 @@
*/
void
u3m_reclaim(void);
/* u3m_rock_stay(): jam state into [dir_c] at [evt_d]
*/
c3_o
u3m_rock_stay(c3_c* dir_c, c3_d evt_d);
/* u3m_rock_load(): load state from [dir_c] at [evt_d]
*/
c3_o
u3m_rock_load(c3_c* dir_c, c3_d evt_d);
/* u3m_rock_drop(): delete saved state from [dir_c] at [evt_d]
*/
c3_o
u3m_rock_drop(c3_c* dir_c, c3_d evt_d);
/* u3m_wipe(): purge and reinitialize loom, with checkpointing
*/
void
u3m_wipe(void);

View File

@ -0,0 +1,36 @@
/* i/n/serial.h
**
*/
/* Noun serialization. All noun arguments RETAINED.
*/
/* u3s_jam_fib(): jam without atom allocation.
**
** returns atom-suitable words, and *bit_w will have
** the length (in bits). return should be freed with u3a_wfree().
*/
c3_w*
u3s_jam_fib(u3_noun a, c3_w* bit_w);
/* u3s_jam_met(): measure a noun for jam, calculating backrefs
*/
c3_d
u3s_jam_met(u3_noun a, u3p(u3h_root)* bak_p);
/* u3s_jam_buf(): jam [a] into [buf_w], without allocation
**
** using backrefs in [bak_p], as computed by u3s_jam_met
** can only encode up to c3_w bits
*/
void
u3s_jam_buf(u3_noun a, u3p(u3h_root) bak_p, c3_w* buf_w);
/* u3s_jam_file(): jam [a] into a file, overwriting
*/
c3_o
u3s_jam_file(u3_noun a, c3_c* pas_c);
/* u3s_cue(): cue [a]
*/
u3_noun
u3s_cue(u3_atom a);

View File

@ -13,8 +13,8 @@
u3_noun wen; // current time, as text
u3_noun sev_l; // instance number
u3_noun sen; // instance string
u3_noun our; // identity
u3_noun fak; // c3y is fake
u3_noun our; // identity XX Remove
u3_noun fak; // c3y is fake XX Remove
u3_noun roc; // kernel core
} u3v_arvo;

View File

@ -3,243 +3,22 @@
*/
#include "all.h"
#define CUE_ROOT 0
#define CUE_HEAD 1
#define CUE_TAIL 2
// stack frame for recording head vs tail iteration
//
// In Hoon, this structure would be as follows:
//
// $% [%root ~]
// [%head cell-cursor=@]
// [%tail cell-cursor=@ hed-width=@ hed-value=*]
// ==
//
typedef struct cueframe
{
c3_y tag_y;
u3_atom cur;
u3_atom wid;
u3_noun hed;
} cueframe;
static inline void
_cue_push(c3_ys mov,
c3_ys off,
c3_y tag_y,
u3_atom cur,
u3_atom wid,
u3_noun hed)
{
u3R->cap_p += mov;
// ensure we haven't overflowed the stack
// (off==0 means we're on a north road)
//
if ( 0 == off ) {
if( !(u3R->cap_p > u3R->hat_p) ) {
u3m_bail(c3__meme);
}
}
else {
if( !(u3R->cap_p < u3R->hat_p) ) {
u3m_bail(c3__meme);
}
}
cueframe* fam_u = u3to(cueframe, u3R->cap_p + off);
fam_u->tag_y = tag_y;
fam_u->cur = cur;
fam_u->wid = wid;
fam_u->hed = hed;
}
static inline cueframe
_cue_pop(c3_ys mov, c3_ys off)
{
cueframe* fam_u = u3to(cueframe, u3R->cap_p + off);
u3R->cap_p -= mov;
return *fam_u;
}
u3_noun
u3qe_cue(u3_atom a)
{
// initialize signed stack offsets (relative to north/south road)
//
c3_ys mov, off;
{
c3_y wis_y = c3_wiseof(cueframe);
c3_o nor_o = u3a_is_north(u3R);
mov = ( c3y == nor_o ? -wis_y : wis_y );
off = ( c3y == nor_o ? 0 : -wis_y );
}
// initialize a hash table for dereferencing backrefs
//
u3p(u3h_root) har_p = u3h_new();
// stash the current stack post
//
u3p(cueframe) cap_p = u3R->cap_p;
// push the (only) ROOT stack frame (our termination condition)
//
_cue_push(mov, off, CUE_ROOT, 0, 0, 0);
// initialize cursor to bit-position 0
//
u3_atom cur = 0;
// the bitwidth and product from reading at cursor
//
u3_atom wid, pro;
// read from atom at cursor
//
// TRANSFER .cur
//
advance: {
// read tag bit at cur
//
c3_y tag_y = u3qc_cut(0, cur, 1, a);
// low bit unset, (1 + cur) points to an atom
//
// produce atom and the width we read
//
if ( 0 == tag_y ) {
u3_noun bur;
{
u3_noun x = u3qa_inc(cur);
bur = u3qe_rub(x, a);
u3z(x);
}
pro = u3k(u3t(bur));
u3h_put(har_p, cur, u3k(pro));
wid = u3qa_inc(u3h(bur));
u3z(bur);
goto retreat;
}
else {
// read tag bit at (1 + cur)
//
{
u3_noun x = u3qa_inc(cur);
tag_y = u3qc_cut(0, x, 1, a);
u3z(x);
}
// next bit set, (2 + cur) points to a backref
//
// produce referenced value and the width we read
//
if ( 1 == tag_y ) {
u3_noun bur;
{
u3_noun x = u3ka_add(2, cur);
bur = u3qe_rub(x, a);
u3z(x);
}
pro = u3h_get(har_p, u3k(u3t(bur)));
if ( u3_none == pro ) {
return u3m_bail(c3__exit);
}
wid = u3qa_add(2, u3h(bur));
u3z(bur);
goto retreat;
}
// next bit unset, (2 + cur) points to the head of a cell
//
// push a frame to mark HEAD recursion and read the head
//
else {
_cue_push(mov, off, CUE_HEAD, cur, 0, 0);
cur = u3qa_add(2, cur);
goto advance;
}
}
}
// consume: popped stack frame, .wid and .pro from above.
//
// TRANSFER .wid, .pro, and contents of .fam_u
// (.cur is in scope, but we have already lost our reference to it)
//
retreat: {
cueframe fam_u = _cue_pop(mov, off);
switch ( fam_u.tag_y ) {
default: {
c3_assert(0);
}
// fam_u is our stack root, we're done.
//
case CUE_ROOT: {
break;
}
// .wid and .pro are the head of the cell at fam_u.cur.
// save them (and the cell cursor) in a TAIL frame,
// set the cursor to the tail and read there.
//
case CUE_HEAD: {
_cue_push(mov, off, CUE_TAIL, fam_u.cur, wid, pro);
cur = u3ka_add(2, u3qa_add(wid, fam_u.cur));
goto advance;
}
// .wid and .pro are the tail of the cell at fam_u.cur,
// construct the cell, memoize it, and produce it along with
// its total width (as if it were a read from above).
//
case CUE_TAIL: {
pro = u3nc(fam_u.hed, pro);
u3h_put(har_p, fam_u.cur, u3k(pro));
wid = u3ka_add(2, u3ka_add(wid, fam_u.wid));
goto retreat;
}
}
}
u3z(wid);
u3h_free(har_p);
// sanity check
//
c3_assert( u3R->cap_p == cap_p );
return pro;
return u3s_cue(a);
}
u3_noun
u3we_cue(u3_noun cor)
{
u3_noun a;
if ( (u3_none == (a = u3r_at(u3x_sam, cor))) ) {
return u3m_bail(c3__fail);
} else {
return u3qe_cue(a);
}
return u3qe_cue(u3x_at(u3x_sam, cor));
}
u3_noun
u3ke_cue(u3_atom a)
{
u3_noun b = u3qe_cue(a);
u3z(a);
return b;
}

View File

@ -3,201 +3,31 @@
*/
#include "all.h"
typedef struct {
c3_w a_w;
c3_w b_w;
c3_w bit_w;
c3_w* wor_w;
} _jam_buf;
static void
_jam_buf_grow(_jam_buf* buf_u, c3_w mor_w)
u3_noun
u3qe_jam(u3_atom a)
{
c3_w wan_w = buf_u->bit_w + mor_w;
if ( wan_w < mor_w ) {
// overflowed c3_w bits
u3m_bail(c3__fail);
}
if ( wan_w > buf_u->a_w ) {
c3_w old_w, new_w, c_w = 0;
old_w = buf_u->a_w >> 5;
if ( (old_w << 5) != buf_u->a_w ) {
++old_w;
}
// fibonacci growth
while ( c_w < wan_w ) {
c_w = buf_u->a_w + buf_u->b_w;
buf_u->b_w = buf_u->a_w;
buf_u->a_w = c_w;
}
new_w = c_w >> 5;
if ( (new_w << 5) != c_w ) {
++new_w;
}
buf_u->wor_w = u3a_wealloc(buf_u->wor_w, new_w);
memset(buf_u->wor_w + old_w, 0, (new_w - old_w) * sizeof(c3_w));
c3_w bit_w, *sal_w;
c3_w* wor_w = u3s_jam_fib(a, &bit_w);
c3_w len_w = bit_w >> 5;
if ( (len_w << 5) != bit_w ) {
++len_w;
}
sal_w = u3a_slab(len_w);
memcpy(sal_w, wor_w, len_w*sizeof(c3_w));
u3a_wfree(wor_w);
return u3a_moot(sal_w);
}
static void
_jam_buf_chop(_jam_buf* buf_u, c3_w met_w, u3_noun a)
u3_noun
u3we_jam(u3_noun cor)
{
c3_w bit_w = buf_u->bit_w;
_jam_buf_grow(buf_u, met_w);
u3r_chop(0, 0, met_w, bit_w, buf_u->wor_w, a);
buf_u->bit_w += met_w;
return u3qe_jam(u3x_at(u3x_sam, cor));
}
static void
_jam_buf_atom(_jam_buf* buf_u, u3_noun a)
u3_atom
u3ke_jam(u3_noun a)
{
if ( 0 == a ) {
_jam_buf_chop(buf_u, 1, 1);
}
else {
c3_w b_w = u3r_met(0, a),
c_w = u3r_met(0, b_w);
c3_assert(c_w <= 32);
_jam_buf_chop(buf_u, c_w+1, 1 << c_w);
_jam_buf_chop(buf_u, c_w-1, b_w & ((1 << (c_w-1)) - 1));
_jam_buf_chop(buf_u, b_w, a);
}
u3_atom b = u3qe_jam(a);
u3z(a);
return b;
}
/* functions
*/
/* u3qe_jam_buf(): jam without atom allocation. returns
* atom-suitable words, and *bit_w will
* have the length (in bits). return should
* be freed with u3a_wfree().
*/
c3_w*
u3qe_jam_buf(u3_noun a, c3_w* bit_w)
{
u3p(u3h_root) har_p = u3h_new();
c3_o nor_o = u3a_is_north(u3R);
c3_y wis_y = c3_wiseof(u3_noun);
c3_ys mov = ( c3y == nor_o ? -wis_y : wis_y );
c3_ys off = ( c3y == nor_o ? 0 : -wis_y );
u3_noun* top, *don = u3to(u3_noun, u3R->cap_p + off);
u3_weak c;
c3_o cel_o;
c3_w len_w;
_jam_buf buf_u;
buf_u.a_w = 144; // fib(12) is small enough to be reasonably fast to allocate.
buf_u.b_w = 89; // fib(11) is needed to get fib(13).
len_w = buf_u.a_w >> 5;
if ( (len_w << 5) != buf_u.a_w ) {
++len_w;
}
buf_u.wor_w = u3a_walloc(len_w);
buf_u.bit_w = 0;
memset(buf_u.wor_w, 0, len_w * sizeof(c3_w));
u3R->cap_p += mov;
top = u3to(u3_noun, u3R->cap_p + off);
*top = a;
while ( top != don ) {
a = *top;
cel_o = u3du(a);
c = u3h_git(har_p, a);
if ( u3_none != c ) {
if ( c3y == cel_o ) {
_jam_buf_chop(&buf_u, 2, 3);
_jam_buf_atom(&buf_u, c);
}
else {
if ( u3r_met(0, a) <= u3r_met(0, c) ) {
_jam_buf_chop(&buf_u, 1, 0);
_jam_buf_atom(&buf_u, a);
}
else {
_jam_buf_chop(&buf_u, 2, 3);
_jam_buf_atom(&buf_u, c);
}
}
u3R->cap_p -= mov;
top = u3to(u3_noun, u3R->cap_p + off);
}
else {
u3h_put(har_p, a, buf_u.bit_w);
if ( c3n == cel_o ) {
_jam_buf_chop(&buf_u, 1, 0);
_jam_buf_atom(&buf_u, a);
u3R->cap_p -= mov;
top = u3to(u3_noun, u3R->cap_p + off);
}
else {
_jam_buf_chop(&buf_u, 2, 1);
*top = u3t(a);
u3R->cap_p += mov;
// XX disabled for performance
// may be unnecessary, u3h_put calls u3r_mug,
// which uses and checks the stack
//
#if 0
if ( 0 == off ) {
if( !(u3R->cap_p > u3R->hat_p) ) {
u3m_bail(c3__meme);
}
}
else {
if( !(u3R->cap_p < u3R->hat_p) ) {
u3m_bail(c3__meme);
}
}
#endif
top = u3to(u3_noun, u3R->cap_p + off);
*top = u3h(a);
}
}
}
*bit_w = buf_u.bit_w;
u3h_free(har_p);
return buf_u.wor_w;
}
u3_noun
u3qe_jam(u3_atom a)
{
c3_w bit_w, *sal_w;
c3_w* wor_w = u3qe_jam_buf(a, &bit_w);
c3_w len_w = bit_w >> 5;
if ( (len_w << 5) != bit_w ) {
++len_w;
}
sal_w = u3a_slab(len_w);
memcpy(sal_w, wor_w, len_w*sizeof(c3_w));
u3a_wfree(wor_w);
return u3a_moot(sal_w);
}
u3_noun
u3we_jam(u3_noun cor)
{
u3_noun a;
if ( (u3_none == (a = u3r_at(u3x_sam, cor))) ) {
return u3m_bail(c3__fail);
} else {
return u3qe_jam(a);
}
}
u3_atom
u3ke_jam(u3_noun a)
{
u3_atom b = u3qe_jam(a);
u3z(a);
return b;
}

View File

@ -2193,3 +2193,130 @@ u3a_lop(c3_w lab_w)
{
}
#endif
/* u3a_walk_fore(): preorder traversal, visits every limb of a noun.
**
**   cells are visited *before* their heads and tails
**   and can shortcircuit traversal by returning [c3n]
**
**   NB: recursion state lives on the road stack; bails with
**   c3__meme if the stack would collide with the heap.
*/
void
u3a_walk_fore(u3_noun a,
              void* ptr_v,
              void (*pat_f)(u3_atom, void*),
              c3_o (*cel_f)(u3_noun, void*))
{
  //  initialize signed stack offsets (relative to N or S road)
  //
  //    a north road grows the stack in the opposite direction from
  //    a south road, so push direction and frame offset are signed
  //
  c3_o nor_o = u3a_is_north(u3R);
  c3_ys mov_ys, off_ys;
  {
    c3_y wis_y = c3_wiseof(u3_noun);
    mov_ys = ( c3y == nor_o ? -wis_y : wis_y );
    off_ys = ( c3y == nor_o ? 0 : -wis_y );
  }

  //  set stack root, push argument
  //
  //    [don] marks the empty-stack position (termination condition)
  //
  u3_noun *top, *don;
  {
    don = u3to(u3_noun, u3R->cap_p + off_ys);
    u3R->cap_p += mov_ys;
    top = u3to(u3_noun, u3R->cap_p + off_ys);
    *top = a;
  }

  while ( top != don ) {
    //  visit an atom, then pop the stack
    //
    if ( c3y == u3a_is_atom(a) ) {
      pat_f(a, ptr_v);
      u3R->cap_p -= mov_ys;
      top = u3to(u3_noun, u3R->cap_p + off_ys);
    }
    //  visit a cell; if the callback returns c3n, pop the stack
    //  (shortcircuiting traversal of this subtree)
    //
    else if ( c3n == cel_f(a, ptr_v) ) {
      u3R->cap_p -= mov_ys;
      top = u3to(u3_noun, u3R->cap_p + off_ys);
    }
    //  otherwise, push the tail and continue into the head
    //
    else {
      *top = u3t(a);
      u3R->cap_p += mov_ys;

      //  check for stack/heap collision after growing the stack
      //
      if ( c3y == nor_o ) {
        if( !(u3R->cap_p > u3R->hat_p) ) {
          u3m_bail(c3__meme);
        }
      }
      else {
        if( !(u3R->cap_p < u3R->hat_p) ) {
          u3m_bail(c3__meme);
        }
      }

      top = u3to(u3_noun, u3R->cap_p + off_ys);
      *top = u3h(a);
    }
    a = *top;
  }
}
/* u3a_walk_fore_unsafe(): u3a_walk_fore(), without overflow checks
**
**   identical traversal, but skips the stack/heap collision check
**   on push — callers must know the stack is deep enough
**   (e.g. because a checked walk of the same noun already ran).
*/
void
u3a_walk_fore_unsafe(u3_noun a,
                     void* ptr_v,
                     void (*pat_f)(u3_atom, void*),
                     c3_o (*cel_f)(u3_noun, void*))
{
  //  initialize signed stack offsets (relative to N or S road)
  //
  c3_ys mov_ys, off_ys;
  {
    c3_y wis_y = c3_wiseof(u3_noun);
    c3_o nor_o = u3a_is_north(u3R);
    mov_ys = ( c3y == nor_o ? -wis_y : wis_y );
    off_ys = ( c3y == nor_o ? 0 : -wis_y );
  }

  //  set stack root, push argument
  //
  u3_noun *top, *don;
  {
    don = u3to(u3_noun, u3R->cap_p + off_ys);
    u3R->cap_p += mov_ys;
    top = u3to(u3_noun, u3R->cap_p + off_ys);
    *top = a;
  }

  while ( top != don ) {
    //  visit an atom, then pop the stack
    //
    if ( c3y == u3a_is_atom(a) ) {
      pat_f(a, ptr_v);
      u3R->cap_p -= mov_ys;
      top = u3to(u3_noun, u3R->cap_p + off_ys);
    }
    //  visit a cell; if the callback returns c3n, pop the stack
    //
    else if ( c3n == cel_f(a, ptr_v) ) {
      u3R->cap_p -= mov_ys;
      top = u3to(u3_noun, u3R->cap_p + off_ys);
    }
    //  otherwise, push the tail and continue into the head
    //  (no overflow check here — see function comment)
    //
    else {
      *top = u3t(a);
      u3R->cap_p += mov_ys;
      top = u3to(u3_noun, u3R->cap_p + off_ys);
      *top = u3h(a);
    }
    a = *top;
  }
}

View File

@ -878,3 +878,117 @@ u3e_live(c3_o nuu_o, c3_c* dir_c)
}
return nuu_o;
}
/* _ce_image_move(): rename a checkpoint image to/from its backup.
**
**   [bak_o]: c3y to back up  (<name>.bin    -> <name>.bin.bak)
**            c3n to restore  (<name>.bin.bak -> <name>.bin)
*/
static c3_o
_ce_image_move(u3e_image* img_u, c3_o bak_o)
{
  c3_c old_c[8193];
  c3_c new_c[8193];
  snprintf(old_c, 8192, "%s/.urb/chk/%s.bin", u3P.dir_c, img_u->nam_c);
  snprintf(new_c, 8192, "%s.bak", old_c);

  c3_i ret_i;

  if ( c3y == bak_o ) {
    ret_i = rename(old_c, new_c);
  }
  else {
    ret_i = rename(new_c, old_c);
  }

  if ( 0 != ret_i ) {
    u3l_log("loom: %s %s failed: %s\r\n", ( c3y == bak_o ) ? "hold" : "fall",
            img_u->nam_c, strerror(errno));
    return c3n;
  }

  return c3y;
}
/* u3e_hold(): backup memory images
**
**   renames both checkpoint images to their .bak names;
**   stops at the first failure.
*/
c3_o
u3e_hold(void)
{
  if ( c3n == _ce_image_move(&u3P.nor_u, c3y) ) {
    return c3n;
  }

  if ( c3n == _ce_image_move(&u3P.sou_u, c3y) ) {
    return c3n;
  }

  //  XX sync directory

  return c3y;
}
/* _ce_image_drop(): unlink the .bak backup of a checkpoint image.
*/
static c3_o
_ce_image_drop(u3e_image* img_u)
{
  c3_c pat_c[8193];
  snprintf(pat_c, 8192, "%s/.urb/chk/%s.bin.bak", u3P.dir_c, img_u->nam_c);

  if ( 0 != unlink(pat_c) ) {
    u3l_log("loom: drop %s failed: %s\r\n", img_u->nam_c, strerror(errno));
    return c3n;
  }

  return c3y;
}
/* u3e_drop(): remove backed-up memory images
**
**   unlinks both .bak images; stops at the first failure.
*/
c3_o
u3e_drop(void)
{
  if ( c3n == _ce_image_drop(&u3P.nor_u) ) {
    return c3n;
  }

  return _ce_image_drop(&u3P.sou_u);
}
/* u3e_fall(): restore memory images
**
**   renames both .bak images back into place;
**   stops at the first failure.
*/
c3_o
u3e_fall(void)
{
  if ( c3n == _ce_image_move(&u3P.nor_u, c3n) ) {
    return c3n;
  }

  if ( c3n == _ce_image_move(&u3P.sou_u, c3n) ) {
    return c3n;
  }

  //  XX sync directory

  return c3y;
}
/* u3e_wipe(): discard memory images
**
**   truncates both image files to zero, syncs, and closes them.
*/
c3_o
u3e_wipe(void)
{
  // XX ensure no patch files are present

  if ( 0 != ftruncate(u3P.nor_u.fid_i, 0) ) {
    u3l_log("loom: wipe %s failed: %s\r\n", u3P.nor_u.nam_c, strerror(errno));
    return c3n;
  }

  if ( 0 != ftruncate(u3P.sou_u.fid_i, 0) ) {
    u3l_log("loom: wipe %s failed: %s\r\n", u3P.sou_u.nam_c, strerror(errno));
    return c3n;
  }

  //  flush truncation to disk before closing the descriptors
  //
  c3_sync(u3P.nor_u.fid_i);
  c3_sync(u3P.sou_u.fid_i);

  close(u3P.nor_u.fid_i);
  close(u3P.sou_u.fid_i);

  // XX sync directory

  return c3y;
}

View File

@ -126,7 +126,7 @@ _cj_bash(u3_noun bat)
c3_y* fat_y;
c3_y dig_y[32];
wor_w = u3qe_jam_buf(bat, &bit_w);
wor_w = u3s_jam_fib(bat, &bit_w);
met_w = bit_w >> 3;
if ( bit_w != met_w << 3 ) {
++met_w;
@ -2191,6 +2191,33 @@ u3j_ream(void)
u3h_walk(u3R->jed.han_p, _cj_ream_hank);
}
/* u3j_stay(): extract cold state
**
**   produces a list of entries tapped from the cold jet registry,
**   suitable for re-injection via u3j_load(). RETAINS nothing;
**   the result is a fresh noun owned by the caller.
*/
u3_noun
u3j_stay(void)
{
  u3_noun rel = u3_nul;

  //  must be called on the home road
  //
  c3_assert(u3R == &(u3H->rod_u));

  u3h_walk_with(u3R->jed.cod_p, _cj_warm_tap, &rel);
  return rel;
}
/* u3j_load(): inject cold state
**
**   walks the list [rel] (as produced by u3j_stay()), inserting
**   each [key value] pair into the cold jet registry.
**   TRANSFERS [rel].
*/
void
u3j_load(u3_noun rel)
{
  u3_noun ler = rel;
  u3_noun lor;

  while ( u3_nul != ler ) {
    u3x_cell(ler, &lor, &ler);
    u3h_put(u3R->jed.cod_p, u3h(lor), u3k(u3t(lor)));
  }

  u3z(rel);
}
/* _cj_fink_mark(): mark a u3j_fink for gc.
*/
static c3_w

View File

@ -1659,8 +1659,6 @@ u3m_boot(c3_c* dir_c)
/* Basic initialization.
*/
memset(u3A, 0, sizeof(*u3A));
u3A->our = u3_none;
return 0;
}
}
@ -1692,11 +1690,124 @@ u3m_boot_lite(void)
/* Basic initialization.
*/
memset(u3A, 0, sizeof(*u3A));
u3A->our = u3_none;
return 0;
}
/* u3m_rock_stay(): jam state into [dir_c] at [evt_d]
**
**   creates [dir_c]/.urb/roc/ as needed, then jams the kernel core
**   and cold jet state (tagged %fast) into <evt_d>.jam there.
**   returns c3n if the jam/write fails.
*/
c3_o
u3m_rock_stay(c3_c* dir_c, c3_d evt_d)
{
  c3_c nam_c[8193];

  //  build the directory hierarchy, ignoring mkdir failures
  //  (they are expected when the directories already exist)
  //
  snprintf(nam_c, 8192, "%s", dir_c);
  mkdir(nam_c, 0700);

  snprintf(nam_c, 8192, "%s/.urb", dir_c);
  mkdir(nam_c, 0700);

  snprintf(nam_c, 8192, "%s/.urb/roc", dir_c);
  mkdir(nam_c, 0700);

  snprintf(nam_c, 8192, "%s/.urb/roc/%" PRIu64 ".jam", dir_c, evt_d);

  {
    u3_noun dat = u3nt(c3__fast, u3k(u3A->roc), u3j_stay());
    c3_o ret_o = u3s_jam_file(dat, nam_c);
    u3z(dat);
    return ret_o;
  }
}
/* u3m_rock_load(): load state from [dir_c] at [evt_d]
**
**   cues [dir_c]/.urb/roc/<evt_d>.jam, installs the kernel core
**   and cold jet state, sets the event counter, and reams caches.
**   returns c3n if the file cannot be cued or is not %fast-tagged.
*/
c3_o
u3m_rock_load(c3_c* dir_c, c3_d evt_d)
{
  c3_c nam_c[8193];
  snprintf(nam_c, 8192, "%s/.urb/roc/%" PRIu64 ".jam", dir_c, evt_d);

  {
    u3_noun dat;

    {
      // XX u3m_file bails, but we'd prefer to return errors
      //
      u3_noun fil = u3m_file(nam_c);
      u3a_print_memory(stderr, "rock: load", u3r_met(3, fil));

      //  cue in a subroad so a malformed file fails soft
      //
      u3_noun pro = u3m_soft(0, u3ke_cue, fil);

      if ( u3_blip != u3h(pro) ) {
        fprintf(stderr, "rock: unable to cue %s\r\n", nam_c);
        u3z(pro);
        return c3n;
      }
      else {
        dat = u3k(u3t(pro));
        u3z(pro);
      }
    }

    {
      u3_noun roc, rel;

      //  fail unless [dat] is [%fast roc rel]
      //
      if ( u3r_pq(dat, c3__fast, &roc, &rel) ) {
        u3z(dat);
        return c3n;
      }

      u3A->roc = u3k(roc);
      u3j_load(u3k(rel));
    }

    u3z(dat);
  }

  u3A->ent_d = evt_d;

  //  rebuild jet and bytecode caches against the new state
  //
  u3j_ream();
  u3n_ream();

  return c3y;
}
/* u3m_rock_drop(): delete saved state from [dir_c] at [evt_d]
*/
c3_o
u3m_rock_drop(c3_c* dir_c, c3_d evt_d)
{
  c3_c pat_c[8193];
  snprintf(pat_c, 8192, "%s/.urb/roc/%" PRIu64 ".jam", dir_c, evt_d);

  if ( 0 == unlink(pat_c) ) {
    return c3y;
  }

  u3l_log("rock: drop %s failed: %s\r\n", pat_c, strerror(errno));
  return c3n;
}
/* u3m_wipe(): purge and reinitialize loom, with checkpointing
*/
void
u3m_wipe(void)
{
  //  clear page flags
  //
  memset((void*)u3P.dit_w, 0, u3a_pages >> 3);

  //  reinitialize checkpoint system
  //
  //    NB: callers must first u3e_hold() or u3e_wipe()
  //
  u3e_live(c3n, u3P.dir_c);

  //  reinitialize loom
  //
  u3m_pave(c3y, c3n);

  //  reinitialize jets
  //
  u3j_boot(c3y);
}
/* u3m_reclaim: clear persistent caches to reclaim memory
*/
void

728
pkg/urbit/noun/serial.c Normal file
View File

@ -0,0 +1,728 @@
/* noun/serial.c
**
*/
#include <errno.h>
#include <fcntl.h>
#include "all.h"
/* _cs_met0_w(): safe bitwidth for any c3_w
**
**   NOTE(review): a c3_w with bit 31 set is presumably not a valid
**   direct noun to hand to u3r_met(), hence the special case — its
**   bitwidth is 32 by inspection.
*/
static inline c3_w
_cs_met0_w(c3_w wid_w) {
  if ( wid_w >> 31 ) {
    return 32;
  }

  return u3r_met(0, wid_w);
}
/* _cs_jam_fib: struct for tracking the fibonacci-allocated jam of a noun
*/
struct _cs_jam_fib {
  u3p(u3h_root) har_p;  //  noun -> bit-cursor of first encoding (backrefs)
  c3_w a_w;             //  current buffer capacity, in bits (fib(n))
  c3_w b_w;             //  previous fibonacci number (fib(n-1))
  c3_w bit_w;           //  bits written so far
  c3_w* buf_w;          //  output buffer, in words
};
/* _cs_jam_fib_grow(): reallocate buffer with fibonacci growth
**
**   ensures [fib_u] can hold [mor_w] more bits; bails with
**   c3__fail if the total bit count would overflow c3_w.
*/
static void
_cs_jam_fib_grow(struct _cs_jam_fib* fib_u, c3_w mor_w)
{
  c3_w wan_w = fib_u->bit_w + mor_w;

  // check for c3_w overflow
  //
  if ( wan_w < mor_w ) {
    u3m_bail(c3__fail);
  }

  if ( wan_w > fib_u->a_w ) {
    c3_w old_w, new_w, c_w = 0;

    //  old capacity in words, rounded up from bits
    //
    old_w = fib_u->a_w >> 5;
    if ( (old_w << 5) != fib_u->a_w ) {
      ++old_w;
    }

    // fibonacci growth
    //
    while ( c_w < wan_w ) {
      c_w        = fib_u->a_w + fib_u->b_w;
      fib_u->b_w = fib_u->a_w;
      fib_u->a_w = c_w;
    }

    //  new capacity in words, rounded up from bits
    //
    new_w = c_w >> 5;
    if ( (new_w << 5) != c_w ) {
      ++new_w;
    }

    fib_u->buf_w = u3a_wealloc(fib_u->buf_w, new_w);

    //  zero the newly-added words (chop or's bits into place)
    //
    memset(fib_u->buf_w + old_w, 0, (new_w - old_w) * sizeof(c3_w));
  }
}
/* _cs_jam_fib_chop(): chop [met_w] bits of [a] into [fib_u]
**
**   grows the buffer first, then writes at the current bit-cursor.
*/
static void
_cs_jam_fib_chop(struct _cs_jam_fib* fib_u, c3_w met_w, u3_noun a)
{
  c3_w bit_w = fib_u->bit_w;
  _cs_jam_fib_grow(fib_u, met_w);
  u3r_chop(0, 0, met_w, bit_w, fib_u->buf_w, a);
  fib_u->bit_w += met_w;
}
/* _cs_jam_fib_mat(): length-prefixed encode (mat) [a] into [fib_u]
**
**   0 encodes as a single 1-bit; otherwise a unary-prefixed
**   bitwidth followed by the atom's bits.
*/
static void
_cs_jam_fib_mat(struct _cs_jam_fib* fib_u, u3_noun a)
{
  if ( 0 == a ) {
    _cs_jam_fib_chop(fib_u, 1, 1);
  }
  else {
    c3_w a_w = u3r_met(0, a);
    c3_w b_w = _cs_met0_w(a_w);

    //  NOTE(review): b_w == 32 would make (1 << b_w) undefined;
    //  presumably unreachable for practical atom sizes — confirm
    //
    _cs_jam_fib_chop(fib_u, b_w+1, 1 << b_w);
    _cs_jam_fib_chop(fib_u, b_w-1, a_w & ((1 << (b_w-1)) - 1));
    _cs_jam_fib_chop(fib_u, a_w, a);
  }
}
/* _cs_jam_fib_atom_cb(): encode atom or backref
**
**   u3a_walk_fore() atom callback; [ptr_v] is a struct _cs_jam_fib.
*/
static void
_cs_jam_fib_atom_cb(u3_atom a, void* ptr_v)
{
  struct _cs_jam_fib* fib_u = ptr_v;
  u3_weak b = u3h_git(fib_u->har_p, a);

  // if [a] has no backref, encode atom and put cursor into [har_p]
  //
  if ( u3_none == b ) {
    u3h_put(fib_u->har_p, a, u3i_words(1, &(fib_u->bit_w)));
    _cs_jam_fib_chop(fib_u, 1, 0);
    _cs_jam_fib_mat(fib_u, a);
  }
  else {
    c3_w a_w = u3r_met(0, a);
    c3_w b_w = u3r_met(0, b);

    // if [a] is smaller than the backref, encode atom
    //
    if ( a_w <= b_w ) {
      _cs_jam_fib_chop(fib_u, 1, 0);
      _cs_jam_fib_mat(fib_u, a);
    }
    // otherwise, encode backref
    //
    else {
      _cs_jam_fib_chop(fib_u, 2, 3);
      _cs_jam_fib_mat(fib_u, b);
    }
  }
}
/* _cs_jam_fib_cell_cb(): encode cell or backref
**
**   u3a_walk_fore() cell callback; returns c3y to continue into
**   the cell's head/tail, c3n to shortcircuit after a backref.
*/
static c3_o
_cs_jam_fib_cell_cb(u3_noun a, void* ptr_v)
{
  struct _cs_jam_fib* fib_u = ptr_v;
  u3_weak b = u3h_git(fib_u->har_p, a);

  // if [a] has no backref, encode cell and put cursor into [har_p]
  //
  if ( u3_none == b ) {
    u3h_put(fib_u->har_p, a, u3i_words(1, &(fib_u->bit_w)));
    _cs_jam_fib_chop(fib_u, 2, 1);
    return c3y;
  }
  // otherwise, encode backref and shortcircuit traversal
  //
  else {
    _cs_jam_fib_chop(fib_u, 2, 3);
    _cs_jam_fib_mat(fib_u, b);
    return c3n;
  }
}
/* u3s_jam_fib(): jam without atom allocation.
**
**   returns atom-suitable words, and *bit_w will have
**   the length (in bits). return should be freed with u3a_wfree().
*/
c3_w*
u3s_jam_fib(u3_noun a, c3_w* bit_w)
{
  struct _cs_jam_fib fib_u;
  fib_u.har_p = u3h_new();

  // fib(12) is small enough to be reasonably fast to allocate.
  //
  fib_u.a_w = 144;

  // fib(11) is needed to get fib(13).
  //
  fib_u.b_w = 89;
  fib_u.bit_w = 0;

  //  allocate and zero the initial buffer (capacity in words)
  //
  {
    c3_w len_w = fib_u.a_w >> 5;
    if ( (len_w << 5) != fib_u.a_w ) {
      ++len_w;
    }
    fib_u.buf_w = u3a_walloc(len_w);
    memset(fib_u.buf_w, 0, len_w * sizeof(c3_w));
  }

  // as this is a hot path, we unsafely elide overflow checks
  //
  // a page-fault overflow detection system is urgently needed ...
  //
  u3a_walk_fore_unsafe(a, &fib_u, _cs_jam_fib_atom_cb,
                       _cs_jam_fib_cell_cb);

  *bit_w = fib_u.bit_w;
  u3h_free(fib_u.har_p);
  return fib_u.buf_w;
}
/* _cs_jam_met_mat(): the jam bitwidth of an atom of bitwidth [wid_w]
**
**   equivalent to (head (rub a)): the length-prefix encoding costs
**   the atom's bits plus twice the bitwidth of its bitwidth.
*/
static c3_d
_cs_jam_met_mat(c3_w wid_w)
{
  return ( 0 == wid_w ) ? 1ULL :
         (c3_d)wid_w + (2ULL * (c3_d)_cs_met0_w(wid_w));
}
/* _cs_jam_met: struct for tracking the jam bitwidth of a noun
*/
struct _cs_jam_met {
  u3p(u3h_root) har_p;  //  noun -> bit-cursor of first occurrence
  u3p(u3h_root) bak_p;  //  noun -> backref cursor (actually used)
  c3_d len_d;           //  total bitwidth so far
};
/* _cs_jam_met_atom_cb(): bitwidth of atom or backref encoding for [a]
*/
static void
_cs_jam_met_atom_cb(u3_atom a, void* ptr_v)
{
  struct _cs_jam_met* met_u = ptr_v;
  c3_w a_w = u3r_met(0, a);
  u3_weak b = u3h_git(met_u->har_p, a);

  // if we haven't seen [a], put cursor into [har_p]
  //
  if ( u3_none == b ) {
    u3h_put(met_u->har_p, a, u3i_chubs(1, &(met_u->len_d)));
    met_u->len_d += 1ULL + _cs_jam_met_mat(a_w);
  }
  else {
    c3_w b_w = u3r_met(0, b);

    // if [a] is smaller than a backref, use directly
    //
    if ( a_w <= b_w ) {
      met_u->len_d += 1ULL + _cs_jam_met_mat(a_w);
    }
    // otherwise, save backref
    //
    else {
      u3h_put(met_u->bak_p, a, u3k(b));
      met_u->len_d += 2ULL + _cs_jam_met_mat(b_w);
    }
  }
}
/* _cs_jam_met_cell_cb(): bitwidth of cell or backref encoding for [a]
**
**   returns c3y to continue into the cell, c3n to shortcircuit.
*/
static c3_o
_cs_jam_met_cell_cb(u3_noun a, void* ptr_v)
{
  struct _cs_jam_met* met_u = ptr_v;
  u3_weak b = u3h_git(met_u->har_p, a);

  // if we haven't seen [a], put cursor into [har_p]
  //
  if ( u3_none == b ) {
    u3h_put(met_u->har_p, a, u3i_chubs(1, &(met_u->len_d)));
    met_u->len_d += 2ULL;
    return c3y;
  }
  // otherwise, save backref and shortcircuit traversal
  //
  else {
    c3_w b_w = u3r_met(0, b);
    u3h_put(met_u->bak_p, a, u3k(b));
    met_u->len_d += 2ULL + _cs_jam_met_mat(b_w);
    return c3n;
  }
}
/* u3s_jam_met(): measure a noun for jam, calculating backrefs
**
**   returns the total jam bitwidth; *bak_p receives a new hash
**   table of backrefs (caller frees with u3h_free()) for use
**   with u3s_jam_buf().
*/
c3_d
u3s_jam_met(u3_noun a, u3p(u3h_root)* bak_p)
{
  struct _cs_jam_met met_u;
  met_u.har_p = u3h_new();
  met_u.bak_p = u3h_new();
  met_u.len_d = 0ULL;

  u3a_walk_fore(a, &met_u, _cs_jam_met_atom_cb,
                _cs_jam_met_cell_cb);
  u3h_free(met_u.har_p);
  *bak_p = met_u.bak_p;

  return met_u.len_d;
}
/* _cs_jam_buf: struct for tracking the pre-measured jam of a noun
*/
struct _cs_jam_buf {
  u3p(u3h_root) bak_p;  //  backrefs, as computed by u3s_jam_met()
  c3_w bit_w;           //  bits written so far
  c3_w* buf_w;          //  pre-allocated output buffer
};
/* _cs_jam_buf_chop(): chop [met_w] bits of [a] into [buf_u]
**
**   no bounds check: the buffer was sized by u3s_jam_met().
*/
static void
_cs_jam_buf_chop(struct _cs_jam_buf* buf_u, c3_w met_w, u3_noun a)
{
  u3r_chop(0, 0, met_w, buf_u->bit_w, buf_u->buf_w, a);
  buf_u->bit_w += met_w;
}
/* _cs_jam_buf_mat(): length-prefixed encode (mat) [a] into [buf_u]
*/
static void
_cs_jam_buf_mat(struct _cs_jam_buf* buf_u, u3_atom a)
{
  if ( 0 == a ) {
    _cs_jam_buf_chop(buf_u, 1, 1);
  }
  else {
    c3_w a_w = u3r_met(0, a);
    c3_w b_w = _cs_met0_w(a_w);

    _cs_jam_buf_chop(buf_u, b_w+1, 1 << b_w);
    _cs_jam_buf_chop(buf_u, b_w-1, a_w & ((1 << (b_w-1)) - 1));
    _cs_jam_buf_chop(buf_u, a_w, a);
  }
}
/* _cs_jam_buf_atom_cb(): encode atom or backref
**
**   u3a_walk_fore() atom callback; [ptr_v] is a struct _cs_jam_buf.
*/
static void
_cs_jam_buf_atom_cb(u3_atom a, void* ptr_v)
{
  struct _cs_jam_buf* buf_u = ptr_v;
  u3_weak b = u3h_git(buf_u->bak_p, a);

  // if [a] has no backref (or this is the referent), encode atom
  //
  if ( (u3_none == b) ||
       (u3r_word(0, b) == buf_u->bit_w) )
  {
    _cs_jam_buf_chop(buf_u, 1, 0);
    _cs_jam_buf_mat(buf_u, a);
  }
  else {
    c3_w a_w = u3r_met(0, a);
    c3_w b_w = u3r_met(0, b);

    // if [a] is smaller than the backref, encode atom
    //
    if ( a_w <= b_w ) {
      _cs_jam_buf_chop(buf_u, 1, 0);
      _cs_jam_buf_mat(buf_u, a);
    }
    // otherwise, encode backref
    //
    else {
      _cs_jam_buf_chop(buf_u, 2, 3);
      _cs_jam_buf_mat(buf_u, b);
    }
  }
}
/* _cs_jam_buf_cell_cb(): encode cell or backref
**
**   returns c3y to continue into the cell, c3n to shortcircuit.
*/
static c3_o
_cs_jam_buf_cell_cb(u3_noun a, void* ptr_v)
{
  struct _cs_jam_buf* buf_u = ptr_v;
  u3_weak b = u3h_git(buf_u->bak_p, a);

  // if [a] has no backref (or this is the referent), encode cell
  //
  if ( (u3_none == b) ||
       (u3r_word(0, b) == buf_u->bit_w) )
  {
    _cs_jam_buf_chop(buf_u, 2, 1);
    return c3y;
  }
  // otherwise, encode backref and shortcircuit traversal
  //
  else {
    _cs_jam_buf_chop(buf_u, 2, 3);
    _cs_jam_buf_mat(buf_u, b);
    return c3n;
  }
}
/* u3s_jam_buf(): jam [a] into pre-allocated [buf_w], without allocation
**
**   using backrefs in [bak_p], as computed by u3s_jam_met()
**   NB [buf_w] must be pre-allocated with sufficient space
**
**   XX can only encode up to c3_w bits, due to use of chop
*/
void
u3s_jam_buf(u3_noun a, u3p(u3h_root) bak_p, c3_w* buf_w)
{
  struct _cs_jam_buf buf_u;
  buf_u.bak_p = bak_p;
  buf_u.buf_w = buf_w;
  buf_u.bit_w = 0;

  // this is in fact safe under normal usage, as
  // the stack will have been checked in u3s_jam_met()
  //
  u3a_walk_fore_unsafe(a, &buf_u, _cs_jam_buf_atom_cb,
                       _cs_jam_buf_cell_cb);
}
/* u3s_jam_file(): jam [a] into a file, overwriting
**
**   measures [a], sizes the file, mmaps it, jams into the mapping,
**   then shrinks to the exact byte length. returns c3n on any
**   failure, in which case the (possibly-truncated) file is removed.
*/
c3_o
u3s_jam_file(u3_noun a, c3_c* pas_c)
{
  u3p(u3h_root) bak_p;
  c3_i fid_i = open(pas_c, O_RDWR | O_CREAT | O_TRUNC, 0644);
  c3_w byt_w, wor_w, len_w;

  if ( fid_i < 0 ) {
    fprintf(stderr, "jam: open %s: %s\r\n", pas_c, strerror(errno));
    return c3n;
  }

  {
    c3_d len_d = u3s_jam_met(a, &bak_p);

    if ( len_d > 0xffffffffULL ) {
      fprintf(stderr, "jam: overflow c3_w: %" PRIu64 "\r\n", len_d);
      //  fix: previously returned without closing [fid_i] (descriptor
      //  leak) and left the O_TRUNC'd file behind; clean up like the
      //  error path below
      //
      u3h_free(bak_p);
      close(fid_i);
      unlink(pas_c);
      return c3n;
    }

    // length in bytes a la u3i_bytes
    //
    byt_w = (c3_w)(len_d >> 3ULL);
    if ( len_d > (c3_d)(byt_w << 3) ) {
      byt_w++;
    }

    // length in words
    //
    wor_w = (c3_w)(len_d >> 5ULL);
    if ( len_d > (c3_d)(wor_w << 5) ) {
      wor_w++;
    }

    // byte-length of word-length
    //
    len_w = 4 * wor_w;
  }

  // grow [fid_i] to [len_w]
  //
  if ( 0 != ftruncate(fid_i, len_w) ) {
    fprintf(stderr, "jam: ftruncate grow %s: %s\r\n", pas_c, strerror(errno));
    goto error;
  }

  // mmap [fid_i], jam into it, sync, and unmap
  //
  {
    c3_w* buf_w;
    void* ptr_v = mmap(0, len_w, PROT_READ|PROT_WRITE, MAP_SHARED, fid_i, 0);

    if ( MAP_FAILED == ptr_v ) {
      fprintf(stderr, "jam: mmap %s: %s\r\n", pas_c, strerror(errno));
      goto error;
    }

    buf_w = ptr_v;
    u3s_jam_buf(a, bak_p, buf_w);

    if ( 0 != msync(ptr_v, len_w, MS_SYNC) ) {
      fprintf(stderr, "jam: msync %s: %s\r\n", pas_c, strerror(errno));
      // XX ignore return?
      //
      munmap(ptr_v, len_w);
      goto error;
    }

    if ( 0 != munmap(ptr_v, len_w) ) {
      fprintf(stderr, "jam: munmap %s: %s\r\n", pas_c, strerror(errno));
      // XX fatal error?
      //
      goto error;
    }
  }

  // shrink [fid_i] to [byt_w]
  //
  if ( 0 != ftruncate(fid_i, byt_w) ) {
    fprintf(stderr, "jam: ftruncate shrink %s: %s\r\n", pas_c, strerror(errno));
    goto error;
  }

  {
    close(fid_i);
    u3h_free(bak_p);
    return c3y;
  }

  error: {
    close(fid_i);
    unlink(pas_c);
    u3h_free(bak_p);
    return c3n;
  }
}
#define CUE_ROOT 0
#define CUE_HEAD 1
#define CUE_TAIL 2

// stack frame for recording head vs tail iteration
//
// In Hoon, this structure would be as follows:
//
// $% [%root ~]
// [%head cell-cursor=@]
// [%tail cell-cursor=@ hed-width=@ hed-value=*]
// ==
//
typedef struct _cs_cue_frame
{
  c3_y tag_y;   //  frame tag: CUE_ROOT, CUE_HEAD, or CUE_TAIL
  u3_atom cur;  //  bit-cursor of the cell being decoded
  u3_atom wid;  //  bitwidth of the decoded head (TAIL frames)
  u3_noun hed;  //  decoded head value (TAIL frames)
} cueframe;
/* _cs_cue_push(): construct a cueframe and push it onto the road stack.
**
**   [mov] is the signed per-frame stack delta, [off] the signed offset
**   to the frame base: (mov, off) are (-wis, 0) on a north road and
**   (wis, -wis) on a south road.  Bails with c3__meme on overflow.
*/
static inline void
_cs_cue_push(c3_ys mov,
             c3_ys off,
             c3_y tag_y,
             u3_atom cur,
             u3_atom wid,
             u3_noun hed)
{
  cueframe* fam_u;

  u3R->cap_p += mov;

  //  ensure we haven't overflowed the stack; the stack grows toward
  //  hat_p from opposite directions on north (off == 0) and south roads
  //
  if ( ( 0 == off ) ? !(u3R->cap_p > u3R->hat_p)
                    : !(u3R->cap_p < u3R->hat_p) )
  {
    u3m_bail(c3__meme);
  }

  fam_u = u3to(cueframe, u3R->cap_p + off);
  fam_u->tag_y = tag_y;
  fam_u->cur   = cur;
  fam_u->wid   = wid;
  fam_u->hed   = hed;
}
/* _cs_cue_pop(): pop a cueframe off the road stack and return it by value.
*/
static inline cueframe
_cs_cue_pop(c3_ys mov, c3_ys off)
{
  //  copy the frame out before retreating the stack pointer
  //
  cueframe fam_u = *(u3to(cueframe, u3R->cap_p + off));
  u3R->cap_p -= mov;
  return fam_u;
}
/* u3s_cue(): cue (deserialize) noun from bitstream [a].  RETAIN [a].
**
**   iterative (explicit-stack) reimplementation of the recursive
**   cue algorithm: 0 tag-bit -> atom (rub), 10 -> cell, 11 -> backref.
**   bails with c3__exit on a backref to an unseen cursor, and with
**   c3__meme (via _cs_cue_push) on stack exhaustion.
*/
u3_noun
u3s_cue(u3_atom a)
{
  //  initialize signed stack offsets (relative to north/south road)
  //
  c3_ys mov, off;
  {
    c3_y wis_y = c3_wiseof(cueframe);
    c3_o nor_o = u3a_is_north(u3R);
    mov = ( c3y == nor_o ? -wis_y : wis_y );
    off = ( c3y == nor_o ? 0 : -wis_y );
  }

  //  initialize a hash table for dereferencing backrefs
  //  (maps bit-cursor of each read -> noun produced there)
  //
  u3p(u3h_root) har_p = u3h_new();

  //  stash the current stack post
  //
  u3p(cueframe) cap_p = u3R->cap_p;

  //  push the (only) ROOT stack frame (our termination condition)
  //
  _cs_cue_push(mov, off, CUE_ROOT, 0, 0, 0);

  //  initialize cursor to bit-position 0
  //
  u3_atom cur = 0;

  //  the bitwidth and product from reading at cursor
  //
  u3_atom wid, pro;

  //  read from atom at cursor
  //
  //  TRANSFER .cur
  //
  advance: {
    //  read tag bit at cur
    //
    c3_y tag_y = u3qc_cut(0, cur, 1, a);

    //  low bit unset, (1 + cur) points to an atom
    //
    //  produce atom and the width we read
    //
    if ( 0 == tag_y ) {
      u3_noun bur;
      {
        u3_noun x = u3qa_inc(cur);
        bur = u3qe_rub(x, a);
        u3z(x);
      }
      pro = u3k(u3t(bur));
      //  memoize the product at this cursor for future backrefs
      //
      u3h_put(har_p, cur, u3k(pro));
      //  total width: tag bit plus rub'd width
      //
      wid = u3qa_inc(u3h(bur));
      u3z(bur);
      goto retreat;
    }
    else {
      //  read tag bit at (1 + cur)
      //
      {
        u3_noun x = u3qa_inc(cur);
        tag_y = u3qc_cut(0, x, 1, a);
        u3z(x);
      }

      //  next bit set, (2 + cur) points to a backref
      //
      //  produce referenced value and the width we read
      //
      if ( 1 == tag_y ) {
        u3_noun bur;
        {
          u3_noun x = u3ka_add(2, cur);
          bur = u3qe_rub(x, a);
          u3z(x);
        }
        pro = u3h_get(har_p, u3k(u3t(bur)));
        //  backref to a cursor we never read: invalid input
        //  (the bail unwinds the road, reclaiming bur and har_p)
        //
        if ( u3_none == pro ) {
          return u3m_bail(c3__exit);
        }
        wid = u3qa_add(2, u3h(bur));
        u3z(bur);
        goto retreat;
      }
      //  next bit unset, (2 + cur) points to the head of a cell
      //
      //  push a frame to mark HEAD recursion and read the head
      //
      else {
        _cs_cue_push(mov, off, CUE_HEAD, cur, 0, 0);
        cur = u3qa_add(2, cur);
        goto advance;
      }
    }
  }

  //  consume: popped stack frame, .wid and .pro from above.
  //
  //  TRANSFER .wid, .pro, and contents of .fam_u
  //  (.cur is in scope, but we have already lost our reference to it)
  //
  retreat: {
    cueframe fam_u = _cs_cue_pop(mov, off);

    switch ( fam_u.tag_y ) {
      default: {
        c3_assert(0);
      }

      //  fam_u is our stack root, we're done.
      //
      case CUE_ROOT: {
        break;
      }

      //  .wid and .pro are the head of the cell at fam_u.cur.
      //  save them (and the cell cursor) in a TAIL frame,
      //  set the cursor to the tail and read there.
      //
      case CUE_HEAD: {
        _cs_cue_push(mov, off, CUE_TAIL, fam_u.cur, wid, pro);
        cur = u3ka_add(2, u3qa_add(wid, fam_u.cur));
        goto advance;
      }

      //  .wid and .pro are the tail of the cell at fam_u.cur,
      //  construct the cell, memoize it, and produce it along with
      //  its total width (as if it were a read from above).
      //
      case CUE_TAIL: {
        pro = u3nc(fam_u.hed, pro);
        u3h_put(har_p, fam_u.cur, u3k(pro));
        wid = u3ka_add(2, u3ka_add(wid, fam_u.wid));
        goto retreat;
      }
    }
  }

  //  the final width is no longer needed; only the product escapes
  //
  u3z(wid);
  u3h_free(har_p);

  //  sanity check: the stack post is exactly where we found it
  //
  c3_assert( u3R->cap_p == cap_p );

  return pro;
}

View File

@ -254,25 +254,11 @@ _pier_db_load_commits(u3_pier* pir_u,
c3_d lav_d,
c3_d len_d)
{
if (lav_d == 1) {
// We are restarting from event 1. That means we need to set the ship from
// the log identity information.
u3_noun who, fak, len;
c3_o ret = u3_lmdb_read_identity(pir_u->log_u->db_u,
&who,
&fak,
&len);
if (ret == c3n) {
u3l_log("Failed to load identity for replay. Exiting...");
u3_pier_bail();
}
_pier_boot_set_ship(pir_u, u3k(who), u3k(fak));
pir_u->lif_d = u3r_chub(0, len);
u3z(who);
u3z(fak);
u3z(len);
if ( 1ULL == lav_d ) {
// We are replaying the entire event log, and must
// read the header to ensure that our %boot msg is correct.
//
_pier_db_read_header(pir_u);
}
c3_o ret = u3_lmdb_read_events(pir_u,
@ -508,14 +494,16 @@ _pier_work_boot(u3_pier* pir_u, c3_o sav_o)
c3_assert( 0 != pir_u->lif_d );
u3_noun who = u3i_chubs(2, pir_u->who_d);
u3_noun len = u3i_chubs(1, &pir_u->lif_d);
if ( c3y == sav_o ) {
_pier_db_write_header(pir_u, u3k(who), pir_u->fak_o, u3k(len));
_pier_db_write_header(pir_u,
u3i_chubs(2, pir_u->who_d),
pir_u->fak_o,
u3k(len));
}
u3_noun msg = u3nq(c3__boot, who, pir_u->fak_o, len);
u3_noun msg = u3nc(c3__boot, len);
u3_atom mat = u3ke_jam(msg);
u3_newt_write(&god_u->inn_u, mat, 0);
}
@ -910,38 +898,22 @@ _pier_work_poke(void* vod_p,
// the worker process starts with a %play task,
// which tells us where to start playback
// (and who we are, if it knows) XX remove in favor of event-log header
//
case c3__play: {
c3_d lav_d;
c3_l mug_l;
if ( (c3n == u3r_qual(u3t(jar), 0, &p_jar, &q_jar, &r_jar)) ||
if ( (c3n == u3r_trel(jar, 0, &p_jar, &q_jar)) ||
(c3n == u3ud(p_jar)) ||
(u3r_met(6, p_jar) != 1) ||
(c3n == u3ud(q_jar)) ||
(u3r_met(5, p_jar) != 1) ||
(c3n == u3du(r_jar)) ||
(c3n == u3ud(u3h(r_jar))) ||
((c3y != u3t(r_jar)) && (c3n != u3t(r_jar))) )
(1 < u3r_met(5, q_jar)) )
{
if ( u3_nul == u3t(jar) ) {
lav_d = 1ULL;
mug_l = 0;
}
else {
goto error;
}
goto error;
}
if ( u3_nul != u3t(jar) ) {
lav_d = u3r_chub(0, p_jar);
mug_l = u3r_word(0, q_jar);
// single-home
//
_pier_boot_set_ship(pir_u, u3k(u3h(r_jar)), u3k(u3t(r_jar)));
}
lav_d = u3r_chub(0, p_jar);
mug_l = u3r_word(0, q_jar);
_pier_work_play(pir_u, lav_d, mug_l);
break;
@ -1056,6 +1028,7 @@ _pier_work_poke(void* vod_p,
return;
error: {
u3m_p("jar", jar);
u3z(jar); u3z(mat);
_pier_work_bail(0, "bad jar");
}
@ -1656,10 +1629,14 @@ _pier_boot_ready(u3_pier* pir_u)
//
_pier_work_boot(pir_u, c3n);
}
else if ( (1ULL + god_u->dun_d) == log_u->com_d ) {
fprintf(stderr, "pier: replaying event %" PRIu64 "\r\n",
log_u->com_d);
}
else {
fprintf(stderr, "pier: replaying events %" PRIu64
" through %" PRIu64 "\r\n",
god_u->dun_d,
(c3_d)(1ULL + god_u->dun_d),
log_u->com_d);
}

View File

@ -129,7 +129,7 @@ _reck_kick_term(u3_pier* pir_u, u3_noun pox, c3_l tid_l, u3_noun fav)
u3z(pox); u3z(fav); return c3y;
} break;
case c3__mass: p_fav = u3t(fav);
case c3__mass:
{
u3z(pox); u3z(fav);
@ -138,6 +138,14 @@ _reck_kick_term(u3_pier* pir_u, u3_noun pox, c3_l tid_l, u3_noun fav)
uv_timer_start(&u3K.tim_u, (uv_timer_cb)u3_daemon_grab, 0, 0);
return c3y;
} break;
// ignore pack (processed in worker)
//
case c3__pack:
{
u3z(pox); u3z(fav);
return c3y;
} break;
}
c3_assert(!"not reached"); return 0;
}

View File

@ -46,17 +46,10 @@
+$ plea
$% :: status on startup
::
$: %play
$= p
:: ~ if no snapshot
::
%- unit
:: p: event number expected
:: q: mug of kernel
:: r: identity, fake flag
::
[p=@ q=@ r=[our=@p fak=?]]
==
:: p: event number expected
:: q: mug of kernel (or 0)
::
[%play p=@ q=@]
:: event executed unchanged (in response to %work)
::
$: %done
@ -97,11 +90,9 @@
+$ writ
$% :: prepare to boot
::
:: p: identity
:: q: fake?
:: r: number of boot formulas
:: p: length of lifecycle sequence
::
[%boot p=@p q=? r=@]
[%boot p=@]
:: exit immediately
::
:: p: exit code
@ -319,6 +310,62 @@ _worker_grab(u3_noun sac, u3_noun ovo, u3_noun vir)
}
}
/* _worker_static_grab(): garbage collect, checking for profiling. RETAIN.
*/
static void
_worker_static_grab(void)
{
c3_assert( u3R == &(u3H->rod_u) );
fprintf(stderr, "work: measuring memory:\r\n");
u3a_print_memory(stderr, "total marked", u3m_mark(stderr));
u3a_print_memory(stderr, "free lists", u3a_idle(u3R));
u3a_print_memory(stderr, "sweep", u3a_sweep());
fprintf(stderr, "\r\n");
fflush(stderr);
}
/* _worker_pack(): deduplicate and compact memory
*/
static void
_worker_pack(void)
{
_worker_static_grab();
u3l_log("work: compacting loom\r\n");
if ( c3n == u3m_rock_stay(u3V.dir_c, u3V.dun_d) ) {
u3l_log("work: unable to jam state\r\n");
return;
}
if ( c3n == u3e_hold() ) {
u3l_log("work: unable to backup checkpoint\r\n");
return;
}
u3m_wipe();
if ( c3n == u3m_rock_load(u3V.dir_c, u3V.dun_d) ) {
u3l_log("work: compaction failed, restoring checkpoint\r\n");
if ( c3n == u3e_fall() ) {
fprintf(stderr, "work: unable to restore checkpoint\r\n");
c3_assert(0);
}
}
if ( c3n == u3e_drop() ) {
u3l_log("work: warning: orphaned backup checkpoint file\r\n");
}
if ( c3n == u3m_rock_drop(u3V.dir_c, u3V.dun_d) ) {
u3l_log("work: warning: orphaned state file\r\n");
}
u3l_log("work: compacted loom\r\n");
_worker_static_grab();
}
/* _worker_fail(): failure stub.
*/
static void
@ -376,10 +423,14 @@ _worker_send_slog(u3_noun hod)
/* _worker_lame(): event failed, replace with error event.
*/
static void
_worker_lame(c3_d evt_d, u3_noun now, u3_noun ovo, u3_noun why, u3_noun tan)
_worker_lame(u3_noun now, u3_noun ovo, u3_noun why, u3_noun tan)
{
u3_noun rep;
u3_noun wir, tag, cad;
c3_o pac_o = c3n;
c3_d evt_d = u3V.sen_d;
u3V.sen_d = u3V.dun_d;
u3x_trel(ovo, &wir, &tag, &cad);
@ -425,22 +476,27 @@ _worker_lame(c3_d evt_d, u3_noun now, u3_noun ovo, u3_noun why, u3_noun tan)
rep = u3nc(u3k(wir), u3nt(c3__crud, u3k(tag), nat));
}
pac_o = _(c3__meme == why);
_worker_send_replace(evt_d, u3nc(now, rep));
u3z(ovo); u3z(why); u3z(tan);
// XX review, always pack on meme?
//
if ( c3y == pac_o ) {
_worker_pack();
}
}
/* _worker_sure(): event succeeded, report completion.
/* _worker_sure_feck(): event succeeded, send effects.
*/
static void
_worker_sure(u3_noun ovo, u3_noun vir, u3_noun cor)
_worker_sure_feck(u3_noun ovo, u3_noun vir, c3_w pre_w)
{
u3z(u3A->roc);
u3A->roc = cor;
u3A->ent_d = u3V.dun_d;
u3V.mug_l = u3r_mug(u3A->roc);
u3_noun sac = u3_nul;
c3_o pac_o = c3n;
c3_o rec_o = c3n;
// intercept |mass, observe |reset
//
@ -472,7 +528,13 @@ _worker_sure(u3_noun ovo, u3_noun vir, u3_noun cor)
// reclaim memory from persistent caches on |reset
//
if ( c3__vega == u3h(fec) ) {
u3m_reclaim();
rec_o = c3y;
}
// pack memory on |pack
//
if ( c3__pack == u3h(fec) ) {
pac_o = c3y;
}
riv = u3t(riv);
@ -480,12 +542,86 @@ _worker_sure(u3_noun ovo, u3_noun vir, u3_noun cor)
}
}
// after a successful event, we check for memory pressure.
//
// if we've exceeded either of two thresholds, we reclaim
// from our persistent caches, and notify the daemon
// (via a "fake" effect) that arvo should trim state
// (trusting that the daemon will enqueue an appropriate event).
// For future flexibility, the urgency of the notification is represented
// by a *decreasing* number: 0 is maximally urgent, 1 less so, &c.
//
// high-priority: 2^22 contiguous words remaining (~8 MB)
// low-priority: 2^27 contiguous words remaining (~536 MB)
// XX maybe use 2^23 (~16 MB) and 2^26 (~268 MB?
//
{
u3_noun pri = u3_none;
c3_w pos_w = u3a_open(u3R);
c3_w low_w = (1 << 27);
c3_w hig_w = (1 << 22);
if ( (pre_w > low_w) && !(pos_w > low_w) ) {
// XX set flag(s) in u3V so we don't repeat endlessly?
// XX pack here too?
//
pac_o = c3y;
rec_o = c3y;
pri = 1;
}
else if ( (pre_w > hig_w) && !(pos_w > hig_w) ) {
// XX we should probably jam/cue our entire state at this point
//
pac_o = c3y;
rec_o = c3y;
pri = 0;
}
// reclaim memory from persistent caches periodically
//
// XX this is a hack to work two things
// - bytecode caches grow rapidly and can't be simply capped
// - we don't make very effective use of our free lists
//
else {
rec_o = _(0 == (u3V.dun_d % 1000ULL));
}
// notify daemon of memory pressure via "fake" effect
//
if ( u3_none != pri ) {
u3_noun cad = u3nc(u3nt(u3_blip, c3__arvo, u3_nul),
u3nc(c3__trim, pri));
vir = u3nc(cad, vir);
}
}
if ( c3y == rec_o ) {
u3m_reclaim();
}
// XX this runs on replay too
//
_worker_grab(sac, ovo, vir);
_worker_send_complete(vir);
u3z(sac); u3z(ovo);
if ( c3y == pac_o ) {
_worker_pack();
}
}
/* _worker_sure_core(): event succeeded, save state.
*/
static void
_worker_sure_core(u3_noun cor)
{
u3V.dun_d = u3V.sen_d;
u3z(u3A->roc);
u3A->roc = cor;
u3A->ent_d = u3V.dun_d;
u3V.mug_l = u3r_mug(u3A->roc);
}
/* _worker_work_live(): apply event.
@ -539,7 +675,6 @@ _worker_work_live(c3_d evt_d, u3_noun job)
// event rejected
//
if ( u3_blip != u3h(gon) ) {
u3V.sen_d = u3V.dun_d;
// restore previous time
//
u3_noun nex = u3A->now;
@ -551,13 +686,11 @@ _worker_work_live(c3_d evt_d, u3_noun job)
u3k(ovo); u3k(why); u3k(tan);
u3z(gon); u3z(job);
_worker_lame(evt_d, nex, ovo, why, tan);
_worker_lame(nex, ovo, why, tan);
}
// event accepted
//
else {
c3_o rec_o = c3n;
// vir/(list ovum) list of effects
// cor/arvo arvo core
//
@ -567,66 +700,8 @@ _worker_work_live(c3_d evt_d, u3_noun job)
u3k(ovo); u3k(vir); u3k(cor);
u3z(gon); u3z(job); u3z(last_date);
u3V.dun_d = u3V.sen_d;
// after a successful event, we check for memory pressure.
//
// if we've exceeded either of two thresholds, we reclaim
// from our persistent caches, and notify the daemon
// (via a "fake" effect) that arvo should trim state
// (trusting that the daemon will enqueue an appropriate event).
// For future flexibility, the urgency of the notification is represented
// by a *decreasing* number: 0 is maximally urgent, 1 less so, &c.
//
// high-priority: 2^22 contiguous words remaining (~8 MB)
// low-priority: 2^27 contiguous words remaining (~536 MB)
// XX maybe use 2^23 (~16 MB) and 2^26 (~268 MB?
//
// XX refactor: we should measure memory after losing the old kernel
//
{
u3_noun pri = u3_none;
c3_w pos_w = u3a_open(u3R);
c3_w low_w = (1 << 27);
c3_w hig_w = (1 << 22);
if ( (pre_w > low_w) && !(pos_w > low_w) ) {
// XX set flag in u3V so we don't repeat endlessly?
//
rec_o = c3y;
pri = 1;
}
else if ( (pre_w > hig_w) && !(pos_w > hig_w) ) {
// XX we should probably jam/cue our entire state at this point
//
rec_o = c3y;
pri = 0;
}
// reclaim memory from persistent caches periodically
//
// XX this is a hack to work two things
// - bytecode caches grow rapidly and can't be simply capped
// - we don't make very effective use of our free lists
//
else {
rec_o = _(0 == (evt_d % 1000ULL));
}
// notify daemon of memory pressure via "fake" effect
//
if ( u3_none != pri ) {
u3_noun cad = u3nc(u3nt(u3_blip, c3__arvo, u3_nul),
u3nc(c3__trim, pri));
vir = u3nc(cad, vir);
}
}
_worker_sure(ovo, vir, cor);
if ( c3y == rec_o ) {
u3m_reclaim();
}
_worker_sure_core(cor);
_worker_sure_feck(ovo, vir, pre_w);
}
}
@ -773,13 +848,9 @@ _worker_poke_exit(c3_w cod_w) // exit code
/* _worker_poke_boot(): prepare to boot.
*/
static void
_worker_poke_boot(u3_noun who, u3_noun fak, c3_w len_w)
_worker_poke_boot(c3_w len_w)
{
c3_assert( u3_none == u3A->our );
c3_assert( 0 != len_w );
u3A->our = who;
u3A->fak = fak;
u3V.len_w = len_w;
}
@ -800,14 +871,10 @@ _worker_poke(void* vod_p, u3_noun mat)
}
case c3__boot: {
u3_noun who, fak, len;
c3_w len_w;
u3_noun len;
c3_w len_w;
if ( (c3n == u3r_qual(jar, 0, &who, &fak, &len)) ||
(c3n == u3ud(who)) ||
(1 < u3r_met(7, who)) ||
(c3n == u3ud(fak)) ||
(1 < u3r_met(0, fak)) ||
if ( (c3n == u3r_cell(jar, 0, &len)) ||
(c3n == u3ud(len)) ||
(1 < u3r_met(3, len)) )
{
@ -815,11 +882,8 @@ _worker_poke(void* vod_p, u3_noun mat)
}
len_w = u3r_word(0, len);
u3k(who);
u3k(fak);
u3z(jar);
return _worker_poke_boot(who, fak, len_w);
return _worker_poke_boot(len_w);
}
case c3__work: {
@ -894,50 +958,28 @@ _worker_poke(void* vod_p, u3_noun mat)
}
}
/* _worker_static_grab(): garbage collect, checking for profiling. RETAIN.
*/
static void
_worker_static_grab(void)
{
c3_assert( u3R == &(u3H->rod_u) );
fprintf(stderr, "work: measuring memory:\r\n");
u3a_print_memory(stderr, "total marked", u3m_mark(stderr));
u3a_print_memory(stderr, "free lists", u3a_idle(u3R));
u3a_print_memory(stderr, "sweep", u3a_sweep());
fprintf(stderr, "\r\n");
fflush(stderr);
}
/* u3_worker_boot(): send startup message to manager.
*/
void
u3_worker_boot(void)
{
c3_d nex_d = 1ULL;
u3_noun dat = u3_nul;
c3_d nex_d = 1ULL;
if ( u3_none != u3A->our ) {
// if a lifecycle sequence is needed, [len_w] will be set on %boot
//
u3V.len_w = 0;
if ( 0 != u3V.dun_d ) {
u3V.mug_l = u3r_mug(u3A->roc);
nex_d = u3V.dun_d + 1ULL;
dat = u3nc(u3_nul, u3nt(u3i_chubs(1, &nex_d),
u3V.mug_l,
u3nc(u3k(u3A->our), u3k(u3A->fak))));
// disable hashboard for fake ships
//
if ( c3y == u3A->fak ) {
u3C.wag_w |= u3o_hashless;
}
// no boot sequence expected
//
u3V.len_w = 0;
nex_d += u3V.dun_d;
}
else {
u3V.mug_l = 0;
}
u3l_log("work: play %" PRIu64 "\r\n", nex_d);
_worker_send(u3nc(c3__play, dat));
_worker_send(u3nt(c3__play, u3i_chubs(1, &nex_d), u3V.mug_l));
// measure/print static memory usage if < 1/2 of the loom is available
//