Merge branch 'retrofit-cash' into retrofit

This commit is contained in:
C. Guy Yarvin 2017-11-13 09:49:37 -08:00
commit 3d2ed11bb7
10 changed files with 605 additions and 189 deletions

0
.d/tests/.gitignore vendored Normal file
View File

1
.gitignore vendored
View File

@ -13,6 +13,7 @@
/outside/re2/obj
# build
/bin/urbit
/bin/test_hash
/vere.pkg
.tags
.etags

View File

@ -380,6 +380,19 @@ VERE_DFILES=$(VERE_OFILES:%.o=.d/%.d)
-include $(VERE_DFILES)
TEST_HASH_MAIN_FILE =\
tests/hashtable_tests.o
TEST_HASH_OFILES=\
$(OUT_OFILES) \
$(BASE_OFILES) \
$(TEST_HASH_MAIN_FILE) \
$(V_OFILES)
TEST_HASH_DFILES=$(TEST_HASH_OFILES:%.o=.d/%.d)
-include $(TEST_HASH_DFILES)
# This is a silly hack necessitated by the fact that libuv uses configure
#
# * Making 'all' obviously requires outside/libuv,
@ -479,6 +492,20 @@ endif
test:
@echo "FIXME no tests defined"
test_hash: $(BIN)/test_hash
ifdef NO_SILENT_RULES
$(BIN)/test_hash: $(LIBCOMMONMARK) $(TEST_HASH_OFILES) $(LIBUV) $(LIBED25519) $(LIBANACHRONISM) $(LIBSCRYPT) $(LIBSOFTFLOAT)
mkdir -p $(BIN)
$(CLD) $(CLDOSFLAGS) -o $(BIN)/test_hash $(TEST_HASH_OFILES) $(LIBUV) $(LIBED25519) $(LIBANACHRONISM) $(LIBS) $(LIBCOMMONMARK) $(LIBSCRYPT) $(LIBSOFTFLOAT)
else
$(BIN)/test_hash: $(LIBCOMMONMARK) $(TEST_HASH_OFILES) $(LIBUV) $(LIBED25519) $(LIBANACHRONISM) $(LIBSCRYPT) $(LIBSOFTFLOAT)
@echo "VERE_DFILES=$(VERE_DFILES)"
@echo " CCLD $(BIN)/test_hash"
@mkdir -p $(BIN)
@$(CLD) $(CLDOSFLAGS) -o $(BIN)/test_hash $(TEST_HASH_OFILES) $(LIBUV) $(LIBED25519) $(LIBANACHRONISM) $(LIBS) $(LIBCOMMONMARK) $(LIBSCRYPT) $(LIBSOFTFLOAT)
endif
tags: ctags etags gtags cscope
ctags:

4
gdb-test-hash Normal file
View File

@ -0,0 +1,4 @@
file bin/test_hash
handle SIGSEGV nostop noprint
start

View File

@ -7,9 +7,12 @@
/** Straightforward implementation of the classic Bagwell
*** HAMT (hash array mapped trie), using a mug hash.
***
*** Because a mug is 31 bits, the root table is 64 wide.
*** Thereupon 5 bits each are warm for each layer. The
*** final leaf is simply a linear search.
*** Because a mug is 31 bits, the root table has 64 slots.
*** The 31 bits of a mug are divided into the first lookup,
*** which is 6 bits (corresponding to the 64 entries in the
*** root table), followed by 5 more branchings of 5 bits each,
*** corresponding to the 32-slot nodes for everything under
*** the root node.
***
*** We store an extra "freshly warm" bit for a simple
*** clock-algorithm reclamation policy, not yet implemented.
@ -18,8 +21,8 @@
/* u3h_slot: map slot.
**
** Either a key-value cell or a loom offset, decoded as a pointer
** to a u3h_node. Matches the u3_noun format - coordinate with
** meme.h. The top two bits are:
** to a u3h_node, or a u3h_buck at the bottom. Matches the u3_noun
** format - coordinate with allocate.h. The top two bits are:
**
** 00 - empty (in the root table only)
** 01 - table
@ -35,18 +38,24 @@
u3h_slot sot_w[0];
} u3h_node;
/* u3h_root: hash root table, with future-proof clock.
/* u3h_root: hash root table
*/
typedef struct {
c3_w clk_w;
u3h_slot sot_w[64];
c3_w max_w; // number of cache lines (0 for no trimming)
c3_w use_w; // number of lines currently filled
struct {
c3_w mug_w; // current hash
c3_w inx_w; // index into current hash bucket iff buc_o
c3_o buc_o; // yes if in middle of hash bucket
} arm_u; // clock arm
u3h_slot sot_w[64]; // slots
} u3h_root;
/* u3h_buck: bottom bucket.
*/
typedef struct {
c3_w len_w;
u3_noun kev[0];
u3h_slot sot_w[0];
} u3h_buck;
/** HAMT macros.
@ -61,6 +70,8 @@
** u3h_node_to_slot(): node pointer to slot
** u3h_slot_to_noun(): slot to cell
** u3h_noun_to_slot(): cell to slot
** u3h_noun_be_warm(): warm mutant
** u3h_noun_be_cold(): cold mutant
*/
# define u3h_slot_is_null(sot) ((0 == ((sot) >> 30)) ? c3y : c3n)
# define u3h_slot_is_node(sot) ((1 == ((sot) >> 30)) ? c3y : c3n)
@ -68,16 +79,20 @@
# define u3h_slot_is_warm(sot) (((sot) & 0x40000000) ? c3y : c3n)
# define u3h_slot_to_node(sot) (u3a_into((sot) & 0x3fffffff))
# define u3h_node_to_slot(ptr) (u3a_outa(ptr) | 0x40000000)
# define u3h_slot_to_noun(sot) (0x40000000 | (sot))
# define u3h_noun_to_slot(som) (som)
# define u3h_noun_be_warm(sot) ((sot) | 0x40000000)
# define u3h_noun_be_cold(sot) ((sot) & ~0x40000000)
# define u3h_slot_to_noun(sot) (0x40000000 | (sot))
# define u3h_noun_to_slot(som) (u3h_noun_be_warm(som))
/** Functions.
***
*** Needs: delete and merge functions; clock reclamation function.
**/
/* u3h_new_cache(): create hashtable with bounded size.
*/
u3p(u3h_root)
u3h_new_cache(c3_w clk_w);
/* u3h_new(): create hashtable.
*/
u3p(u3h_root)
@ -111,6 +126,11 @@
u3_weak
u3h_gut(u3p(u3h_root) har_p, u3_noun key);
/* u3h_trim_to(): trim to n key-value pairs
*/
void
u3h_trim_to(u3p(u3h_root) har_p, c3_w n_w);
/* u3h_free(): free hashtable.
*/
void

View File

@ -26,6 +26,16 @@
c3_i
u3m_bail(c3_m how_m) __attribute__((noreturn));
/* u3m_init(): start the environment, with/without checkpointing.
*/
void
u3m_init(c3_o chk_o);
/* u3m_pave(): instantiate or activate image.
*/
void
u3m_pave(c3_o nuu_o, c3_o bug_o);
/* u3m_file(): load file, as atom, or bail.
*/
u3_noun

View File

@ -173,24 +173,42 @@ _ca_box_make_hat(c3_w len_w, c3_w ald_w, c3_w alp_w, c3_w use_w)
u3_post all_p;
if ( c3y == u3a_is_north(u3R) ) {
// if allocation would fail, halve the cache until we have enough space.
while ( u3R->hat_p + len_w + _me_align_pad(u3R->hat_p, ald_w, alp_w)
>= u3R->cap_p ) {
// if we can't trim any more and we're still out of loom, give up.
if ( 0 == u3R->cax.har_p ) {
u3m_bail(c3__meme); return 0;
}
u3h_trim_to(u3R->cax.har_p, u3to(u3h_root, u3R->cax.har_p)->use_w / 2);
}
all_p = u3R->hat_p;
pad_w = _me_align_pad(all_p, ald_w, alp_w);
u3R->hat_p += (len_w + pad_w);
if ( u3R->hat_p >= u3R->cap_p ) {
u3m_bail(c3__meme); return 0;
}
}
else {
// if allocation would fail, halve the cache until we have enough space.
while ( u3R->hat_p - len_w - _me_align_dap(u3R->hat_p - len_w, ald_w, alp_w)
<= u3R->cap_p ) {
// if we can't trim any more and we're still out of loom, give up.
if ( 0 == u3R->cax.har_p ) {
u3m_bail(c3__meme); return 0;
}
u3h_trim_to(u3R->cax.har_p, u3to(u3h_root, u3R->cax.har_p)->use_w / 2);
}
all_p = (u3R->hat_p - len_w);
pad_w = _me_align_dap(all_p, ald_w, alp_w);
all_p -= pad_w;
u3R->hat_p = all_p;
if ( u3R->hat_p <= u3R->cap_p ) {
u3m_bail(c3__meme); return 0;
}
}
return _box_make(u3a_into(all_p), (len_w + pad_w), use_w);
}

View File

@ -3,23 +3,42 @@
*/
#include "all.h"
static void* _ch_some_add(void* han_v, c3_w, c3_w, u3_noun);
static void* _ch_some_new(c3_w lef_w);
static void
_ch_slot_put(u3h_slot* sot_w, u3_noun kev, c3_w lef_w, c3_w rem_w, c3_w* use_w);
static c3_o
_ch_trim_slot(u3h_root* har_u, u3h_slot *sot_w, c3_w lef_w, c3_w rem_w);
c3_w
_ch_skip_slot(c3_w mug_w, c3_w lef_w);
/* u3h_new_cache(): create hashtable with bounded size.
**
** max_w: maximum number of entries; when non-zero, u3h_put()
**        trims back down to max_w after each insert.
**        0 means unbounded (see u3h_new()).
*/
u3p(u3h_root)
u3h_new_cache(c3_w max_w)
{
  u3h_root*     har_u = u3a_walloc(c3_wiseof(u3h_root));
  u3p(u3h_root) har_p = u3of(u3h_root, har_u);
  c3_w i_w;

  har_u->max_w = max_w;
  har_u->use_w = 0;
  //  clock arm starts at mug 0, not inside any bucket
  har_u->arm_u.mug_w = 0;
  har_u->arm_u.inx_w = 0;
  har_u->arm_u.buc_o = c3n;

  //  all 64 root slots start empty
  for ( i_w = 0; i_w < 64; i_w++ ) {
    har_u->sot_w[i_w] = 0;
  }
  return har_p;
}
/* u3h_new(): create hashtable.
*/
u3p(u3h_root)
u3h_new(void)
{
u3h_root* har_u = u3a_walloc(c3_wiseof(u3h_root));
u3p(u3h_root) har_p = u3of(u3h_root, har_u);
c3_w i_w;
har_u->clk_w = 0;
for ( i_w = 0; i_w < 64; i_w++ ) {
har_u->sot_w[i_w] = 0;
}
return har_p;
return u3h_new_cache(0);
}
/* _ch_popcount(): number of bits set in word. A standard intrinsic.
@ -41,41 +60,6 @@ _ch_buck_new(void)
return hab_u;
}
/* _ch_buck_add(): add to bucket.
**
** Replaces in place when the key already exists; otherwise
** returns a freshly allocated, one-larger bucket (old bucket freed).
*/
static u3h_buck*
_ch_buck_add(u3h_buck* hab_u, u3_noun kev)
{
  c3_w i_w;

  //  replace in place if the key is already present
  for ( i_w = 0; i_w < hab_u->len_w; i_w++ ) {
    if ( c3y == u3r_sing(u3h(kev), u3h(hab_u->kev[i_w])) ) {
      u3a_lose(hab_u->kev[i_w]);
      hab_u->kev[i_w] = kev;
      return hab_u;
    }
  }

  //  otherwise build a one-larger bucket with kev at the front
  {
    c3_w len_w = hab_u->len_w;
    u3h_buck* bah_u = u3a_walloc(c3_wiseof(u3h_buck) +
                                 (len_w + 1) * c3_wiseof(u3_noun));
    bah_u->len_w = len_w + 1;
    bah_u->kev[0] = kev;

    // Optimize: use u3a_wealloc().
    //
    for ( i_w = 0; i_w < hab_u->len_w; i_w++ ) {
      bah_u->kev[i_w + 1] = hab_u->kev[i_w];
    }
    u3a_wfree(hab_u);
    return bah_u;
  }
}
/* _ch_node_new(): create new, empty node.
*/
static u3h_node*
@ -87,73 +71,6 @@ _ch_node_new(void)
return han_u;
}
/* _ch_node_add(): add to node.
**
** lef_w: mug bits remaining below this node.
** rem_w: remaining mug bits of the key being inserted.
*/
static u3h_node*
_ch_node_add(u3h_node* han_u, c3_w lef_w, c3_w rem_w, u3_noun kev)
{
  c3_w bit_w, inx_w, map_w, i_w;

  //  consume the top 5 bits of the remaining hash
  lef_w -= 5;
  bit_w = (rem_w >> lef_w);
  rem_w = (rem_w & ((1 << lef_w) - 1));
  map_w = han_u->map_w;
  //  compressed index: count of occupied slots below bit_w
  inx_w = _ch_popcount(map_w & ((1 << bit_w) - 1));

  if ( map_w & (1 << bit_w) ) {
    c3_w sot_w = han_u->sot_w[inx_w];

    if ( _(u3h_slot_is_node(sot_w)) ) {
      //  slot holds a child node/bucket: insert recursively
      void* hav_v = u3h_slot_to_node(sot_w);

      hav_v = _ch_some_add(hav_v, lef_w, rem_w, kev);
      han_u->sot_w[inx_w] = u3h_node_to_slot(hav_v);
      return han_u;
    }
    else {
      u3_noun kov = u3h_slot_to_noun(sot_w);

      if ( c3y == u3r_sing(u3h(kev), u3h(kov)) ) {
        //  same key: replace the pair
        u3a_lose(kov);
        han_u->sot_w[inx_w] = u3h_noun_to_slot(kev);
        return han_u;
      }
      else {
        //  collision: push both pairs one level down
        c3_w rom_w = u3r_mug(u3h(kov)) & ((1 << lef_w) - 1);
        void* hav_v = _ch_some_new(lef_w);

        // Optimize: need a custom collision create.
        //
        hav_v = _ch_some_add(hav_v, lef_w, rem_w, kev);
        hav_v = _ch_some_add(hav_v, lef_w, rom_w, kov);
        han_u->sot_w[inx_w] = u3h_node_to_slot(hav_v);
        return han_u;
      }
    }
  }
  else {
    //  empty slot: build a one-larger node with kev spliced in
    // Optimize: use u3a_wealloc.
    //
    c3_w len_w = _ch_popcount(map_w);
    u3h_node* nah_u = u3a_walloc(c3_wiseof(u3h_node) +
                                 ((len_w + 1) * c3_wiseof(u3h_slot)));

    nah_u->map_w = han_u->map_w | (1 << bit_w);
    for ( i_w = 0; i_w < inx_w; i_w++ ) {
      nah_u->sot_w[i_w] = han_u->sot_w[i_w];
    }
    nah_u->sot_w[inx_w] = u3h_noun_to_slot(kev);
    for ( i_w = inx_w; i_w < len_w; i_w++ ) {
      nah_u->sot_w[i_w + 1] = han_u->sot_w[i_w];
    }
    u3a_wfree(han_u);
    return nah_u;
  }
}
/* _ch_some_new(): create node or bucket.
*/
static void*
@ -167,15 +84,308 @@ _ch_some_new(c3_w lef_w)
}
}
/* _ch_node_add(): add to node.
**
** lef_w: mug bits remaining below this node.
** rem_w: remaining mug bits of the key being inserted.
** use_w: incremented when a genuinely new entry is created.
*/
static u3h_node*
_ch_node_add(u3h_node* han_u, c3_w lef_w, c3_w rem_w, u3_noun kev, c3_w *use_w)
{
  c3_w bit_w, inx_w, map_w, i_w;

  //  consume the top 5 bits of the remaining hash
  lef_w -= 5;
  bit_w = (rem_w >> lef_w);
  rem_w = (rem_w & ((1 << lef_w) - 1));
  map_w = han_u->map_w;
  //  compressed index: count of occupied slots below bit_w
  inx_w = _ch_popcount(map_w & ((1 << bit_w) - 1));

  if ( map_w & (1 << bit_w) ) {
    //  slot occupied: delegate to the slot-level insert
    _ch_slot_put(&(han_u->sot_w[inx_w]), kev, lef_w, rem_w, use_w);
    return han_u;
  }
  else {
    // nothing was at this slot.
    // Optimize: use u3a_wealloc.
    //
    c3_w len_w = _ch_popcount(map_w);
    u3h_node* nah_u = u3a_walloc(c3_wiseof(u3h_node) +
                                 ((len_w + 1) * c3_wiseof(u3h_slot)));

    nah_u->map_w = han_u->map_w | (1 << bit_w);
    for ( i_w = 0; i_w < inx_w; i_w++ ) {
      nah_u->sot_w[i_w] = han_u->sot_w[i_w];
    }
    //  new entries enter warm, for the clock reclamation policy
    nah_u->sot_w[inx_w] = u3h_noun_be_warm(u3h_noun_to_slot(kev));
    for ( i_w = inx_w; i_w < len_w; i_w++ ) {
      nah_u->sot_w[i_w + 1] = han_u->sot_w[i_w];
    }
    u3a_wfree(han_u);
    *use_w += 1;
    return nah_u;
  }
}
/* _ch_buck_add(): add to bucket.
**
** use_w: incremented when a genuinely new entry is created
**        (not on in-place replacement).
*/
static u3h_buck*
_ch_buck_add(u3h_buck* hab_u, u3_noun kev, c3_w *use_w)
{
  c3_w i_w;

  // if our key is equal to any of the existing keys in the bucket,
  // then replace that key-value pair with kev.
  //
  for ( i_w = 0; i_w < hab_u->len_w; i_w++ ) {
    u3_noun kov = u3h_slot_to_noun(hab_u->sot_w[i_w]);
    if ( c3y == u3r_sing(u3h(kev), u3h(kov)) ) {
      u3a_lose(kov);
      hab_u->sot_w[i_w] = u3h_noun_to_slot(kev);
      return hab_u;
    }
  }

  // create mutant bucket with added key-value pair.
  {
    c3_w len_w = hab_u->len_w;
    u3h_buck* bah_u = u3a_walloc(c3_wiseof(u3h_buck) +
                                 (len_w + 1) * c3_wiseof(u3h_slot));
    bah_u->len_w = len_w + 1;
    bah_u->sot_w[0] = u3h_noun_to_slot(kev);

    // Optimize: use u3a_wealloc().
    //
    for ( i_w = 0; i_w < hab_u->len_w; i_w++ ) {
      bah_u->sot_w[i_w + 1] = hab_u->sot_w[i_w];
    }
    u3a_wfree(hab_u);
    *use_w += 1;
    return bah_u;
  }
}
/* _ch_some_add(): add to node or bucket.
*/
static void*
_ch_some_add(void* han_v, c3_w lef_w, c3_w rem_w, u3_noun kev)
_ch_some_add(void* han_v, c3_w lef_w, c3_w rem_w, u3_noun kev, c3_w *use_w)
{
if ( 0 == lef_w ) {
return _ch_buck_add(han_v, kev);
return _ch_buck_add((u3h_buck*)han_v, kev, use_w);
}
else return _ch_node_add((u3h_node*)han_v, lef_w, rem_w, kev, use_w);
}
/* _ch_slot_put(): store a key-value pair in a u3h_slot (root or node).
**
** lef_w: mug bits remaining below this slot.
** rem_w: remaining mug bits of the key.
** use_w: adjusted so it counts only genuinely new entries.
*/
static void
_ch_slot_put(u3h_slot* sot_w, u3_noun kev, c3_w lef_w, c3_w rem_w, c3_w* use_w)
{
  if ( c3y == u3h_slot_is_null(*sot_w) ) {
    //  empty slot: take it, marked warm for the clock policy
    *sot_w = u3h_noun_be_warm(u3h_noun_to_slot(kev));
    *use_w += 1;
  }
  else if ( c3y == u3h_slot_is_noun(*sot_w) ) {
    u3_noun kov = u3h_slot_to_noun(*sot_w);
    if ( c3y == u3r_sing(u3h(kev), u3h(kov)) ) {
      //  same key: replace the pair, re-warming the slot
      *sot_w = u3h_noun_be_warm(u3h_noun_to_slot(kev));
      u3z(kov);
    }
    else {
      //  collision: push both pairs one level down
      c3_w rom_w = u3r_mug(u3h(kov)) & ((1 << lef_w) - 1);
      void* hav_v = _ch_some_new(lef_w);

      *use_w -= 1; // take one out, add two
      hav_v = _ch_some_add(hav_v, lef_w, rom_w, kov, use_w);
      hav_v = _ch_some_add(hav_v, lef_w, rem_w, kev, use_w);
      *sot_w = u3h_node_to_slot(hav_v);
    }
  }
  else {
    //  interior slot: recurse into the child node or bucket
    c3_assert( c3y == u3h_slot_is_node(*sot_w) );
    void* hav_v = _ch_some_add(u3h_slot_to_node(*sot_w), lef_w, rem_w, kev, use_w);
    *sot_w = u3h_node_to_slot(hav_v);
  }
}
/* _ch_trim_node(): trim one entry from a node slot or its children.
**
** Returns c3y if an entry was removed; shrinks or collapses the
** node when its trimmed child slot becomes empty.
*/
static c3_o
_ch_trim_node(u3h_root* har_u, u3h_slot* sot_w, c3_w lef_w, c3_w rem_w)
{
  c3_w bit_w, map_w, inx_w;
  u3h_slot* tos_w;
  u3h_node* han_u = (u3h_node*) u3h_slot_to_node(*sot_w);

  //  consume the top 5 bits of the remaining hash
  lef_w -= 5;
  bit_w = (rem_w >> lef_w);
  map_w = han_u->map_w;

  //  arm points at an absent child: skip the whole hash prefix
  if ( 0 == (map_w & (1 << bit_w)) ) {
    har_u->arm_u.mug_w = _ch_skip_slot(har_u->arm_u.mug_w, lef_w);
    return c3n;
  }

  rem_w = (rem_w & ((1 << lef_w) - 1));
  //  compressed index: count of occupied slots below bit_w
  inx_w = _ch_popcount(map_w & ((1 << bit_w) - 1));
  tos_w = &(han_u->sot_w[inx_w]);

  if ( c3n == _ch_trim_slot(har_u, tos_w, lef_w, rem_w) ) {
    // nothing trimmed
    return c3n;
  }
  else if ( 0 != *tos_w ) {
    // something trimmed, but slot still has value
    return c3y;
  }
  else {
    // shrink!
    c3_w i_w, len_w = _ch_popcount(map_w);

    if ( 2 == len_w ) {
      // only one left, pick the other
      *sot_w = han_u->sot_w[ 0 == inx_w ? 1 : 0 ];
    }
    else {
      // make smaller node
      c3_w nel_w = len_w - 1;
      u3h_node* nah_u = u3a_walloc(c3_wiseof(u3h_node) +
                                   (nel_w * c3_wiseof(u3h_slot)));
      nah_u->map_w = han_u->map_w & ~(1 << bit_w);

      for ( i_w = 0; i_w < inx_w; ++i_w ) {
        nah_u->sot_w[i_w] = han_u->sot_w[i_w];
      }
      for ( i_w = inx_w; i_w < nel_w; ++i_w ) {
        nah_u->sot_w[i_w] = han_u->sot_w[i_w + 1];
      }
      *sot_w = u3h_node_to_slot(nah_u);
    }
    u3a_wfree(han_u);
    return c3y;
  }
}
/* _ch_trim_buck(): trim one entry from a bucket slot.
**
** Resumes at the clock arm's saved index within this bucket;
** returns c3y if an entry was removed (shrinking the bucket).
*/
static c3_o
_ch_trim_buck(u3h_root* har_u, u3h_slot* sot_w)
{
  c3_w len_w;
  u3h_buck* hab_u = u3h_slot_to_node(*sot_w);

  //  buc_o marks that the arm is inside a bucket, so the arm's
  //  mug is not advanced until the bucket is exhausted
  for ( har_u->arm_u.buc_o = c3y, len_w = hab_u->len_w;
        har_u->arm_u.inx_w < len_w;
        har_u->arm_u.inx_w += 1 )
  {
    u3h_slot* tos_w = &(hab_u->sot_w[har_u->arm_u.inx_w]);
    if ( c3y == _ch_trim_slot(har_u, tos_w, 0, 0) ) {
      if ( 2 == len_w ) {
        // 2 things in bucket: pick the other and promote
        *sot_w = hab_u->sot_w[ (0 == har_u->arm_u.inx_w) ? 1 : 0 ];
      }
      else {
        // make a smaller bucket
        c3_w i_w, nel_w = len_w - 1;
        u3h_buck* bah_u = u3a_walloc(c3_wiseof(u3h_buck) +
                                     nel_w * c3_wiseof(u3h_slot));
        bah_u->len_w = nel_w;
        for ( i_w = 0; i_w < har_u->arm_u.inx_w; ++i_w ) {
          bah_u->sot_w[i_w] = hab_u->sot_w[i_w];
        }
        for ( i_w = har_u->arm_u.inx_w; i_w < nel_w; ++i_w ) {
          bah_u->sot_w[i_w] = hab_u->sot_w[i_w + 1];
        }
        *sot_w = u3h_node_to_slot(bah_u);
      }
      u3a_wfree(hab_u);
      return c3y;
    }
  }

  //  bucket exhausted: leave bucket mode and advance the arm
  har_u->arm_u.buc_o = c3n;
  har_u->arm_u.mug_w = (har_u->arm_u.mug_w + 1) & 0x7FFFFFFF; // modulo 2^31
  return c3n;
}
/* _ch_trim_some(): trim one entry from a bucket or node slot.
**
** A lef_w of zero means we are at the bottom, in a bucket.
*/
static c3_o
_ch_trim_some(u3h_root* har_u, u3h_slot* sot_w, c3_w lef_w, c3_w rem_w)
{
  return ( 0 == lef_w ) ? _ch_trim_buck(har_u, sot_w)
                        : _ch_trim_node(har_u, sot_w, lef_w, rem_w);
}
/* _ch_skip_slot(): increment arm over hash prefix.
**
** Advances mug_w to the first mug bearing the next hash prefix at
** depth lef_w, wrapping modulo 2^(31 - lef_w).
*/
c3_w
_ch_skip_slot(c3_w mug_w, c3_w lef_w)
{
  c3_w pre_w = (mug_w >> lef_w) + 1;      //  next prefix, possibly carried out
  pre_w &= ((1 << (31 - lef_w)) - 1);     //  wrap: modulo 2^(31 - lef_w)
  return pre_w << lef_w;
}
/* _ch_trim_slot(): trim one entry from a slot.
**
** Returns c3y only when a key-value pair was actually evicted.
*/
static c3_o
_ch_trim_slot(u3h_root* har_u, u3h_slot *sot_w, c3_w lef_w, c3_w rem_w)
{
  if ( _(u3h_slot_is_null(*sot_w)) ) {
    //  empty: nothing under this hash prefix, skip it entirely
    har_u->arm_u.mug_w = _ch_skip_slot(har_u->arm_u.mug_w, lef_w);
    return c3n;
  }
  else if ( _(u3h_slot_is_node(*sot_w)) ) {
    //  interior: recurse into the child node or bucket
    return _ch_trim_some(har_u, sot_w, lef_w, rem_w);
  }
  else if ( _(u3h_slot_is_warm(*sot_w)) ) {
    //  warm entry: give it a second chance (clock algorithm)
    *sot_w = u3h_noun_be_cold(*sot_w);
    if ( c3n == har_u->arm_u.buc_o ) {
      har_u->arm_u.mug_w = (har_u->arm_u.mug_w + 1) & 0x7FFFFFFF; // modulo 2^31
    }
    return c3n;
  }
  else {
    //  cold entry: evict it and advance over its prefix
    u3_noun kev = u3h_slot_to_noun(*sot_w);
    *sot_w = 0;
    u3z(kev);
    har_u->arm_u.mug_w = _ch_skip_slot(har_u->arm_u.mug_w, lef_w);
    return c3y;
  }
}
/* _ch_trim_root(): trim one entry from a hashtable.
**
** Decodes the clock arm's mug into a 6-bit root index plus a
** 25-bit remainder and trims the corresponding root slot.
*/
static c3_o
_ch_trim_root(u3h_root* har_u)
{
  c3_w mug_w = har_u->arm_u.mug_w;
  c3_w inx_w = mug_w >> 25; // 6 bits
  c3_w rem_w = mug_w & ((1 << 25) - 1);
  u3h_slot* sot_w = &(har_u->sot_w[inx_w]);

  return _ch_trim_slot(har_u, sot_w, 25, rem_w);
}
/* u3h_trim_to(): trim to n key-value pairs.
**
** Runs the clock arm over the table, evicting cold entries,
** until the live count use_w drops to n_w.
*/
void
u3h_trim_to(u3p(u3h_root) har_p, c3_w n_w)
{
  u3h_root* har_u = u3to(u3h_root, har_p);

  while ( har_u->use_w > n_w ) {
    //  each successful root trim removes exactly one pair
    if ( c3y == _ch_trim_root(har_u) ) {
      har_u->use_w -= 1;
    }
  }
}
/* u3h_put(): insert in hashtable.
@ -188,28 +398,12 @@ u3h_put(u3p(u3h_root) har_p, u3_noun key, u3_noun val)
u3h_root* har_u = u3to(u3h_root, har_p);
u3_noun kev = u3nc(u3k(key), val);
c3_w mug_w = u3r_mug(key);
c3_w inx_w = (mug_w >> 25);
c3_w inx_w = (mug_w >> 25); // 6 bits
c3_w rem_w = (mug_w & ((1 << 25) - 1));
c3_w sot_w = har_u->sot_w[inx_w];
if ( _(u3h_slot_is_null(sot_w)) ) {
har_u->sot_w[inx_w] = u3h_noun_to_slot(kev);
}
else {
u3h_node* han_u;
if ( _(u3h_slot_is_noun(sot_w)) ) {
u3_noun kov = u3h_slot_to_noun(sot_w);
c3_w rom_w = u3r_mug(u3h(kov)) & ((1 << 25) - 1);
han_u = _ch_node_new();
han_u = _ch_node_add(han_u, 25, rem_w, kev);
han_u = _ch_node_add(han_u, 25, rom_w, kov);
}
else {
han_u = _ch_node_add(u3h_slot_to_node(sot_w), 25, rem_w, kev);
}
har_u->sot_w[inx_w] = u3h_node_to_slot(han_u);
_ch_slot_put(&(har_u->sot_w[inx_w]), kev, 25, rem_w, &(har_u->use_w));
if ( har_u->max_w > 0 ) {
u3h_trim_to(har_p, har_u->max_w);
}
}
@ -221,7 +415,7 @@ _ch_buck_hum(u3h_buck* hab_u, c3_w mug_w)
c3_w i_w;
for ( i_w = 0; i_w < hab_u->len_w; i_w++ ) {
if ( mug_w == u3r_mug(u3h(hab_u->kev[i_w])) ) {
if ( mug_w == u3r_mug(u3h(u3h_slot_to_noun(hab_u->sot_w[i_w]))) ) {
return c3y;
}
}
@ -268,7 +462,7 @@ _ch_node_hum(u3h_node* han_u, c3_w lef_w, c3_w rem_w, c3_w mug_w)
}
}
/* u3h_hum(): read from hashtable.
/* u3h_hum(): check presence in hashtable.
**
** `key` is RETAINED.
*/
@ -308,8 +502,9 @@ _ch_buck_git(u3h_buck* hab_u, u3_noun key)
c3_w i_w;
for ( i_w = 0; i_w < hab_u->len_w; i_w++ ) {
if ( _(u3r_sing(key, u3h(hab_u->kev[i_w]))) ) {
return u3t(hab_u->kev[i_w]);
u3_noun kev = u3h_slot_to_noun(hab_u->sot_w[i_w]);
if ( _(u3r_sing(key, u3h(kev))) ) {
return u3t(kev);
}
}
return u3_none;
@ -389,7 +584,7 @@ u3h_git(u3p(u3h_root) har_p, u3_noun key)
}
}
/* u3h_get(): read from hashtable.
/* u3h_get(): read from hashtable, incrementing refcount.
**
** `key` is RETAINED; result is PRODUCED.
*/
@ -412,8 +607,9 @@ _ch_buck_gut(u3h_buck* hab_u, u3_noun key)
c3_w i_w;
for ( i_w = 0; i_w < hab_u->len_w; i_w++ ) {
if ( _(u3r_sung(key, u3h(hab_u->kev[i_w]))) ) {
return u3a_gain(u3t(hab_u->kev[i_w]));
u3_noun kev = u3h_slot_to_noun(hab_u->sot_w[i_w]);
if ( _(u3r_sung(key, u3h(kev))) ) {
return u3a_gain(u3t(kev));
}
}
return u3_none;
@ -501,7 +697,7 @@ _ch_free_buck(u3h_buck* hab_u)
c3_w i_w;
for ( i_w = 0; i_w < hab_u->len_w; i_w++ ) {
u3a_lose(hab_u->kev[i_w]);
u3a_lose(u3h_slot_to_noun(hab_u->sot_w[i_w]));
}
u3a_wfree(hab_u);
}
@ -570,7 +766,7 @@ _ch_walk_buck(u3h_buck* hab_u, void (*fun_f)(u3_noun))
c3_w i_w;
for ( i_w = 0; i_w < hab_u->len_w; i_w++ ) {
fun_f(hab_u->kev[i_w]);
fun_f(u3h_slot_to_noun(hab_u->sot_w[i_w]));
}
}
@ -638,7 +834,7 @@ _ch_mark_buck(u3h_buck* hab_u)
c3_w i_w;
for ( i_w = 0; i_w < hab_u->len_w; i_w++ ) {
tot_w += u3a_mark_noun(hab_u->kev[i_w]);
tot_w += u3a_mark_noun(u3h_slot_to_noun(hab_u->sot_w[i_w]));
}
tot_w += u3a_mark_ptr(hab_u);

View File

@ -479,10 +479,10 @@ u3m_mark(void)
return tot_w;
}
/* _cm_pave(): instantiate or activate image.
/* u3m_pave(): instantiate or activate image.
*/
static void
_cm_pave(c3_o nuu_o, c3_o bug_o)
void
u3m_pave(c3_o nuu_o, c3_o bug_o)
{
if ( c3y == nuu_o ) {
u3H = (void *)_pave_north(u3_Loom + 1,
@ -1453,10 +1453,10 @@ _cm_signals(void)
}
}
/* _cm_init(): start the environment, with/without checkpointing.
/* u3m_init(): start the environment, with/without checkpointing.
*/
void
_cm_init(c3_o chk_o)
u3m_init(c3_o chk_o)
{
_cm_limits();
_cm_signals();
@ -1660,7 +1660,7 @@ u3m_boot(c3_o nuu_o, c3_o bug_o, c3_c* dir_c,
{
/* Activate the loom.
*/
_cm_init(nuu_o);
u3m_init(nuu_o);
/* Activate the storage system.
*/
@ -1672,7 +1672,7 @@ u3m_boot(c3_o nuu_o, c3_o bug_o, c3_c* dir_c,
/* Construct or activate the allocator.
*/
_cm_pave(nuu_o, bug_o);
u3m_pave(nuu_o, bug_o);
/* Initialize the jet system.
*/

140
tests/hashtable_tests.c Normal file
View File

@ -0,0 +1,140 @@
#include "all.h"
static void _setup(void);
static void _test_cache_replace_value(void);
static void _test_cache_trimming(void);
static void _test_no_cache(void);
static void _test_skip_slot(void);
/* main(): run all test cases.
**
** Each test aborts the process (c3_assert/exit) on failure,
** so reaching the return means every case passed.
*/
int
main(int argc, char* argv[])
{
  _setup();

  _test_no_cache();
  _test_skip_slot();
  _test_cache_trimming();
  _test_cache_replace_value();

  return 0;
}
/* _setup(): prepare for tests.
**
** Boots the loom environment and paves a fresh image so the
** u3 allocator and hashtable routines are usable.
*/
static void
_setup(void)
{
  u3m_init(c3y);
  u3m_pave(c3y, c3n);
}
/* _test_no_cache(): test a hashtable without caching.
**
** With no cache bound (u3h_new), every inserted pair must
** still be retrievable afterwards.
*/
static void
_test_no_cache(void)
{
  c3_w i_w;
  c3_w max_w = 1000;
  u3p(u3h_root) har_p = u3h_new();

  for ( i_w = 0; i_w < max_w; i_w++ ) {
    u3h_put(har_p, i_w, i_w + max_w);
  }
  for ( i_w = 0; i_w < max_w; i_w++ ) {
    c3_assert(i_w + max_w == u3h_get(har_p, i_w));
  }
  //  report on stderr, consistent with the other test cases
  fprintf(stderr, "test_no_cache: ok\n");
}
/* _test_skip_slot(): exercise _ch_skip_slot at root and child depths.
*/
static void
_test_skip_slot(void)
{
  // root table
  {
    //  advance within the 6-bit root prefix
    c3_w mug_w = 0x17 << 25;
    c3_w res_w = _ch_skip_slot(mug_w, 25);
    c3_assert((0x18 << 25) == res_w);
  }
  {
    c3_w mug_w = 63 << 25; // 6 bits, all ones
    c3_w res_w = _ch_skip_slot(mug_w, 25);
    //  top prefix wraps around to zero
    c3_assert(0 == res_w);
  }
  // child nodes
  {
    c3_w mug_w = 17 << 20;
    c3_w res_w = _ch_skip_slot(mug_w, 20);
    c3_assert((18 << 20) == res_w);
  }
  {
    c3_w mug_w = 31 << 20; // 5 bits, all ones
    c3_w res_w = _ch_skip_slot(mug_w, 20);
    //  carry propagates into the next root prefix
    c3_assert((1 << 25) == res_w);
  }
  fprintf(stderr, "test_skip_slot: ok\n");
}
/* _test_cache_trimming(): ensure a caching hashtable removes stale items.
**
** Inserts max_w pairs into a cache bounded at max_w / 10, then
** checks that the newest pair survives and that the live count
** equals the configured bound.
*/
static void
_test_cache_trimming(void)
{
  c3_w max_w = 620;
  c3_w i_w;
  u3p(u3h_root) har_p = u3h_new_cache(max_w / 10);
  u3h_root* har_u = u3to(u3h_root, har_p);

  for ( i_w = 0; i_w < max_w; i_w++ ) {
    u3h_put(har_p, i_w, i_w + max_w);
  }

  //  the most recently inserted pair must survive trimming
  if ( ( max_w + max_w - 1) != u3h_get(har_p, max_w - 1) ) {
    fprintf(stderr, "fail\r\n");
    exit(1);
  }
  //  the live count must have been trimmed to the bound
  if ( ( max_w / 10 ) != har_u->use_w ) {
    fprintf(stderr, "fail\r\n");
    exit(1);
  }
  fprintf(stderr, "test_cache_trimming: ok\n");
}
/* _test_cache_replace_value(): re-putting a key replaces its value
** without growing the live count use_w.
*/
static void
_test_cache_replace_value(void)
{
  c3_w max_w = 100;
  c3_w i_w;
  u3p(u3h_root) har_p = u3h_new_cache(max_w);
  u3h_root* har_u = u3to(u3h_root, har_p);

  //  insert every key, then overwrite each with value + 1
  for ( i_w = 0; i_w < max_w; i_w++ ) {
    u3h_put(har_p, i_w, i_w + max_w);
  }
  for ( i_w = 0; i_w < max_w; i_w++ ) {
    u3h_put(har_p, i_w, i_w + max_w + 1);
  }

  //  the last key must hold the replacement value
  if ( (2 * max_w) != u3h_get(har_p, max_w - 1) ) {
    fprintf(stderr, "fail\r\n");
    exit(1);
  }
  //  use_w must still equal the number of distinct keys
  if ( max_w != har_u->use_w ) {
    fprintf(stderr, "fail\r\n");
    exit(1);
  }
  fprintf(stderr, "test_cache_replace_value: ok\r\n");
}