unit test for hash table.

git-svn-id: file:///svn/unbound/trunk@184 be551aaa-1e26-0410-a405-d3ace91eadb9
Wouter Wijngaards 2007-03-21 14:34:57 +00:00
parent 4cbf2705f6
commit 98235df888
4 changed files with 263 additions and 2 deletions

@@ -1,3 +1,8 @@
21 March 2007: Wouter
- unit test of hash table, fixup locking problem in table_grow().
- fixup accounting of sizes for removing items from hashtable.
- unit test for hash table, single threaded test of integrity.
16 March 2007: Wouter
- lock-verifier, checks consistent order of locking.

@@ -218,14 +218,220 @@ static void test_lru(struct lruhash* table)
delkey(k2);
}
/** test hashtable using short sequence */
static void
test_short_table(struct lruhash* table)
{
struct testkey* k = newkey(12);
struct testkey* k2 = newkey(14);
struct testdata* d = newdata(128);
struct testdata* d2 = newdata(129);
k->entry.data = d;
k2->entry.data = d2;
lruhash_insert(table, myhash(12), &k->entry, d);
lruhash_insert(table, myhash(14), &k2->entry, d2);
unit_assert( lruhash_lookup(table, myhash(12), k, 0) == &k->entry);
lock_rw_unlock( &k->entry.lock );
unit_assert( lruhash_lookup(table, myhash(14), k2, 0) == &k2->entry);
lock_rw_unlock( &k2->entry.lock );
lruhash_remove(table, myhash(12), k);
lruhash_remove(table, myhash(14), k2);
}
/** upper bound on the random numbers used in the hash test */
#define HASHTESTMAX 100
/** test adding a random element */
static void
testadd(struct lruhash* table, struct testdata* ref[])
{
int numtoadd = random() % HASHTESTMAX;
struct testdata* data = newdata(numtoadd);
struct testkey* key = newkey(numtoadd);
key->entry.data = data;
lruhash_insert(table, myhash(numtoadd), &key->entry, data);
ref[numtoadd] = data;
}
/** test removing a random element */
static void
testremove(struct lruhash* table, struct testdata* ref[])
{
int num = random() % HASHTESTMAX;
struct testkey* key = newkey(num);
lruhash_remove(table, myhash(num), key);
ref[num] = NULL;
delkey(key);
}
/** test lookup of a random element */
static void
testlookup(struct lruhash* table, struct testdata* ref[])
{
int num = random() % HASHTESTMAX;
struct testkey* key = newkey(num);
struct lruhash_entry* en = lruhash_lookup(table, myhash(num), key, 0);
struct testdata* data = en? (struct testdata*)en->data : NULL;
if(en) {
unit_assert(en->key);
unit_assert(en->data);
}
if(0) log_info("lookup %d got %d, expect %d", num, en? data->data :-1,
ref[num]? ref[num]->data : -1);
unit_assert( data == ref[num] );
if(en) lock_rw_unlock(&en->lock);
delkey(key);
}
/** check integrity of hash table */
static void
check_table(struct lruhash* table)
{
struct lruhash_entry* p;
size_t c = 0;
lock_quick_lock(&table->lock);
unit_assert( table->num <= table->size);
unit_assert( table->size_mask == (int)table->size-1 );
unit_assert( (table->lru_start && table->lru_end) ||
(!table->lru_start && !table->lru_end) );
unit_assert( table->space_used <= table->space_max );
/* check lru list integrity */
if(table->lru_start)
unit_assert(table->lru_start->lru_prev == NULL);
if(table->lru_end)
unit_assert(table->lru_end->lru_next == NULL);
p = table->lru_start;
while(p) {
if(p->lru_prev) {
unit_assert(p->lru_prev->lru_next == p);
}
if(p->lru_next) {
unit_assert(p->lru_next->lru_prev == p);
}
c++;
p = p->lru_next;
}
unit_assert(c == table->num);
/* this assertion is specific to the unit test */
unit_assert( table->space_used ==
table->num * test_sizefunc(NULL, NULL) );
lock_quick_unlock(&table->lock);
}
/** test adding a random element (unlimited range) */
static void
testadd_unlim(struct lruhash* table, struct testdata* ref[])
{
int numtoadd = random() % (HASHTESTMAX * 10);
struct testdata* data = newdata(numtoadd);
struct testkey* key = newkey(numtoadd);
key->entry.data = data;
lruhash_insert(table, myhash(numtoadd), &key->entry, data);
ref[numtoadd] = data;
}
/** test removing a random element (unlimited range) */
static void
testremove_unlim(struct lruhash* table, struct testdata* ref[])
{
int num = random() % (HASHTESTMAX*10);
struct testkey* key = newkey(num);
lruhash_remove(table, myhash(num), key);
ref[num] = NULL;
delkey(key);
}
/** test lookup of a random element (unlimited range) */
static void
testlookup_unlim(struct lruhash* table, struct testdata* ref[])
{
int num = random() % (HASHTESTMAX*10);
struct testkey* key = newkey(num);
struct lruhash_entry* en = lruhash_lookup(table, myhash(num), key, 0);
struct testdata* data = en? (struct testdata*)en->data : NULL;
if(en) {
unit_assert(en->key);
unit_assert(en->data);
}
if(0) log_info("lookup unlim %d got %d, expect %d", num, en ?
data->data :-1, ref[num] ? ref[num]->data : -1);
if(data) {
/* it is okay for data to be NULL; the item may have fallen off the lru */
unit_assert( data == ref[num] );
}
if(en) lock_rw_unlock(&en->lock);
delkey(key);
}
/** test with a long sequence of adds, removes, updates, and lookups */
static void
test_long_table(struct lruhash* table)
{
/* assuming it all fits in the hashtable, this check will work */
struct testdata* ref[HASHTESTMAX * 100];
size_t i;
memset(ref, 0, sizeof(ref));
/* test assumption */
unit_assert( test_sizefunc(NULL, NULL)*HASHTESTMAX < table->space_max);
if(0) lruhash_status(table, "unit test", 1);
srandom(48);
for(i=0; i<1000; i++) {
/* what to do? */
switch(random() % 4) {
case 0:
case 3:
testadd(table, ref);
break;
case 1:
testremove(table, ref);
break;
case 2:
testlookup(table, ref);
break;
default:
unit_assert(0);
}
if(0) lruhash_status(table, "unit test", 1);
check_table(table);
unit_assert( table->num <= HASHTESTMAX );
}
/* test more, but 'ref' assumption does not hold anymore */
for(i=0; i<1000; i++) {
/* what to do? */
switch(random() % 4) {
case 0:
case 3:
testadd_unlim(table, ref);
break;
case 1:
testremove_unlim(table, ref);
break;
case 2:
testlookup_unlim(table, ref);
break;
default:
unit_assert(0);
}
if(0) lruhash_status(table, "unlim", 1);
check_table(table);
}
}
void lruhash_test()
{
/* start very very small array, so it can do lots of table_grow() */
/* also small in size so that reclaim has to be done quickly. */
struct lruhash* table = lruhash_create(2, 4096,
test_sizefunc, test_compfunc, test_delkey, test_deldata, NULL);
test_bin_find_entry(table);
test_lru(table);
test_short_table(table);
test_long_table(table);
/* hashtable tests go here */
lruhash_delete(table);
}

@@ -116,13 +116,16 @@ bin_split(struct lruhash* table, struct lruhash_bin* newa,
/* move entries to new table. Notice that since hash x is mapped to
* bin x & mask, and new mask uses one more bit, so all entries in
* one bin will go into the old bin or bin | newbit */
int newbit = newmask - table->size_mask;
/* so, really, this task could also be threaded, per bin. */
/* LRU list is not changed */
for(i=0; i<table->size; i++)
{
lock_quick_lock(&table->array[i].lock);
p = table->array[i].overflow_list;
/* lock both destination bins */
lock_quick_lock(&newa[i].lock);
lock_quick_lock(&newa[newbit|i].lock);
while(p) {
np = p->overflow_next;
/* link into correct new bin */
@@ -131,6 +134,8 @@ bin_split(struct lruhash* table, struct lruhash_bin* newa,
newbin->overflow_list = p;
p=np;
}
lock_quick_unlock(&newa[i].lock);
lock_quick_unlock(&newa[newbit|i].lock);
}
}
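
The comment in bin_split above relies on the mask arithmetic of a power-of-two table that doubles on growth: the new mask has exactly one extra bit, so an entry in old bin i can only land in new bin i or in new bin i | newbit. A minimal standalone sketch of that invariant (illustrative only, not part of this patch; the names mirror bin_split):

#include <assert.h>

/* With oldmask = oldsize-1 and newmask = 2*oldsize-1, the extra bit is
 * newbit = newmask - oldmask = oldsize.  Since hash & newmask equals
 * (hash & oldmask) | (hash & newbit), the new bin index is either the
 * old bin index or the old bin index with newbit set. */
static void
split_demo(unsigned hash, unsigned oldsize)
{
	unsigned oldmask = oldsize - 1;
	unsigned newmask = 2*oldsize - 1;
	unsigned newbit = newmask - oldmask;
	unsigned oldbin = hash & oldmask;
	unsigned newbin = hash & newmask;
	assert(newbin == oldbin || newbin == (oldbin | newbit));
}

int main(void)
{
	unsigned h;
	for(h = 0; h < 1000; h++)
		split_demo(h * 2654435761u, 2);
	return 0;
}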
@@ -366,6 +371,8 @@ lruhash_remove(struct lruhash* table, hashvalue_t hash, void* key)
lock_quick_unlock(&bin->lock);
return;
}
table->num--;
table->space_used -= (*table->sizefunc)(entry->key, entry->data);
lock_quick_unlock(&table->lock);
lock_rw_wrlock(&entry->lock);
lock_quick_unlock(&bin->lock);
@@ -374,3 +381,38 @@ lruhash_remove(struct lruhash* table, hashvalue_t hash, void* key)
(*table->delkeyfunc)(entry->key, table->cb_arg);
(*table->deldatafunc)(entry->data, table->cb_arg);
}
void
lruhash_status(struct lruhash* table, const char* id, int extended)
{
lock_quick_lock(&table->lock);
log_info("%s: %u entries, memory %u / %u",
id, (unsigned)table->num, (unsigned)table->space_used,
(unsigned)table->space_max);
log_info(" itemsize %u, array %u, mask %d",
(unsigned)(table->num? table->space_used/table->num : 0),
(unsigned)table->size, table->size_mask);
if(extended) {
size_t i;
int min=table->size*2, max=-2;
for(i=0; i<table->size; i++) {
int here = 0;
struct lruhash_entry *en;
lock_quick_lock(&table->array[i].lock);
en = table->array[i].overflow_list;
while(en) {
here ++;
en = en->overflow_next;
}
lock_quick_unlock(&table->array[i].lock);
if(extended >= 2)
log_info("bin[%d] %d", (int)i, here);
if(here > max) max = here;
if(here < min) min = here;
}
log_info(" bin min %d, avg %.2lf, max %d", min,
(double)table->num/(double)table->size, max);
}
lock_quick_unlock(&table->lock);
}

@@ -360,4 +360,12 @@ void lru_front(struct lruhash* table, struct lruhash_entry* entry);
*/
void lru_remove(struct lruhash* table, struct lruhash_entry* entry);
/**
* Output debug info to the log as to state of the hash table.
* @param table: hash table.
* @param id: string printed with table to identify the hash table.
* @param extended: set to true to print statistics on overflow bin lengths.
*/
void lruhash_status(struct lruhash* table, const char* id, int extended);
#endif /* UTIL_STORAGE_LRUHASH_H */
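
For completeness, a brief usage sketch of the new status helper. The wrapper below is hypothetical caller code (the include path is inferred from the header guard), assuming a table already built with lruhash_create:

#include "util/storage/lruhash.h"

/* Hypothetical helper: log the state of a cache table. */
void
dump_cache_state(struct lruhash* table)
{
	/* summary only: entry count, memory used / max, item size, array size */
	lruhash_status(table, "msg cache", 0);
	/* extended: also walks every bin and reports min/avg/max chain length */
	lruhash_status(table, "msg cache", 1);
}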