unbound/util/storage/lruhash.c
Wouter Wijngaards 218f5cfc92
Fast Reload Option (#1042)
* - fast-reload, add unbound-control fast_reload

* - fast-reload, make a thread to service the unbound-control command.

* - fast-reload, communication sockets for information transfer.

* - fast-reload, fix compile for unbound-dnstap-socket.

* - fast-reload, set nonblocking communication to keep the server thread
  responding to DNS requests.

* - fast-reload, poll routine to test for readiness, timeout fails connection.

* - fast-reload, detect loop in sock_poll_timeout routine.

* - fast-reload, send done and exited notification.

* - fast-reload, defines for constants in ipc.

* - fast-reload, ipc socket recv and send resist partial reads and writes and
  can continue byte by byte. They can also continue after an interrupt.

* - fast-reload, send exit command to thread when done.

* - fast-reload, output strings for client on string list.

* - fast-reload, add newline to terminal output.

* - fast-reload, send client string to remote client.

* - fast-reload, better debug output.

* - fast-reload, print queue structure, for output to the remote client.

* - fast-reload, move print items to print queue from fast_reload_thread struct.

* - fast-reload, keep list of pending print queue items in daemon struct.

* - fast-reload, comment explains in_list for printq to print remainder.

* - fast-reload, unit test testdata/fast_reload_thread.tdir that tests the
  thread output.

* - fast-reload, fix test link for fast_reload_printq_list_delete function.

* - fast-reload, reread config file from disk.

* - fast-reload, unshare forwards, making the structure locked, with an rwlock.

* - fast-reload, for nonthreaded operation, the unbound-control commands forward,
  forward_add and forward_delete should be distributed to the other processes,
  but when threaded, they should not be distributed to other threads, because
  the structure is no longer thread-specific.

* - fast-reload, unshare stub hints, making the structure locked, with an rwlock.

* - fast-reload, helpful comments for hints lookup function return value.

* - fast-reload, fix bug in fast reload printout, in the strlist appendlist
  routine, and print out time statistics after the reload is done.

* - fast-reload, keep track of reloadtime and deletestime and print them.

* - fast-reload, keep track of constructtime and print it.

* - fast-reload, construct new items.

* - fast-reload, better comment.

* - fast-reload, reload the config and swap trees for forwards and stub hints.

* - fast-reload, in forwards_swap_tree set protection of trees with locks.

* - fast-reload, in hints_swap_tree also swap the node count of the trees.

* - fast-reload, reload ipc to stop and start threads.

* - fast-reload, unused forward declarations removed.

* - fast-reload, unit test that fast reload works with forwards and stubs.

* - fast-reload, fix clang analyzer warnings.

* - fast-reload, small documentation entry in unbound-control -h output.

* - fast-reload, print out memory use by fast reload, in bytes.

* - fast-reload, compile without threads.

* - fast-reload, document fast_reload in man page.

* - fast-reload, print ok when done successfully.

* - fast-reload, option for fast-reload commandline, +v verbosity option,
  with timing and memory use output.

* - fast-reload, option for fast-reload commandline, +p does not pause threads.

* - fast-reload, option for fast-reload commandline, +d drops mesh queries.

* - fast-reload, fix to poll every thread with nopause to make certain that
  resources are not held by the threads and can be deleted.

* - fast-reload, fix to use atomic store for config variables with nopause.

* - fast-reload, reload views.

* - fast-reload, when tag defines are different, it drops the queries.

* - fast-reload, fix tag define check.

* - fast-reload, document that tag change causes drop of queries.

* - fast-reload, fix space in documentation man page.

* - fast-reload, copy respip client information to query state, put views tree
  in module env for lookup.

* - fast-reload, nicer respip view comparison.

* - fast-reload, respip global set is in module env.

* - fast-reload, document that respip_client_info acl info is copied.

* - fast-reload, reload the respip_set.

* - fast-reload, document no pause and pick up of use_response_ip boolean.

* - fast-reload, fix test compile.

* - fast-reload, reload local zones.

* Update locking management for iter_fwd and iter_hints methods. (#1054)

fast reload, move most of the locking management to iter_fwd and
iter_hints methods. The caller still has the ability to handle its
own locking, if desired, for atomic operations on sets of different
structs.
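
As an illustration of that pattern, a minimal sketch (helper names are taken
from the swap routines mentioned elsewhere in this log; the exact signatures
are assumptions, not verified against the source):

    /* Caller-managed locking: take both write locks so the forwards and
     * stub hints trees change as one atomic operation, then release. */
    lock_rw_wrlock(&fwd->lock);
    lock_rw_wrlock(&hints->lock);
    forwards_swap_tree(fwd, new_fwd);   /* swaps the tree contents */
    hints_swap_tree(hints, new_hints);  /* also swaps the node count */
    lock_rw_unlock(&hints->lock);
    lock_rw_unlock(&fwd->lock);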

Co-authored-by: Wouter Wijngaards <wcawijngaards@users.noreply.github.com>

* - fast-reload, reload access-control.

* - fast-reload, reload access control interface, such as interface-action.

* - fast-reload, reload tcp-connection-limit.

* - fast-reload, improve comments on acl_list and tcl_list swap tree.

* - fast-reload, fixup references to old tcp connection limits in open tcp
  connections.

* - fast-reload, fixup to clean the tcp connection also when linked in a
  different order.

* - fast-reload, if no tcp connection limits existed, no need to remove
  references for that.

* - fast-reload, document more options that work and do not work.

* - fast-reload, reload auth_zone and rpz data.

* - fast-reload, fix auth_zones_get_mem.

* - fast-reload, fix compilation of testbound for the new comm_timer_get_mem
  reference in remote control.

* - fast-reload, change use_rpz with reload.

* - fast-reload, list changes in auth zones and stop zonemd callbacks for
  deleted auth zones.

* - fast-reload, note xtree is not swapped, and why it is not swapped.

* - fast-reload, for added auth zones, pick up zone transfer and zonemd tasks.

* - fast-reload, unlock xfr when done with transfer pick up.

* - fast-reload, unlock z when picking up the xfr for it during transfer task
  pick up.

* - fast-reload, pick up task changes for added, deleted and modified auth zones.

* - fast-reload, remove xfr of auth zone deletion without tasks.

* - fast-reload, pick up zone transfer config.

* - fast-reload, the main worker thread picks up the transfer tasks and also
  performs setup of the xfer struct.

* - fast-reload, keep writelock on newzone when auth zone changes.

* - fast-reload, change cachedb_enabled setting.

* - fast-reload, pick up edns-strings config.

* - fast-reload, note that settings are not updated.

* - fast-reload, pick up dnstap config.

* - fast-reload, dnstap options that need to be loaded without +p.

* - fast-reload, fix auth zone reload.

* - fast-reload, remove debug for auth zone test.

* - fast-reload, fix auth zone reload with zone transfer.

* - fast-reload, fix auth zone reload lock order.

* - fast-reload, remove debug from fast reload test.

* - fast-reload, remove unused function.

* - fast-reload, fix the worker trust anchor probe timer lock acquisition in
  the probe answer callback routine for trust anchor probes.

* - fast-reload, reload trust anchors.

* - fast-reload, fix trust anchor reload lock on autr global data and test
  for trust anchor reload.

* - fast-reload, adjust cache sizes.

* - fast-reload, reload cache sizes when changed.

* - fast-reload, reload validator env changes.

* - fast-reload, reload mesh changes.

* - fast-reload, check for incompatible changes.

* - fast-reload, improve error text for incompatible change.

* - fast-reload, fix check config option compatibility.

* - fast-reload, improve error text for nopause change.

* - fast-reload, fix spelling of incompatible options.

* - fast-reload, reload target-fetch-policy, outbound-msg-retry, max-sent-count
  and max-query-restarts.

* - fast-reload, check nopause config change for target-fetch-policy.

* - fast-reload, reload do-not-query-address, private-address and caps-exempt.

* - fast-reload, check nopause config change for do-not-query-address,
  private-address and caps-exempt.

* - fast-reload, check fast reload not possible due to interface and
  outgoing-interface changes.

* - fast-reload, reload nat64 settings.

* - fast-reload, reload settings stored in the infra structure.

* - fast-reload, fix modstack lookup and remove outgoing-range check.

* - fast-reload, more explanation for config parse failure.

* - fast-reload, reload worker outside network changes.

* - fast-reload, detect incompatible changes in network settings.

* - fast-reload, commit test files.

* - fast-reload, fix warnings for call types in windows compile.

* - fast-reload, fix warnings and comm_point_internal for tcp wouldblock calls.

* - fast-reload, extend lock checks for repeat thread ids.

* - fast-reload, additional test cases, cache change and tag changes.

* - fast-reload, fix documentation for auth_zone_verify_zonemd_with_key.

* - fast-reload, fix copy_cfg type casts and memory leak on config parse failure.

* - fast-reload, fix use of WSAPoll.

* Review comments for the fast reload feature (#1259)

* - fast-reload review, respip set can be null from a view.

* - fast-reload review, typos.

* - fast-reload review, keep clang static analyzer happy.

* - fast-reload review, don't forget to copy tag_actions.

* - fast-reload review, less indentation.

* - fast-reload review, don't leak respip_actions when reloading.

* - fast-reload review, protect against NULL pointer dereference in get_mem
  functions.

* - fast-reload review, add fast_reload_most_options.tdir to test most
  options with high verbosity when fast reloading.

* - fast-reload review, don't skip new line on long error printouts.

* - fast-reload review, typo.

* - fast-reload review, use new_z for consistency.

* - fast-reload review, nit for unlock ordering to make eye comparison
  with the lock counterpart easier.

* - fast-reload review, in case of error the sockets are already closed.

* - fast-reload review, indentation.

* - fast-reload review, add static keywords.

* - fast-reload review, update unbound-control usage text.

* - fast-reload review, updates to the man page.

* - fast-reload, the fast-reload command is experimental.

* - fast-reload, fix compile of doqclient for fast reload functions.

* Changelog comment for #1042
- Merge #1042: Fast Reload. The unbound-control fast_reload command is added.
  It reads the changed config in a thread, then only briefly pauses the
  service threads, which keep running. DNS service is interrupted only
  briefly, for less than a second.

---------

Co-authored-by: Yorgos Thessalonikefs <yorgos@nlnetlabs.nl>
2025-03-31 15:25:24 +02:00


/*
 * util/storage/lruhash.c - hashtable, hash function, LRU keeping.
 *
 * Copyright (c) 2007, NLnet Labs. All rights reserved.
 *
 * This software is open source.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the NLNET LABS nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * \file
 *
 * This file contains a hashtable with LRU keeping of entries.
 *
 */
#include "config.h"
#include "util/storage/lruhash.h"
#include "util/fptr_wlist.h"
void
bin_init(struct lruhash_bin* array, size_t size)
{
	size_t i;
#ifdef THREADS_DISABLED
	(void)array;
#endif
	for(i=0; i<size; i++) {
		lock_quick_init(&array[i].lock);
		lock_protect(&array[i].lock, &array[i],
			sizeof(struct lruhash_bin));
	}
}

struct lruhash*
lruhash_create(size_t start_size, size_t maxmem,
	lruhash_sizefunc_type sizefunc, lruhash_compfunc_type compfunc,
	lruhash_delkeyfunc_type delkeyfunc,
	lruhash_deldatafunc_type deldatafunc, void* arg)
{
	struct lruhash* table = (struct lruhash*)calloc(1,
		sizeof(struct lruhash));
	if(!table)
		return NULL;
	lock_quick_init(&table->lock);
	table->sizefunc = sizefunc;
	table->compfunc = compfunc;
	table->delkeyfunc = delkeyfunc;
	table->deldatafunc = deldatafunc;
	table->cb_arg = arg;
	table->size = start_size;
	table->size_mask = (int)(start_size-1);
	table->lru_start = NULL;
	table->lru_end = NULL;
	table->num = 0;
	table->space_used = 0;
	table->space_max = maxmem;
	table->max_collisions = 0;
	table->array = calloc(table->size, sizeof(struct lruhash_bin));
	if(!table->array) {
		lock_quick_destroy(&table->lock);
		free(table);
		return NULL;
	}
	bin_init(table->array, table->size);
	lock_protect(&table->lock, table, sizeof(*table));
	lock_protect(&table->lock, table->array,
		table->size*sizeof(struct lruhash_bin));
	return table;
}

void
bin_delete(struct lruhash* table, struct lruhash_bin* bin)
{
	struct lruhash_entry* p, *np;
	void *d;
	if(!bin)
		return;
	lock_quick_destroy(&bin->lock);
	p = bin->overflow_list;
	bin->overflow_list = NULL;
	while(p) {
		np = p->overflow_next;
		d = p->data;
		(*table->delkeyfunc)(p->key, table->cb_arg);
		(*table->deldatafunc)(d, table->cb_arg);
		p = np;
	}
}

void
bin_split(struct lruhash* table, struct lruhash_bin* newa,
	int newmask)
{
	size_t i;
	struct lruhash_entry *p, *np;
	struct lruhash_bin* newbin;
	/* move entries to the new table. Notice that since hash x is mapped
	 * to bin x & mask, and the new mask uses one more bit, all entries
	 * in one bin will go into the old bin or bin | newbit */
#ifndef THREADS_DISABLED
	int newbit = newmask - table->size_mask;
#endif
	/* so, really, this task could also be threaded, per bin. */
	/* LRU list is not changed */
	for(i=0; i<table->size; i++) {
		lock_quick_lock(&table->array[i].lock);
		p = table->array[i].overflow_list;
		/* lock both destination bins */
		lock_quick_lock(&newa[i].lock);
		lock_quick_lock(&newa[newbit|i].lock);
		while(p) {
			np = p->overflow_next;
			/* link into correct new bin */
			newbin = &newa[p->hash & newmask];
			p->overflow_next = newbin->overflow_list;
			newbin->overflow_list = p;
			p = np;
		}
		lock_quick_unlock(&newa[i].lock);
		lock_quick_unlock(&newa[newbit|i].lock);
		lock_quick_unlock(&table->array[i].lock);
	}
}

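/*
 * Worked example for the split above (numbers illustrative): with old
 * size_mask 0x7 and newmask 0xf, newbit is 0x8. An entry with hash 0x2a
 * lives in bin 0x2a & 0x7 = 2 before the grow and in bin 0x2a & 0xf = 0xa,
 * which is 2 | newbit, afterwards; an entry with hash 0x22 stays in bin 2.
 * So bin i only ever feeds bins i and i|newbit, which is why just those
 * two destination bins are locked.
 */
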
void
lruhash_delete(struct lruhash* table)
{
	size_t i;
	if(!table)
		return;
	/* destroy lock on hashtable to force a check that it is OK
	 * (not held) */
	lock_quick_destroy(&table->lock);
	for(i=0; i<table->size; i++)
		bin_delete(table, &table->array[i]);
	free(table->array);
	free(table);
}

void
bin_overflow_remove(struct lruhash_bin* bin, struct lruhash_entry* entry)
{
	struct lruhash_entry* p = bin->overflow_list;
	struct lruhash_entry** prevp = &bin->overflow_list;
	while(p) {
		if(p == entry) {
			*prevp = p->overflow_next;
			return;
		}
		prevp = &p->overflow_next;
		p = p->overflow_next;
	}
}

void
reclaim_space(struct lruhash* table, struct lruhash_entry** list)
{
	struct lruhash_entry* d;
	struct lruhash_bin* bin;
	log_assert(table);
	/* does not delete MRU entry, so table will not be empty. */
	while(table->num > 1 && table->space_used > table->space_max) {
		/* notice that since we hold the hashtable lock, nobody
		 * can change the lru chain. So it cannot be deleted
		 * underneath us. We still need the hashbin and entry
		 * write lock to make sure we flush all users away from
		 * the entry. Users are unlikely, since this is the LRU
		 * end; if someone had grabbed a rdlock the entry would
		 * have moved to the front. But we lock to be sure. */
		d = table->lru_end;
		/* specialised, delete from end of double linked list,
		 * and we know num>1, so there is a previous lru entry. */
		log_assert(d && d->lru_prev);
		table->lru_end = d->lru_prev;
		d->lru_prev->lru_next = NULL;
		/* schedule entry for deletion */
		bin = &table->array[d->hash & table->size_mask];
		table->num--;
		lock_quick_lock(&bin->lock);
		bin_overflow_remove(bin, d);
		d->overflow_next = *list;
		*list = d;
		lock_rw_wrlock(&d->lock);
		table->space_used -= table->sizefunc(d->key, d->data);
		if(table->markdelfunc)
			(*table->markdelfunc)(d->key);
		lock_rw_unlock(&d->lock);
		lock_quick_unlock(&bin->lock);
	}
}

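/*
 * Note on reclaim_space above: it only unlinks entries from their bin and
 * the LRU list while the table lock is held; the unlinked entries are
 * chained on *list via overflow_next and freed by the caller after the
 * table lock is released (see the reclaimlist loops further down), which
 * keeps the critical region short.
 */
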
struct lruhash_entry*
bin_find_entry(struct lruhash* table,
	struct lruhash_bin* bin, hashvalue_type hash, void* key,
	size_t* collisions)
{
	size_t c = 0;
	struct lruhash_entry* p = bin->overflow_list;
	while(p) {
		if(p->hash == hash && table->compfunc(p->key, key) == 0)
			break;
		c++;
		p = p->overflow_next;
	}
	if(collisions != NULL)
		*collisions = c;
	return p;
}

void
table_grow(struct lruhash* table)
{
	struct lruhash_bin* newa;
	int newmask;
	size_t i;
	if(table->size_mask == (int)(((size_t)-1)>>1)) {
		log_err("hash array malloc: size_t too small");
		return;
	}
	/* try to allocate new array, if not fail */
	newa = calloc(table->size*2, sizeof(struct lruhash_bin));
	if(!newa) {
		log_err("hash grow: malloc failed");
		/* continue with the smaller array. Though it is slower. */
		return;
	}
	bin_init(newa, table->size*2);
	newmask = (table->size_mask << 1) | 1;
	bin_split(table, newa, newmask);
	/* delete the old bins */
	lock_unprotect(&table->lock, table->array);
	for(i=0; i<table->size; i++) {
		lock_quick_destroy(&table->array[i].lock);
	}
	free(table->array);
	table->size *= 2;
	table->size_mask = newmask;
	table->array = newa;
	lock_protect(&table->lock, table->array,
		table->size*sizeof(struct lruhash_bin));
	return;
}

void
lru_front(struct lruhash* table, struct lruhash_entry* entry)
{
	entry->lru_prev = NULL;
	entry->lru_next = table->lru_start;
	if(!table->lru_start)
		table->lru_end = entry;
	else	table->lru_start->lru_prev = entry;
	table->lru_start = entry;
}

void
lru_remove(struct lruhash* table, struct lruhash_entry* entry)
{
	if(entry->lru_prev)
		entry->lru_prev->lru_next = entry->lru_next;
	else	table->lru_start = entry->lru_next;
	if(entry->lru_next)
		entry->lru_next->lru_prev = entry->lru_prev;
	else	table->lru_end = entry->lru_prev;
}

void
lru_touch(struct lruhash* table, struct lruhash_entry* entry)
{
	log_assert(table && entry);
	if(entry == table->lru_start)
		return; /* nothing to do */
	/* remove from current lru position */
	lru_remove(table, entry);
	/* add at front */
	lru_front(table, entry);
}

void
lruhash_insert(struct lruhash* table, hashvalue_type hash,
	struct lruhash_entry* entry, void* data, void* cb_arg)
{
	struct lruhash_bin* bin;
	struct lruhash_entry* found, *reclaimlist=NULL;
	size_t need_size;
	size_t collisions;
	fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
	fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
	fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
	fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
	fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
	need_size = table->sizefunc(entry->key, data);
	if(cb_arg == NULL) cb_arg = table->cb_arg;
	/* find bin */
	lock_quick_lock(&table->lock);
	bin = &table->array[hash & table->size_mask];
	lock_quick_lock(&bin->lock);
	/* see if entry exists already */
	if(!(found=bin_find_entry(table, bin, hash, entry->key,
		&collisions))) {
		/* if not: add to bin */
		entry->overflow_next = bin->overflow_list;
		bin->overflow_list = entry;
		lru_front(table, entry);
		table->num++;
		if(table->max_collisions < collisions)
			table->max_collisions = collisions;
		table->space_used += need_size;
	} else {
		/* if so: update data - needs a writelock */
		table->space_used += need_size -
			(*table->sizefunc)(found->key, found->data);
		(*table->delkeyfunc)(entry->key, cb_arg);
		lru_touch(table, found);
		lock_rw_wrlock(&found->lock);
		(*table->deldatafunc)(found->data, cb_arg);
		found->data = data;
		lock_rw_unlock(&found->lock);
	}
	lock_quick_unlock(&bin->lock);
	if(table->space_used > table->space_max)
		reclaim_space(table, &reclaimlist);
	if(table->num >= table->size)
		table_grow(table);
	lock_quick_unlock(&table->lock);
	/* finish reclaim if any (outside of critical region) */
	while(reclaimlist) {
		struct lruhash_entry* n = reclaimlist->overflow_next;
		void* d = reclaimlist->data;
		(*table->delkeyfunc)(reclaimlist->key, cb_arg);
		(*table->deldatafunc)(d, cb_arg);
		reclaimlist = n;
	}
}

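/*
 * Note on lruhash_insert above: when the key already exists, the passed
 * entry is not linked in; its key is disposed of with delkeyfunc and the
 * new data replaces the old data of the existing entry. The caller should
 * therefore not use the passed entry struct after the call; ownership of
 * key and data transfers to the table in both branches.
 */
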
struct lruhash_entry*
lruhash_lookup(struct lruhash* table, hashvalue_type hash, void* key, int wr)
{
	struct lruhash_entry* entry;
	struct lruhash_bin* bin;
	fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
	lock_quick_lock(&table->lock);
	bin = &table->array[hash & table->size_mask];
	lock_quick_lock(&bin->lock);
	if((entry=bin_find_entry(table, bin, hash, key, NULL)))
		lru_touch(table, entry);
	lock_quick_unlock(&table->lock);
	if(entry) {
		if(wr) { lock_rw_wrlock(&entry->lock); }
		else { lock_rw_rdlock(&entry->lock); }
	}
	lock_quick_unlock(&bin->lock);
	return entry;
}

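/*
 * Note on lruhash_lookup above: on success the entry is returned with its
 * rwlock held (read or write according to wr), so the caller must call
 * lock_rw_unlock(&entry->lock) when done. The entry lock is acquired
 * before the bin lock is released, so the entry cannot be deleted in
 * between.
 */
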
void
lruhash_remove(struct lruhash* table, hashvalue_type hash, void* key)
{
	struct lruhash_entry* entry;
	struct lruhash_bin* bin;
	void *d;
	fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
	fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
	fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
	fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
	fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
	lock_quick_lock(&table->lock);
	bin = &table->array[hash & table->size_mask];
	lock_quick_lock(&bin->lock);
	if((entry=bin_find_entry(table, bin, hash, key, NULL))) {
		bin_overflow_remove(bin, entry);
		lru_remove(table, entry);
	} else {
		lock_quick_unlock(&table->lock);
		lock_quick_unlock(&bin->lock);
		return;
	}
	table->num--;
	table->space_used -= (*table->sizefunc)(entry->key, entry->data);
	lock_rw_wrlock(&entry->lock);
	if(table->markdelfunc)
		(*table->markdelfunc)(entry->key);
	lock_rw_unlock(&entry->lock);
	lock_quick_unlock(&bin->lock);
	lock_quick_unlock(&table->lock);
	/* finish removal */
	d = entry->data;
	(*table->delkeyfunc)(entry->key, table->cb_arg);
	(*table->deldatafunc)(d, table->cb_arg);
}

/** clear bin, respecting locks, does not do space, LRU */
static void
bin_clear(struct lruhash* table, struct lruhash_bin* bin)
{
	struct lruhash_entry* p, *np;
	void *d;
	lock_quick_lock(&bin->lock);
	p = bin->overflow_list;
	while(p) {
		lock_rw_wrlock(&p->lock);
		np = p->overflow_next;
		d = p->data;
		if(table->markdelfunc)
			(*table->markdelfunc)(p->key);
		lock_rw_unlock(&p->lock);
		(*table->delkeyfunc)(p->key, table->cb_arg);
		(*table->deldatafunc)(d, table->cb_arg);
		p = np;
	}
	bin->overflow_list = NULL;
	lock_quick_unlock(&bin->lock);
}

void
lruhash_clear(struct lruhash* table)
{
	size_t i;
	if(!table)
		return;
	fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
	fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
	fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
	lock_quick_lock(&table->lock);
	for(i=0; i<table->size; i++) {
		bin_clear(table, &table->array[i]);
	}
	table->lru_start = NULL;
	table->lru_end = NULL;
	table->num = 0;
	table->space_used = 0;
	lock_quick_unlock(&table->lock);
}

void
lruhash_status(struct lruhash* table, const char* id, int extended)
{
	lock_quick_lock(&table->lock);
	log_info("%s: %u entries, memory %u / %u",
		id, (unsigned)table->num, (unsigned)table->space_used,
		(unsigned)table->space_max);
	log_info(" itemsize %u, array %u, mask %d",
		(unsigned)(table->num? table->space_used/table->num : 0),
		(unsigned)table->size, table->size_mask);
	if(extended) {
		size_t i;
		int min=(int)table->size*2, max=-2;
		for(i=0; i<table->size; i++) {
			int here = 0;
			struct lruhash_entry *en;
			lock_quick_lock(&table->array[i].lock);
			en = table->array[i].overflow_list;
			while(en) {
				here++;
				en = en->overflow_next;
			}
			lock_quick_unlock(&table->array[i].lock);
			if(extended >= 2)
				log_info("bin[%d] %d", (int)i, here);
			if(here > max) max = here;
			if(here < min) min = here;
		}
		log_info(" bin min %d, avg %.2lf, max %d", min,
			(double)table->num/(double)table->size, max);
	}
	lock_quick_unlock(&table->lock);
}

size_t
lruhash_get_mem(struct lruhash* table)
{
	size_t s;
	lock_quick_lock(&table->lock);
	s = sizeof(struct lruhash) + table->space_used;
#ifdef USE_THREAD_DEBUG
	if(table->size != 0) {
		size_t i;
		for(i=0; i<table->size; i++)
			s += sizeof(struct lruhash_bin) +
				lock_get_mem(&table->array[i].lock);
	}
#else /* no THREAD_DEBUG */
	if(table->size != 0)
		s += (table->size)*(sizeof(struct lruhash_bin) +
			lock_get_mem(&table->array[0].lock));
#endif
	lock_quick_unlock(&table->lock);
	s += lock_get_mem(&table->lock);
	return s;
}

void
lruhash_setmarkdel(struct lruhash* table, lruhash_markdelfunc_type md)
{
	lock_quick_lock(&table->lock);
	table->markdelfunc = md;
	lock_quick_unlock(&table->lock);
}

void
lruhash_update_space_used(struct lruhash* table, void* cb_arg, int diff_size)
{
	struct lruhash_entry *reclaimlist = NULL;
	fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
	fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
	fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
	fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
	if(cb_arg == NULL) cb_arg = table->cb_arg;
	/* update space used */
	lock_quick_lock(&table->lock);
	if((int)table->space_used + diff_size < 0)
		table->space_used = 0;
	else	table->space_used = (size_t)((int)table->space_used + diff_size);
	if(table->space_used > table->space_max)
		reclaim_space(table, &reclaimlist);
	lock_quick_unlock(&table->lock);
	/* finish reclaim if any (outside of critical region) */
	while(reclaimlist) {
		struct lruhash_entry* n = reclaimlist->overflow_next;
		void* d = reclaimlist->data;
		(*table->delkeyfunc)(reclaimlist->key, cb_arg);
		(*table->deldatafunc)(d, cb_arg);
		reclaimlist = n;
	}
}

void
lruhash_update_space_max(struct lruhash* table, void* cb_arg, size_t max)
{
	struct lruhash_entry *reclaimlist = NULL;
	fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
	fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
	fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
	fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
	if(cb_arg == NULL) cb_arg = table->cb_arg;
	/* update space max */
	lock_quick_lock(&table->lock);
	table->space_max = max;
	if(table->space_used > table->space_max)
		reclaim_space(table, &reclaimlist);
	lock_quick_unlock(&table->lock);
	/* finish reclaim if any (outside of critical region) */
	while(reclaimlist) {
		struct lruhash_entry* n = reclaimlist->overflow_next;
		void* d = reclaimlist->data;
		(*table->delkeyfunc)(reclaimlist->key, cb_arg);
		(*table->deldatafunc)(d, cb_arg);
		reclaimlist = n;
	}
}

void
lruhash_traverse(struct lruhash* h, int wr,
	void (*func)(struct lruhash_entry*, void*), void* arg)
{
	size_t i;
	struct lruhash_entry* e;
	lock_quick_lock(&h->lock);
	for(i=0; i<h->size; i++) {
		lock_quick_lock(&h->array[i].lock);
		for(e = h->array[i].overflow_list; e; e = e->overflow_next) {
			if(wr) {
				lock_rw_wrlock(&e->lock);
			} else {
				lock_rw_rdlock(&e->lock);
			}
			(*func)(e, arg);
			lock_rw_unlock(&e->lock);
		}
		lock_quick_unlock(&h->array[i].lock);
	}
	lock_quick_unlock(&h->lock);
}

/*
 * Demote: the opposite of touch, move an entry to the bottom
 * of the LRU pile.
 */
void
lru_demote(struct lruhash* table, struct lruhash_entry* entry)
{
	log_assert(table && entry);
	if(entry == table->lru_end)
		return; /* nothing to do */
	/* remove from current lru position */
	lru_remove(table, entry);
	/* add at end */
	entry->lru_next = NULL;
	entry->lru_prev = table->lru_end;
	if(table->lru_end == NULL) {
		table->lru_start = entry;
	} else {
		table->lru_end->lru_next = entry;
	}
	table->lru_end = entry;
}

struct lruhash_entry*
lruhash_insert_or_retrieve(struct lruhash* table, hashvalue_type hash,
	struct lruhash_entry* entry, void* data, void* cb_arg)
{
	struct lruhash_bin* bin;
	struct lruhash_entry* found, *reclaimlist = NULL;
	size_t need_size;
	size_t collisions;
	fptr_ok(fptr_whitelist_hash_sizefunc(table->sizefunc));
	fptr_ok(fptr_whitelist_hash_delkeyfunc(table->delkeyfunc));
	fptr_ok(fptr_whitelist_hash_deldatafunc(table->deldatafunc));
	fptr_ok(fptr_whitelist_hash_compfunc(table->compfunc));
	fptr_ok(fptr_whitelist_hash_markdelfunc(table->markdelfunc));
	need_size = table->sizefunc(entry->key, data);
	if(cb_arg == NULL) cb_arg = table->cb_arg;
	/* find bin */
	lock_quick_lock(&table->lock);
	bin = &table->array[hash & table->size_mask];
	lock_quick_lock(&bin->lock);
	/* see if entry exists already */
	if((found = bin_find_entry(table, bin, hash, entry->key,
		&collisions)) != NULL) {
		/* if so: keep the existing data - acquire a writelock */
		lock_rw_wrlock(&found->lock);
	} else {
		/* if not: add to bin */
		entry->overflow_next = bin->overflow_list;
		bin->overflow_list = entry;
		lru_front(table, entry);
		table->num++;
		if(table->max_collisions < collisions)
			table->max_collisions = collisions;
		table->space_used += need_size;
		/* return the entry that was presented, and lock it */
		found = entry;
		lock_rw_wrlock(&found->lock);
	}
	lock_quick_unlock(&bin->lock);
	if(table->space_used > table->space_max)
		reclaim_space(table, &reclaimlist);
	if(table->num >= table->size)
		table_grow(table);
	lock_quick_unlock(&table->lock);
	/* finish reclaim if any (outside of critical region) */
	while(reclaimlist) {
		struct lruhash_entry* n = reclaimlist->overflow_next;
		void* d = reclaimlist->data;
		(*table->delkeyfunc)(reclaimlist->key, cb_arg);
		(*table->deldatafunc)(d, cb_arg);
		reclaimlist = n;
	}
	/* return the entry that was selected */
	return found;
}
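
/*
 * Usage note for lruhash_insert_or_retrieve above (illustrative):
 *
 *	e = lruhash_insert_or_retrieve(t, h, entry, entry->data, NULL);
 *	... e is write-locked here; it is either the passed entry, now
 *	    inserted, or the previously existing entry for that key ...
 *	lock_rw_unlock(&e->lock);
 *
 * Unlike lruhash_insert, existing data is kept rather than replaced, and
 * in that case the passed entry remains owned by the caller.
 */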