mirror of
https://github.com/NLnetLabs/unbound.git
synced 2025-12-20 23:00:56 -05:00
* - fast-reload, add unbound-control fast_reload * - fast-reload, make a thread to service the unbound-control command. * - fast-reload, communication sockets for information transfer. * - fast-reload, fix compile for unbound-dnstap-socket. * - fast-reload, set nonblocking communication to keep the server thread responding to DNS requests. * - fast-reload, poll routine to test for readiness, timeout fails connection. * - fast-reload, detect loop in sock_poll_timeout routine. * - fast-reload, send done and exited notification. * - fast-reload, defines for constants in ipc. * - fast-reload, ipc socket recv and send resists partial reads and writes and can continue byte by byte. Also it can continue after an interrupt. * - fast-reload, send exit command to thread when done. * - fast-reload, output strings for client on string list. * - fast-reload, add newline to terminal output. * - fast-reload, send client string to remote client. * - fast-reload, better debug output. * - fast-reload, print queue structure, for output to the remote client. * - fast-reload, move print items to print queue from fast_reload_thread struct. * - fast-reload, keep list of pending print queue items in daemon struct. * - fast-reload, comment explains in_list for printq to print remainder. * - fast-reload, unit test testdata/fast_reload_thread.tdir that tests the thread output. * - fast-reload, fix test link for fast_reload_printq_list_delete function. * - fast-reload, reread config file from disk. * - fast-reload, unshare forwards, making the structure locked, with an rwlock. * - fast-reload, for nonthreaded, the unbound-control commands forward, forward_add and forward_delete should be distributed to other processes, but when threaded, they should not be distributed to other threads because the structure is not thread specific any more. * - fast-reload, unshared stub hints, making the structure locked, with an rwlock. * - fast-reload, helpful comments for hints lookup function return value. 
* - fast-reload, fix bug in fast reload printout, the strlist appendlist routine, and printout time statistics after the reload is done. * - fast-reload, keep track of reloadtime and deletestime and print them. * - fast-reload, keep track of constructtime and print it. * - fast-reload, construct new items. * - fast-reload, better comment. * - fast-reload, reload the config and swap trees for forwards and stub hints. * - fast-reload, in forwards_swap_tree set protection of trees with locks. * - fast-reload, in hints_swap_tree also swap the node count of the trees. * - fast-reload, reload ipc to stop and start threads. * - fast-reload, unused forward declarations removed. * - fast-reload, unit test that fast reload works with forwards and stubs. * - fast-reload, fix clang analyzer warnings. * - fast-reload, small documentation entry in unbound-control -h output. * - fast-reload, printout memory use by fast reload, in bytes. * - fast-reload, compile without threads. * - fast-reload, document fast_reload in man page. * - fast-reload, print ok when done successfully. * - fast-reload, option for fast-reload commandline, +v verbosity option, with timing and memory use output. * - fast-reload, option for fast-reload commandline, +p does not pause threads. * - fast-reload, option for fast-reload commandline, +d drops mesh queries. * - fast-reload, fix to poll every thread with nopause to make certain that resources are not held by the threads and can be deleted. * - fast-reload, fix to use atomic store for config variables with nopause. * - fast-reload, reload views. * - fast-reload, when tag defines are different, it drops the queries. * - fast-reload, fix tag define check. * - fast-reload, document that tag change causes drop of queries. * - fast-reload, fix space in documentation man page. * - fast-reload, copy respip client information to query state, put views tree in module env for lookup. * - fast-reload, nicer respip view comparison. 
* - fast-reload, respip global set is in module env. * - fast-reload, document that respip_client_info acl info is copied. * - fast-reload, reload the respip_set. * - fast-reload, document no pause and pick up of use_response_ip boolean. * - fast-reload, fix test compile. * - fast-reload, reload local zones. * Update locking management for iter_fwd and iter_hints methods. (#1054) fast reload, move most of the locking management to iter_fwd and iter_hints methods. The caller still has the ability to handle its own locking, if desired, for atomic operations on sets of different structs. Co-authored-by: Wouter Wijngaards <wcawijngaards@users.noreply.github.com> * - fast-reload, reload access-control. * - fast-reload, reload access control interface, such as interface-action. * - fast-reload, reload tcp-connection-limit. * - fast-reload, improve comments on acl_list and tcl_list swap tree. * - fast-reload, fixup references to old tcp connection limits in open tcp connections. * - fast-reload, fixup to clean tcp connection also for different linked order. * - fast-reload, if no tcp connection limits existed, no need to remove references for that. * - fast-reload, document more options that work and do not work. * - fast-reload, reload auth_zone and rpz data. * - fast-reload, fix auth_zones_get_mem. * - fast-reload, fix compilation of testbound for the new comm_timer_get_mem reference in remote control. * - fast-reload, change use_rpz with reload. * - fast-reload, list changes in auth zones and stop zonemd callbacks for deleted auth zones. * - fast-reload, note xtree is not swapped, and why it is not swapped. * - fast-reload, for added auth zones, pick up zone transfer and zonemd tasks. * - fast-reload, unlock xfr when done with transfer pick up. * - fast-reload, unlock z when picking up the xfr for it during transfer task pick up. * - fast-reload, pick up task changes for added, deleted and modified auth zones. 
* - fast-reload, remove xfr of auth zone deletion without tasks. * - fast-reload, pick up zone transfer config. * - fast-reload, the main worker thread picks up the transfer tasks and also performs setup of the xfer struct. * - fast-reload, keep writelock on newzone when auth zone changes. * - fast-reload, change cachedb_enabled setting. * - fast-reload, pick up edns-strings config. * - fast-reload, note that settings are not updated. * - fast-reload, pick up dnstap config. * - fast-reload, dnstap options that need to be loaded without +p. * - fast-reload, fix auth zone reload * - fast-reload, remove debug for auth zone test. * - fast-reload, fix auth zone reload with zone transfer. * - fast-reload, fix auth zone reload lock order. * - fast-reload, remove debug from fast reload test. * - fast-reload, remove unused function. * - fast-reload, fix the worker trust anchor probe timer lock acquisition in the probe answer callback routine for trust anchor probes. * - fast-reload, reload trust anchors. * - fast-reload, fix trust anchor reload lock on autr global data and test for trust anchor reload. * - fast-reload, adjust cache sizes. * - fast-reload, reload cache sizes when changed. * - fast-reload, reload validator env changes. * - fast-reload, reload mesh changes. * - fast-reload, check for incompatible changes. * - fast-reload, improve error text for incompatible change. * - fast-reload, fix check config option compatibility. * - fast-reload, improve error text for nopause change. * - fast-reload, fix spelling of incompatible options. * - fast-reload, reload target-fetch-policy, outbound-msg-retry, max-sent-count and max-query-restarts. * - fast-reload, check nopause config change for target-fetch-policy. * - fast-reload, reload do-not-query-address, private-address and capt-exempt. * - fast-reload, check nopause config change for do-not-query-address, private-address and capt-exempt. 
* - fast-reload, check fast reload not possible due to interface and outgoing-interface changes. * - fast-reload, reload nat64 settings. * - fast-reload, reload settings stored in the infra structure. * - fast-reload, fix modstack lookup and remove outgoing-range check. * - fast-reload, more explanation for config parse failure. * - fast-reload, reload worker outside network changes. * - fast-reload, detect incompatible changes in network settings. * fast-reload, commit test files. * - fast-reload, fix warnings for call types in windows compile. * - fast-reload, fix warnings and comm_point_internal for tcp wouldblock calls. * - fast-reload, extend lock checks for repeat thread ids. * - fast-reload, additional test cases, cache change and tag changes. * - fast-reload, fix documentation for auth_zone_verify_zonemd_with_key. * - fast-reload, fix copy_cfg type casts and memory leak on config parse failure. * - fast-reload, fix use of WSAPoll. * Review comments for the fast reload feature (#1259) * - fast-reload review, respip set can be null from a view. * - fast-reload review, typos. * - fast-reload review, keep clang static analyzer happy. * - fast-reload review, don't forget to copy tag_actions. * - fast-reload review, less indentation. * - fast-reload review, don't leak respip_actions when reloading. * - fast-reload review, protect NULL pointer dereference in get_mem functions. * - fast-reload review, add fast_reload_most_options.tdir to test most options with high verbosity when fast reloading. * - fast-reload review, don't skip new line on long error printouts. * - fast-reload review, typo. * - fast-reload review, use new_z for consistency. * - fast-reload review, nit for unlock ordering to make eye comparison with the lock counterpart easier. * - fast-reload review, in case of error the sockets are already closed. * - fast-reload review, identation. * - fast-reload review, add static keywords. * - fast-reload review, update unbound-control usage text. 
* - fast-reload review, updates to the man page. * - fast-reload, the fast-reload command is experimental. * - fast-reload, fix compile of doqclient for fast reload functions. * Changelog comment for #1042 - Merge #1042: Fast Reload. The unbound-control fast_reload is added. It reads changed config in a thread, then only briefly pauses the service threads, that keep running. DNS service is only interrupted briefly, less than a second. --------- Co-authored-by: Yorgos Thessalonikefs <yorgos@nlnetlabs.nl>
278 lines
7.6 KiB
C
278 lines
7.6 KiB
C
/*
|
|
* util/storage/slabhash.c - hashtable consisting of several smaller tables.
|
|
*
|
|
* Copyright (c) 2007, NLnet Labs. All rights reserved.
|
|
*
|
|
* This software is open source.
|
|
*
|
|
* Redistribution and use in source and binary forms, with or without
|
|
* modification, are permitted provided that the following conditions
|
|
* are met:
|
|
*
|
|
* Redistributions of source code must retain the above copyright notice,
|
|
* this list of conditions and the following disclaimer.
|
|
*
|
|
* Redistributions in binary form must reproduce the above copyright notice,
|
|
* this list of conditions and the following disclaimer in the documentation
|
|
* and/or other materials provided with the distribution.
|
|
*
|
|
* Neither the name of the NLNET LABS nor the names of its contributors may
|
|
* be used to endorse or promote products derived from this software without
|
|
* specific prior written permission.
|
|
*
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
|
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
|
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
|
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
|
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
|
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
*/
|
|
|
|
/**
|
|
* \file
|
|
*
|
|
* Implementation of hash table that consists of smaller hash tables.
|
|
* This results in a partitioned lruhash table.
|
|
* It cannot grow, but that gives it the ability to have multiple
|
|
* locks. Also this means there are multiple LRU lists.
|
|
*/
|
|
|
|
#include "config.h"
|
|
#include "util/storage/slabhash.h"
|
|
|
|
struct slabhash* slabhash_create(size_t numtables, size_t start_size,
|
|
size_t maxmem, lruhash_sizefunc_type sizefunc,
|
|
lruhash_compfunc_type compfunc, lruhash_delkeyfunc_type delkeyfunc,
|
|
lruhash_deldatafunc_type deldatafunc, void* arg)
|
|
{
|
|
size_t i;
|
|
struct slabhash* sl = (struct slabhash*)calloc(1,
|
|
sizeof(struct slabhash));
|
|
if(!sl) return NULL;
|
|
sl->size = numtables;
|
|
log_assert(sl->size > 0);
|
|
sl->array = (struct lruhash**)calloc(sl->size, sizeof(struct lruhash*));
|
|
if(!sl->array) {
|
|
free(sl);
|
|
return NULL;
|
|
}
|
|
sl->mask = (uint32_t)(sl->size - 1);
|
|
if(sl->mask == 0) {
|
|
sl->shift = 0;
|
|
} else {
|
|
log_assert( (sl->size & sl->mask) == 0
|
|
/* size must be power of 2 */ );
|
|
sl->shift = 0;
|
|
while(!(sl->mask & 0x80000000)) {
|
|
sl->mask <<= 1;
|
|
sl->shift ++;
|
|
}
|
|
}
|
|
for(i=0; i<sl->size; i++) {
|
|
sl->array[i] = lruhash_create(start_size, maxmem / sl->size,
|
|
sizefunc, compfunc, delkeyfunc, deldatafunc, arg);
|
|
if(!sl->array[i]) {
|
|
slabhash_delete(sl);
|
|
return NULL;
|
|
}
|
|
}
|
|
return sl;
|
|
}
|
|
|
|
void slabhash_delete(struct slabhash* sl)
|
|
{
|
|
if(!sl)
|
|
return;
|
|
if(sl->array) {
|
|
size_t i;
|
|
for(i=0; i<sl->size; i++)
|
|
lruhash_delete(sl->array[i]);
|
|
free(sl->array);
|
|
}
|
|
free(sl);
|
|
}
|
|
|
|
void slabhash_clear(struct slabhash* sl)
|
|
{
|
|
size_t i;
|
|
if(!sl)
|
|
return;
|
|
for(i=0; i<sl->size; i++)
|
|
lruhash_clear(sl->array[i]);
|
|
}
|
|
|
|
/** helper routine to calculate the slabhash index */
|
|
static unsigned int
|
|
slab_idx(struct slabhash* sl, hashvalue_type hash)
|
|
{
|
|
return ((hash & sl->mask) >> sl->shift);
|
|
}
|
|
|
|
/** Insert an entry into the subtable selected by its hash value. */
void slabhash_insert(struct slabhash* sl, hashvalue_type hash,
	struct lruhash_entry* entry, void* data, void* arg)
{
	struct lruhash* table = sl->array[slab_idx(sl, hash)];
	lruhash_insert(table, hash, entry, data, arg);
}
|
|
|
|
struct lruhash_entry* slabhash_lookup(struct slabhash* sl,
|
|
hashvalue_type hash, void* key, int wr)
|
|
{
|
|
return lruhash_lookup(sl->array[slab_idx(sl, hash)], hash, key, wr);
|
|
}
|
|
|
|
/** Remove an entry from the subtable selected by the hash value. */
void slabhash_remove(struct slabhash* sl, hashvalue_type hash, void* key)
{
	struct lruhash* table = sl->array[slab_idx(sl, hash)];
	lruhash_remove(table, hash, key);
}
|
|
|
|
void slabhash_status(struct slabhash* sl, const char* id, int extended)
|
|
{
|
|
size_t i;
|
|
char num[17];
|
|
log_info("Slabhash %s: %u tables mask=%x shift=%d",
|
|
id, (unsigned)sl->size, (unsigned)sl->mask, sl->shift);
|
|
for(i=0; i<sl->size; i++) {
|
|
snprintf(num, sizeof(num), "table %u", (unsigned)i);
|
|
lruhash_status(sl->array[i], num, extended);
|
|
}
|
|
}
|
|
|
|
size_t slabhash_get_size(struct slabhash* sl)
|
|
{
|
|
size_t i, total = 0;
|
|
for(i=0; i<sl->size; i++) {
|
|
lock_quick_lock(&sl->array[i]->lock);
|
|
total += sl->array[i]->space_max;
|
|
lock_quick_unlock(&sl->array[i]->lock);
|
|
}
|
|
return total;
|
|
}
|
|
|
|
/** Test whether the slabhash has the given total size and slab count.
 * Returns 1 on a match, 0 otherwise (including for a NULL table). */
int slabhash_is_size(struct slabhash* sl, size_t size, size_t slabs)
{
	if(!sl || slabs == 0)
		return 0;
	if(sl->size != slabs)
		return 0;
	/* round size down to a multiple of slabs before comparing:
	 * creation divided the budget by slabs, so any uneven
	 * remainder was lost and must be ignored here too */
	if((size/slabs)*slabs == slabhash_get_size(sl))
		return 1;
	return 0;
}
|
|
|
|
/** Adjust the used-space accounting of the subtable selected by hash
 * by diff_size bytes (may be negative). */
void slabhash_update_space_used(struct slabhash* sl, hashvalue_type hash,
	void* cb_arg, int diff_size)
{
	struct lruhash* table = sl->array[slab_idx(sl, hash)];
	lruhash_update_space_used(table, cb_arg, diff_size);
}
|
|
|
|
size_t slabhash_get_mem(struct slabhash* sl)
|
|
{
|
|
size_t i, total = sizeof(*sl);
|
|
total += sizeof(struct lruhash*)*sl->size;
|
|
for(i=0; i<sl->size; i++) {
|
|
total += lruhash_get_mem(sl->array[i]);
|
|
}
|
|
return total;
|
|
}
|
|
|
|
/** Return the subtable that stores entries with the given hash value. */
struct lruhash* slabhash_gettable(struct slabhash* sl, hashvalue_type hash)
{
	unsigned int idx = slab_idx(sl, hash);
	return sl->array[idx];
}
|
|
|
|
/* test code, here to avoid linking problems with fptr_wlist */
|
|
/** delete key */
|
|
static void delkey(struct slabhash_testkey* k) {
|
|
lock_rw_destroy(&k->entry.lock); free(k);}
|
|
/** delete test data by freeing it */
static void deldata(struct slabhash_testdata* d)
{
	free(d);
}
|
|
|
|
size_t test_slabhash_sizefunc(void* ATTR_UNUSED(key), void* ATTR_UNUSED(data))
|
|
{
|
|
return sizeof(struct slabhash_testkey) +
|
|
sizeof(struct slabhash_testdata);
|
|
}
|
|
|
|
int test_slabhash_compfunc(void* key1, void* key2)
|
|
{
|
|
struct slabhash_testkey* k1 = (struct slabhash_testkey*)key1;
|
|
struct slabhash_testkey* k2 = (struct slabhash_testkey*)key2;
|
|
if(k1->id == k2->id)
|
|
return 0;
|
|
if(k1->id > k2->id)
|
|
return 1;
|
|
return -1;
|
|
}
|
|
|
|
/** key-delete callback for the test table; arg is unused */
void test_slabhash_delkey(void* key, void* ATTR_UNUSED(arg))
{
	struct slabhash_testkey* k = (struct slabhash_testkey*)key;
	delkey(k);
}
|
|
|
|
/** data-delete callback for the test table; arg is unused */
void test_slabhash_deldata(void* data, void* ATTR_UNUSED(arg))
{
	struct slabhash_testdata* d = (struct slabhash_testdata*)data;
	deldata(d);
}
|
|
|
|
/** Install the mark-delete callback on every subtable. */
void slabhash_setmarkdel(struct slabhash* sl, lruhash_markdelfunc_type md)
{
	size_t tab;
	for(tab = 0; tab < sl->size; tab++)
		lruhash_setmarkdel(sl->array[tab], md);
}
|
|
|
|
void slabhash_traverse(struct slabhash* sh, int wr,
|
|
void (*func)(struct lruhash_entry*, void*), void* arg)
|
|
{
|
|
size_t i;
|
|
for(i=0; i<sh->size; i++)
|
|
lruhash_traverse(sh->array[i], wr, func, arg);
|
|
}
|
|
|
|
size_t count_slabhash_entries(struct slabhash* sh)
|
|
{
|
|
size_t slab, cnt = 0;
|
|
|
|
for(slab=0; slab<sh->size; slab++) {
|
|
lock_quick_lock(&sh->array[slab]->lock);
|
|
cnt += sh->array[slab]->num;
|
|
lock_quick_unlock(&sh->array[slab]->lock);
|
|
}
|
|
return cnt;
|
|
}
|
|
|
|
void get_slabhash_stats(struct slabhash* sh, long long* num, long long* collisions)
|
|
{
|
|
size_t slab, cnt = 0, max_collisions = 0;
|
|
|
|
for(slab=0; slab<sh->size; slab++) {
|
|
lock_quick_lock(&sh->array[slab]->lock);
|
|
cnt += sh->array[slab]->num;
|
|
if (max_collisions < sh->array[slab]->max_collisions) {
|
|
max_collisions = sh->array[slab]->max_collisions;
|
|
}
|
|
lock_quick_unlock(&sh->array[slab]->lock);
|
|
}
|
|
if (num != NULL)
|
|
*num = cnt;
|
|
if (collisions != NULL)
|
|
*collisions = max_collisions;
|
|
}
|
|
|
|
/** Change the total memory budget to max, dividing it evenly over
 * the subtables (any remainder of the division is dropped). */
void slabhash_adjust_size(struct slabhash* sl, size_t max)
{
	size_t per_slab = max / sl->size;
	size_t tab;
	for(tab = 0; tab < sl->size; tab++)
		lruhash_update_space_max(sl->array[tab], NULL, per_slab);
}
|