Merge pull request #1041 from NLnetLabs/stubfwd-unshare

Stub and Forward unshare
Wouter Wijngaards 2024-04-25 11:11:00 +02:00 committed by GitHub
commit ced9762b14
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
19 changed files with 531 additions and 214 deletions

View file

@ -1278,7 +1278,8 @@ daemon.lo daemon.o: $(srcdir)/daemon/daemon.c config.h $(srcdir)/daemon/daemon.h
$(srcdir)/util/edns.h $(srcdir)/services/listen_dnsport.h $(srcdir)/services/cache/rrset.h \
$(srcdir)/services/cache/infra.h $(srcdir)/util/rtt.h $(srcdir)/services/localzone.h \
$(srcdir)/services/authzone.h $(srcdir)/services/mesh.h $(srcdir)/services/rpz.h $(srcdir)/respip/respip.h \
$(srcdir)/util/random.h $(srcdir)/util/tube.h $(srcdir)/util/net_help.h $(srcdir)/sldns/keyraw.h
$(srcdir)/util/random.h $(srcdir)/util/tube.h $(srcdir)/util/net_help.h $(srcdir)/sldns/keyraw.h \
$(srcdir)/iterator/iter_fwd.h $(srcdir)/iterator/iter_hints.h
remote.lo remote.o: $(srcdir)/daemon/remote.c config.h $(srcdir)/daemon/remote.h $(srcdir)/daemon/worker.h \
$(srcdir)/libunbound/worker.h $(srcdir)/sldns/sbuffer.h $(srcdir)/util/data/packed_rrset.h \
$(srcdir)/util/storage/lruhash.h $(srcdir)/util/locks.h $(srcdir)/util/log.h $(srcdir)/util/netevent.h \
@ -1484,7 +1485,8 @@ context.lo context.o: $(srcdir)/libunbound/context.c config.h $(srcdir)/libunbou
$(srcdir)/util/storage/slabhash.h $(srcdir)/services/cache/infra.h $(srcdir)/util/rtt.h \
$(srcdir)/util/netevent.h $(srcdir)/dnscrypt/dnscrypt.h \
$(srcdir)/services/authzone.h $(srcdir)/services/mesh.h $(srcdir)/services/rpz.h $(srcdir)/daemon/stats.h \
$(srcdir)/util/timehist.h $(srcdir)/respip/respip.h $(srcdir)/util/edns.h
$(srcdir)/util/timehist.h $(srcdir)/respip/respip.h $(srcdir)/util/edns.h \
$(srcdir)/iterator/iter_fwd.h $(srcdir)/iterator/iter_hints.h
libunbound.lo libunbound.o: $(srcdir)/libunbound/libunbound.c $(srcdir)/libunbound/unbound.h \
$(srcdir)/libunbound/unbound-event.h config.h $(srcdir)/libunbound/context.h $(srcdir)/util/locks.h \
$(srcdir)/util/log.h $(srcdir)/util/alloc.h $(srcdir)/util/rbtree.h $(srcdir)/services/modstack.h \
@ -1496,7 +1498,8 @@ libunbound.lo libunbound.o: $(srcdir)/libunbound/libunbound.c $(srcdir)/libunbou
$(srcdir)/sldns/sbuffer.h $(srcdir)/services/cache/infra.h $(srcdir)/util/rtt.h $(srcdir)/util/netevent.h \
$(srcdir)/dnscrypt/dnscrypt.h $(srcdir)/services/cache/rrset.h \
$(srcdir)/util/storage/slabhash.h $(srcdir)/services/authzone.h $(srcdir)/services/mesh.h \
$(srcdir)/services/rpz.h $(srcdir)/daemon/stats.h $(srcdir)/util/timehist.h $(srcdir)/respip/respip.h
$(srcdir)/services/rpz.h $(srcdir)/daemon/stats.h $(srcdir)/util/timehist.h $(srcdir)/respip/respip.h \
$(srcdir)/iterator/iter_fwd.h $(srcdir)/iterator/iter_hints.h
libworker.lo libworker.o: $(srcdir)/libunbound/libworker.c config.h $(srcdir)/libunbound/libworker.h \
$(srcdir)/util/data/packed_rrset.h $(srcdir)/util/storage/lruhash.h $(srcdir)/util/locks.h $(srcdir)/util/log.h \
$(srcdir)/libunbound/context.h $(srcdir)/util/alloc.h $(srcdir)/util/rbtree.h $(srcdir)/services/modstack.h \
@ -1510,8 +1513,7 @@ libworker.lo libworker.o: $(srcdir)/libunbound/libworker.c config.h $(srcdir)/li
$(srcdir)/services/cache/rrset.h $(srcdir)/util/storage/slabhash.h $(srcdir)/services/outbound_list.h \
$(srcdir)/util/fptr_wlist.h $(srcdir)/util/tube.h $(srcdir)/util/regional.h $(srcdir)/util/random.h \
$(srcdir)/util/storage/lookup3.h $(srcdir)/util/net_help.h $(srcdir)/util/data/dname.h \
$(srcdir)/util/data/msgencode.h $(srcdir)/iterator/iter_fwd.h $(srcdir)/iterator/iter_hints.h \
$(srcdir)/sldns/str2wire.h
$(srcdir)/util/data/msgencode.h $(srcdir)/sldns/str2wire.h
unbound-host.lo unbound-host.o: $(srcdir)/smallapp/unbound-host.c config.h $(srcdir)/libunbound/unbound.h \
$(srcdir)/sldns/rrdef.h $(srcdir)/sldns/wire2str.h
asynclook.lo asynclook.o: $(srcdir)/testcode/asynclook.c config.h $(srcdir)/libunbound/unbound.h \

View file

@ -691,6 +691,7 @@ cachedb_extcache_store(struct module_qstate* qstate, struct cachedb_env* ie)
static int
cachedb_intcache_lookup(struct module_qstate* qstate, struct cachedb_env* cde)
{
uint8_t dpname_storage[LDNS_MAX_DOMAINLEN+1];
uint8_t* dpname=NULL;
size_t dpnamelen=0;
struct dns_msg* msg;
@ -699,7 +700,7 @@ cachedb_intcache_lookup(struct module_qstate* qstate, struct cachedb_env* cde)
return 0;
}
if(iter_stub_fwd_no_cache(qstate, &qstate->qinfo,
&dpname, &dpnamelen))
&dpname, &dpnamelen, dpname_storage, sizeof(dpname_storage)))
return 0; /* no cache for these queries */
msg = dns_cache_lookup(qstate->env, qstate->qinfo.qname,
qstate->qinfo.qname_len, qstate->qinfo.qtype,

View file

@ -839,6 +839,7 @@ int print_deleg_lookup(RES* ssl, struct worker* worker, uint8_t* nm,
char b[260];
struct query_info qinfo;
struct iter_hints_stub* stub;
int nolock = 0;
regional_free_all(region);
qinfo.qname = nm;
qinfo.qname_len = nmlen;
@ -850,13 +851,16 @@ int print_deleg_lookup(RES* ssl, struct worker* worker, uint8_t* nm,
if(!ssl_printf(ssl, "The following name servers are used for lookup "
"of %s\n", b))
return 0;
dp = forwards_lookup(worker->env.fwds, nm, qinfo.qclass);
dp = forwards_lookup(worker->env.fwds, nm, qinfo.qclass, nolock);
if(dp) {
if(!ssl_printf(ssl, "forwarding request:\n"))
if(!ssl_printf(ssl, "forwarding request:\n")) {
lock_rw_unlock(&worker->env.fwds->lock);
return 0;
}
print_dp_main(ssl, dp, NULL);
print_dp_details(ssl, worker, dp);
lock_rw_unlock(&worker->env.fwds->lock);
return 1;
}
@ -892,21 +896,26 @@ int print_deleg_lookup(RES* ssl, struct worker* worker, uint8_t* nm,
return 0;
continue;
}
}
}
stub = hints_lookup_stub(worker->env.hints, nm, qinfo.qclass,
dp);
dp, nolock);
if(stub) {
if(stub->noprime) {
if(!ssl_printf(ssl, "The noprime stub servers "
"are used:\n"))
"are used:\n")) {
lock_rw_unlock(&worker->env.hints->lock);
return 0;
}
} else {
if(!ssl_printf(ssl, "The stub is primed "
"with servers:\n"))
"with servers:\n")) {
lock_rw_unlock(&worker->env.hints->lock);
return 0;
}
}
print_dp_main(ssl, stub->dp, NULL);
print_dp_details(ssl, worker, stub->dp);
lock_rw_unlock(&worker->env.hints->lock);
} else {
print_dp_main(ssl, dp, msg);
print_dp_details(ssl, worker, dp);

View file

@ -91,6 +91,8 @@
#include "util/net_help.h"
#include "sldns/keyraw.h"
#include "respip/respip.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_hints.h"
#include <signal.h>
#ifdef HAVE_SYSTEMD
@ -717,6 +719,12 @@ daemon_fork(struct daemon* daemon)
fatal_exit("Could not create local zones: out of memory");
if(!local_zones_apply_cfg(daemon->local_zones, daemon->cfg))
fatal_exit("Could not set up local zones");
if(!(daemon->env->fwds = forwards_create()) ||
!forwards_apply_cfg(daemon->env->fwds, daemon->cfg))
fatal_exit("Could not set forward zones");
if(!(daemon->env->hints = hints_create()) ||
!hints_apply_cfg(daemon->env->hints, daemon->cfg))
fatal_exit("Could not set root or stub hints");
/* process raw response-ip configuration data */
if(!(daemon->respip_set = respip_set_create()))
@ -837,6 +845,10 @@ daemon_cleanup(struct daemon* daemon)
slabhash_clear(daemon->env->msg_cache);
}
daemon->old_num = daemon->num; /* save the current num */
forwards_delete(daemon->env->fwds);
daemon->env->fwds = NULL;
hints_delete(daemon->env->hints);
daemon->env->hints = NULL;
local_zones_delete(daemon->local_zones);
daemon->local_zones = NULL;
respip_set_delete(daemon->respip_set);

View file

@ -1992,12 +1992,19 @@ static int
print_root_fwds(RES* ssl, struct iter_forwards* fwds, uint8_t* root)
{
struct delegpt* dp;
dp = forwards_lookup(fwds, root, LDNS_RR_CLASS_IN);
if(!dp)
int nolock = 0;
dp = forwards_lookup(fwds, root, LDNS_RR_CLASS_IN, nolock);
if(!dp) {
return ssl_printf(ssl, "off (using root hints)\n");
}
/* if dp is returned it must be the root */
log_assert(query_dname_compare(dp->name, root)==0);
return ssl_print_name_dp(ssl, NULL, root, LDNS_RR_CLASS_IN, dp);
if(!ssl_print_name_dp(ssl, NULL, root, LDNS_RR_CLASS_IN, dp)) {
lock_rw_unlock(&fwds->lock);
return 0;
}
lock_rw_unlock(&fwds->lock);
return 1;
}
/** parse args into delegpt */
@ -2069,6 +2076,7 @@ do_forward(RES* ssl, struct worker* worker, char* args)
{
struct iter_forwards* fwd = worker->env.fwds;
uint8_t* root = (uint8_t*)"\000";
int nolock = 0;
if(!fwd) {
(void)ssl_printf(ssl, "error: structure not allocated\n");
return;
@ -2082,12 +2090,12 @@ do_forward(RES* ssl, struct worker* worker, char* args)
/* delete all the existing queries first */
mesh_delete_all(worker->env.mesh);
if(strcmp(args, "off") == 0) {
forwards_delete_zone(fwd, LDNS_RR_CLASS_IN, root);
forwards_delete_zone(fwd, LDNS_RR_CLASS_IN, root, nolock);
} else {
struct delegpt* dp;
if(!(dp = parse_delegpt(ssl, args, root)))
return;
if(!forwards_add_zone(fwd, LDNS_RR_CLASS_IN, dp)) {
if(!forwards_add_zone(fwd, LDNS_RR_CLASS_IN, dp, nolock)) {
(void)ssl_printf(ssl, "error out of memory\n");
return;
}
@ -2149,24 +2157,30 @@ do_forward_add(RES* ssl, struct worker* worker, char* args)
int insecure = 0, tls = 0;
uint8_t* nm = NULL;
struct delegpt* dp = NULL;
int nolock = 1;
if(!parse_fs_args(ssl, args, &nm, &dp, &insecure, NULL, &tls))
return;
if(tls)
dp->ssl_upstream = 1;
/* prelock forwarders for atomic operation with anchors */
lock_rw_wrlock(&fwd->lock);
if(insecure && worker->env.anchors) {
if(!anchors_add_insecure(worker->env.anchors, LDNS_RR_CLASS_IN,
nm)) {
lock_rw_unlock(&fwd->lock);
(void)ssl_printf(ssl, "error out of memory\n");
delegpt_free_mlc(dp);
free(nm);
return;
}
}
if(!forwards_add_zone(fwd, LDNS_RR_CLASS_IN, dp)) {
if(!forwards_add_zone(fwd, LDNS_RR_CLASS_IN, dp, nolock)) {
lock_rw_unlock(&fwd->lock);
(void)ssl_printf(ssl, "error out of memory\n");
free(nm);
return;
}
lock_rw_unlock(&fwd->lock);
free(nm);
send_ok(ssl);
}
@ -2178,12 +2192,16 @@ do_forward_remove(RES* ssl, struct worker* worker, char* args)
struct iter_forwards* fwd = worker->env.fwds;
int insecure = 0;
uint8_t* nm = NULL;
int nolock = 1;
if(!parse_fs_args(ssl, args, &nm, NULL, &insecure, NULL, NULL))
return;
/* prelock forwarders for atomic operation with anchors */
lock_rw_wrlock(&fwd->lock);
if(insecure && worker->env.anchors)
anchors_delete_insecure(worker->env.anchors, LDNS_RR_CLASS_IN,
nm);
forwards_delete_zone(fwd, LDNS_RR_CLASS_IN, nm);
forwards_delete_zone(fwd, LDNS_RR_CLASS_IN, nm, nolock);
lock_rw_unlock(&fwd->lock);
free(nm);
send_ok(ssl);
}
@ -2196,37 +2214,50 @@ do_stub_add(RES* ssl, struct worker* worker, char* args)
int insecure = 0, prime = 0, tls = 0;
uint8_t* nm = NULL;
struct delegpt* dp = NULL;
int nolock = 1;
if(!parse_fs_args(ssl, args, &nm, &dp, &insecure, &prime, &tls))
return;
if(tls)
dp->ssl_upstream = 1;
/* prelock forwarders and hints for atomic operation with anchors */
lock_rw_wrlock(&fwd->lock);
lock_rw_wrlock(&worker->env.hints->lock);
if(insecure && worker->env.anchors) {
if(!anchors_add_insecure(worker->env.anchors, LDNS_RR_CLASS_IN,
nm)) {
lock_rw_unlock(&fwd->lock);
lock_rw_unlock(&worker->env.hints->lock);
(void)ssl_printf(ssl, "error out of memory\n");
delegpt_free_mlc(dp);
free(nm);
return;
}
}
if(!forwards_add_stub_hole(fwd, LDNS_RR_CLASS_IN, nm)) {
if(!forwards_add_stub_hole(fwd, LDNS_RR_CLASS_IN, nm, nolock)) {
if(insecure && worker->env.anchors)
anchors_delete_insecure(worker->env.anchors,
LDNS_RR_CLASS_IN, nm);
lock_rw_unlock(&fwd->lock);
lock_rw_unlock(&worker->env.hints->lock);
(void)ssl_printf(ssl, "error out of memory\n");
delegpt_free_mlc(dp);
free(nm);
return;
}
if(!hints_add_stub(worker->env.hints, LDNS_RR_CLASS_IN, dp, !prime)) {
if(!hints_add_stub(worker->env.hints, LDNS_RR_CLASS_IN, dp, !prime,
nolock)) {
(void)ssl_printf(ssl, "error out of memory\n");
forwards_delete_stub_hole(fwd, LDNS_RR_CLASS_IN, nm);
forwards_delete_stub_hole(fwd, LDNS_RR_CLASS_IN, nm, nolock);
if(insecure && worker->env.anchors)
anchors_delete_insecure(worker->env.anchors,
LDNS_RR_CLASS_IN, nm);
lock_rw_unlock(&fwd->lock);
lock_rw_unlock(&worker->env.hints->lock);
free(nm);
return;
}
lock_rw_unlock(&fwd->lock);
lock_rw_unlock(&worker->env.hints->lock);
free(nm);
send_ok(ssl);
}
@ -2238,13 +2269,19 @@ do_stub_remove(RES* ssl, struct worker* worker, char* args)
struct iter_forwards* fwd = worker->env.fwds;
int insecure = 0;
uint8_t* nm = NULL;
int nolock = 1;
if(!parse_fs_args(ssl, args, &nm, NULL, &insecure, NULL, NULL))
return;
/* prelock forwarders and hints for atomic operation with anchors */
lock_rw_wrlock(&fwd->lock);
lock_rw_wrlock(&worker->env.hints->lock);
if(insecure && worker->env.anchors)
anchors_delete_insecure(worker->env.anchors, LDNS_RR_CLASS_IN,
nm);
forwards_delete_stub_hole(fwd, LDNS_RR_CLASS_IN, nm);
hints_delete_stub(worker->env.hints, LDNS_RR_CLASS_IN, nm);
forwards_delete_stub_hole(fwd, LDNS_RR_CLASS_IN, nm, nolock);
hints_delete_stub(worker->env.hints, LDNS_RR_CLASS_IN, nm, nolock);
lock_rw_unlock(&fwd->lock);
lock_rw_unlock(&worker->env.hints->lock);
free(nm);
send_ok(ssl);
}
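
The handlers above (do_forward_add, do_forward_remove, do_stub_add, do_stub_remove) now take the forwards and hints write locks themselves and pass nolock=1 to the modifying functions, so the trust-anchor change and the zone change are applied atomically. A minimal caller-side sketch of that pattern, not taken from the patch; the helper name replace_forward_zone is illustrative, the API calls are the ones added here:

/* Sketch: hold the forwards write lock across a compound update and
 * tell the iter_fwd functions not to lock again (nolock=1). */
#include "config.h"
#include "util/locks.h"
#include "iterator/iter_fwd.h"
#include "sldns/rrdef.h"

static int
replace_forward_zone(struct iter_forwards* fwd, struct delegpt* dp)
{
	int nolock = 1; /* caller handles locking */
	lock_rw_wrlock(&fwd->lock);
	if(!forwards_add_zone(fwd, LDNS_RR_CLASS_IN, dp, nolock)) {
		lock_rw_unlock(&fwd->lock);
		return 0; /* out of memory; fwd unchanged */
	}
	/* further updates could go here under the same write lock */
	lock_rw_unlock(&fwd->lock);
	return 1;
}
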
@ -2673,6 +2710,7 @@ do_list_forwards(RES* ssl, struct worker* worker)
struct iter_forward_zone* z;
struct trust_anchor* a;
int insecure;
lock_rw_rdlock(&fwds->lock);
RBTREE_FOR(z, struct iter_forward_zone*, fwds->tree) {
if(!z->dp) continue; /* skip empty marker for stub */
@ -2687,9 +2725,12 @@ do_list_forwards(RES* ssl, struct worker* worker)
}
if(!ssl_print_name_dp(ssl, (insecure?"forward +i":"forward"),
z->name, z->dclass, z->dp))
z->name, z->dclass, z->dp)) {
lock_rw_unlock(&fwds->lock);
return;
}
}
lock_rw_unlock(&fwds->lock);
}
/** do the list_stubs command */
@ -2700,6 +2741,7 @@ do_list_stubs(RES* ssl, struct worker* worker)
struct trust_anchor* a;
int insecure;
char str[32];
lock_rw_rdlock(&worker->env.hints->lock);
RBTREE_FOR(z, struct iter_hints_stub*, &worker->env.hints->tree) {
/* see if it is insecure */
@ -2715,9 +2757,12 @@ do_list_stubs(RES* ssl, struct worker* worker)
snprintf(str, sizeof(str), "stub %sprime%s",
(z->noprime?"no":""), (insecure?" +i":""));
if(!ssl_print_name_dp(ssl, str, z->node.name,
z->node.dclass, z->dp))
z->node.dclass, z->dp)) {
lock_rw_unlock(&worker->env.hints->lock);
return;
}
}
lock_rw_unlock(&worker->env.hints->lock);
}
/** do the list_auth_zones command */
@ -3077,26 +3122,6 @@ execute_cmd(struct daemon_remote* rc, RES* ssl, char* cmd,
} else if(cmdcmp(p, "auth_zone_transfer", 18)) {
do_auth_zone_transfer(ssl, worker, skipwhite(p+18));
return;
} else if(cmdcmp(p, "stub_add", 8)) {
/* must always distribute this cmd */
if(rc) distribute_cmd(rc, ssl, cmd);
do_stub_add(ssl, worker, skipwhite(p+8));
return;
} else if(cmdcmp(p, "stub_remove", 11)) {
/* must always distribute this cmd */
if(rc) distribute_cmd(rc, ssl, cmd);
do_stub_remove(ssl, worker, skipwhite(p+11));
return;
} else if(cmdcmp(p, "forward_add", 11)) {
/* must always distribute this cmd */
if(rc) distribute_cmd(rc, ssl, cmd);
do_forward_add(ssl, worker, skipwhite(p+11));
return;
} else if(cmdcmp(p, "forward_remove", 14)) {
/* must always distribute this cmd */
if(rc) distribute_cmd(rc, ssl, cmd);
do_forward_remove(ssl, worker, skipwhite(p+14));
return;
} else if(cmdcmp(p, "insecure_add", 12)) {
/* must always distribute this cmd */
if(rc) distribute_cmd(rc, ssl, cmd);
@ -3107,11 +3132,6 @@ execute_cmd(struct daemon_remote* rc, RES* ssl, char* cmd,
if(rc) distribute_cmd(rc, ssl, cmd);
do_insecure_remove(ssl, worker, skipwhite(p+15));
return;
} else if(cmdcmp(p, "forward", 7)) {
/* must always distribute this cmd */
if(rc) distribute_cmd(rc, ssl, cmd);
do_forward(ssl, worker, skipwhite(p+7));
return;
} else if(cmdcmp(p, "flush_stats", 11)) {
/* must always distribute this cmd */
if(rc) distribute_cmd(rc, ssl, cmd);
@ -3153,6 +3173,16 @@ execute_cmd(struct daemon_remote* rc, RES* ssl, char* cmd,
do_data_add(ssl, worker->daemon->local_zones, skipwhite(p+10));
} else if(cmdcmp(p, "local_datas", 11)) {
do_datas_add(ssl, worker->daemon->local_zones);
} else if(cmdcmp(p, "forward_add", 11)) {
do_forward_add(ssl, worker, skipwhite(p+11));
} else if(cmdcmp(p, "forward_remove", 14)) {
do_forward_remove(ssl, worker, skipwhite(p+14));
} else if(cmdcmp(p, "forward", 7)) {
do_forward(ssl, worker, skipwhite(p+7));
} else if(cmdcmp(p, "stub_add", 8)) {
do_stub_add(ssl, worker, skipwhite(p+8));
} else if(cmdcmp(p, "stub_remove", 11)) {
do_stub_remove(ssl, worker, skipwhite(p+11));
} else if(cmdcmp(p, "view_local_zone_remove", 22)) {
do_view_zone_remove(ssl, worker, skipwhite(p+22));
} else if(cmdcmp(p, "view_local_zone", 15)) {

View file

@ -2266,18 +2266,6 @@ worker_init(struct worker* worker, struct config_file *cfg,
worker_delete(worker);
return 0;
}
if(!(worker->env.fwds = forwards_create()) ||
!forwards_apply_cfg(worker->env.fwds, cfg)) {
log_err("Could not set forward zones");
worker_delete(worker);
return 0;
}
if(!(worker->env.hints = hints_create()) ||
!hints_apply_cfg(worker->env.hints, cfg)) {
log_err("Could not set root or stub hints");
worker_delete(worker);
return 0;
}
/* one probe timer per process -- if we have 5011 anchors */
if(autr_get_num_anchors(worker->env.anchors) > 0
#ifndef THREADS_DISABLED
@ -2350,8 +2338,6 @@ worker_delete(struct worker* worker)
outside_network_quit_prepare(worker->back);
mesh_delete(worker->env.mesh);
sldns_buffer_free(worker->env.scratch_buffer);
forwards_delete(worker->env.fwds);
hints_delete(worker->env.hints);
listen_delete(worker->front);
outside_network_delete(worker->back);
comm_signal_delete(worker->comsig);

View file

@ -152,7 +152,7 @@ int ecs_whitelist_check(struct query_info* qinfo,
/* Cache by default, might be disabled after parsing EDNS option
* received from nameserver. */
if(!iter_stub_fwd_no_cache(qstate, &qstate->qinfo, NULL, NULL)) {
if(!iter_stub_fwd_no_cache(qstate, &qstate->qinfo, NULL, NULL, NULL, 0)) {
qstate->no_cache_store = 0;
}

View file

@ -71,6 +71,7 @@ forwards_create(void)
sizeof(struct iter_forwards));
if(!fwd)
return NULL;
lock_rw_init(&fwd->lock);
return fwd;
}
@ -100,6 +101,7 @@ forwards_delete(struct iter_forwards* fwd)
{
if(!fwd)
return;
lock_rw_destroy(&fwd->lock);
fwd_del_tree(fwd);
free(fwd);
}
@ -332,45 +334,64 @@ make_stub_holes(struct iter_forwards* fwd, struct config_file* cfg)
int
forwards_apply_cfg(struct iter_forwards* fwd, struct config_file* cfg)
{
if(fwd->tree) {
lock_unprotect(&fwd->lock, fwd->tree);
}
fwd_del_tree(fwd);
fwd->tree = rbtree_create(fwd_cmp);
if(!fwd->tree)
return 0;
lock_protect(&fwd->lock, fwd->tree, sizeof(*fwd->tree));
lock_rw_wrlock(&fwd->lock);
/* read forward zones */
if(!read_forwards(fwd, cfg))
if(!read_forwards(fwd, cfg)) {
lock_rw_unlock(&fwd->lock);
return 0;
if(!make_stub_holes(fwd, cfg))
}
if(!make_stub_holes(fwd, cfg)) {
lock_rw_unlock(&fwd->lock);
return 0;
}
fwd_init_parents(fwd);
lock_rw_unlock(&fwd->lock);
return 1;
}
struct delegpt*
forwards_find(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
forwards_find(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass,
int nolock)
{
rbnode_type* res = NULL;
struct iter_forward_zone* res;
struct iter_forward_zone key;
int has_dp;
key.node.key = &key;
key.dclass = qclass;
key.name = qname;
key.namelabs = dname_count_size_labels(qname, &key.namelen);
res = rbtree_search(fwd->tree, &key);
if(res) return ((struct iter_forward_zone*)res)->dp;
return NULL;
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_rdlock(&fwd->lock); }
res = (struct iter_forward_zone*)rbtree_search(fwd->tree, &key);
has_dp = res && res->dp;
if(!has_dp && !nolock) { lock_rw_unlock(&fwd->lock); }
return has_dp?res->dp:NULL;
}
struct delegpt*
forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass,
int nolock)
{
/* lookup the forward zone in the tree */
rbnode_type* res = NULL;
struct iter_forward_zone *result;
struct iter_forward_zone key;
int has_dp;
key.node.key = &key;
key.dclass = qclass;
key.name = qname;
key.namelabs = dname_count_size_labels(qname, &key.namelen);
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_rdlock(&fwd->lock); }
if(rbtree_find_less_equal(fwd->tree, &key, &res)) {
/* exact */
result = (struct iter_forward_zone*)res;
@ -378,8 +399,10 @@ forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
/* smaller element (or no element) */
int m;
result = (struct iter_forward_zone*)res;
if(!result || result->dclass != qclass)
if(!result || result->dclass != qclass) {
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return NULL;
}
/* count number of labels matched */
(void)dname_lab_cmp(result->name, result->namelabs, key.name,
key.namelabs, &m);
@ -389,20 +412,22 @@ forwards_lookup(struct iter_forwards* fwd, uint8_t* qname, uint16_t qclass)
result = result->parent;
}
}
if(result)
return result->dp;
return NULL;
has_dp = result && result->dp;
if(!has_dp && !nolock) { lock_rw_unlock(&fwd->lock); }
return has_dp?result->dp:NULL;
}
struct delegpt*
forwards_lookup_root(struct iter_forwards* fwd, uint16_t qclass)
forwards_lookup_root(struct iter_forwards* fwd, uint16_t qclass, int nolock)
{
uint8_t root = 0;
return forwards_lookup(fwd, &root, qclass);
return forwards_lookup(fwd, &root, qclass, nolock);
}
int
forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass)
/* Finds next root item in forwards lookup tree.
* Caller needs to handle locking of the forwards structure. */
static int
next_root_locked(struct iter_forwards* fwd, uint16_t* dclass)
{
struct iter_forward_zone key;
rbnode_type* n;
@ -419,7 +444,7 @@ forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass)
}
/* root not first item? search for higher items */
*dclass = p->dclass + 1;
return forwards_next_root(fwd, dclass);
return next_root_locked(fwd, dclass);
}
/* find class n in tree, we may get a direct hit, or if we don't
* this is the last item of the previous class so rbtree_next() takes
@ -447,10 +472,21 @@ forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass)
}
/* not a root node, return next higher item */
*dclass = p->dclass+1;
return forwards_next_root(fwd, dclass);
return next_root_locked(fwd, dclass);
}
}
int
forwards_next_root(struct iter_forwards* fwd, uint16_t* dclass, int nolock)
{
int ret;
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_rdlock(&fwd->lock); }
ret = next_root_locked(fwd, dclass);
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return ret;
}
size_t
forwards_get_mem(struct iter_forwards* fwd)
{
@ -458,10 +494,12 @@ forwards_get_mem(struct iter_forwards* fwd)
size_t s;
if(!fwd)
return 0;
lock_rw_rdlock(&fwd->lock);
s = sizeof(*fwd) + sizeof(*fwd->tree);
RBTREE_FOR(p, struct iter_forward_zone*, fwd->tree) {
s += sizeof(*p) + p->namelen + delegpt_get_mem(p->dp);
}
lock_rw_unlock(&fwd->lock);
return s;
}
@ -477,49 +515,78 @@ fwd_zone_find(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
}
int
forwards_add_zone(struct iter_forwards* fwd, uint16_t c, struct delegpt* dp)
forwards_add_zone(struct iter_forwards* fwd, uint16_t c, struct delegpt* dp,
int nolock)
{
struct iter_forward_zone *z;
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_wrlock(&fwd->lock); }
if((z=fwd_zone_find(fwd, c, dp->name)) != NULL) {
(void)rbtree_delete(fwd->tree, &z->node);
fwd_zone_free(z);
}
if(!forwards_insert(fwd, c, dp))
if(!forwards_insert(fwd, c, dp)) {
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 0;
}
fwd_init_parents(fwd);
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 1;
}
void
forwards_delete_zone(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
forwards_delete_zone(struct iter_forwards* fwd, uint16_t c, uint8_t* nm,
int nolock)
{
struct iter_forward_zone *z;
if(!(z=fwd_zone_find(fwd, c, nm)))
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_wrlock(&fwd->lock); }
if(!(z=fwd_zone_find(fwd, c, nm))) {
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return; /* nothing to do */
}
(void)rbtree_delete(fwd->tree, &z->node);
fwd_zone_free(z);
fwd_init_parents(fwd);
if(!nolock) { lock_rw_unlock(&fwd->lock); }
}
int
forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm,
int nolock)
{
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_wrlock(&fwd->lock); }
if(fwd_zone_find(fwd, c, nm) != NULL) {
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 1; /* already a stub zone there */
}
if(!fwd_add_stub_hole(fwd, c, nm)) {
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 0;
}
fwd_init_parents(fwd);
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return 1;
}
void
forwards_delete_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm)
forwards_delete_stub_hole(struct iter_forwards* fwd, uint16_t c,
uint8_t* nm, int nolock)
{
struct iter_forward_zone *z;
if(!(z=fwd_zone_find(fwd, c, nm)))
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_wrlock(&fwd->lock); }
if(!(z=fwd_zone_find(fwd, c, nm))) {
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return; /* nothing to do */
if(z->dp != NULL)
}
if(z->dp != NULL) {
if(!nolock) { lock_rw_unlock(&fwd->lock); }
return; /* not a stub hole */
}
(void)rbtree_delete(fwd->tree, &z->node);
fwd_zone_free(z);
fwd_init_parents(fwd);
if(!nolock) { lock_rw_unlock(&fwd->lock); }
}

View file

@ -43,6 +43,7 @@
#ifndef ITERATOR_ITER_FWD_H
#define ITERATOR_ITER_FWD_H
#include "util/rbtree.h"
#include "util/locks.h"
struct config_file;
struct delegpt;
@ -50,6 +51,11 @@ struct delegpt;
* Iterator forward zones structure
*/
struct iter_forwards {
/** lock on the forwards tree.
* When grabbing both this lock and the anchors.lock, this lock
* is grabbed first. When grabbing both this lock and the hints.lock
* this lock is grabbed first. */
lock_rw_type lock;
/**
* Zones are stored in this tree. Sort order is specially chosen.
* first sorted on qclass. Then on dname in nsec-like order, so that
@ -106,47 +112,65 @@ int forwards_apply_cfg(struct iter_forwards* fwd, struct config_file* cfg);
/**
* Find forward zone exactly by name
* The return value is contents of the forwards structure.
* Caller should lock and unlock a readlock on the forwards structure if nolock
* is set.
* Otherwise caller should unlock the readlock on the forwards structure if a
* value was returned.
* @param fwd: forward storage.
* @param qname: The qname of the query.
* @param qclass: The qclass of the query.
* @param nolock: Skip locking, locking is handled by the caller.
* @return: A delegation point or null.
*/
struct delegpt* forwards_find(struct iter_forwards* fwd, uint8_t* qname,
uint16_t qclass);
uint16_t qclass, int nolock);
/**
* Find forward zone information
* For this qname/qclass find forward zone information, returns delegation
* point with server names and addresses, or NULL if no forwarding is needed.
* The return value is contents of the forwards structure.
* Caller should lock and unlock a readlock on the forwards structure if nolock
* is set.
* Otherwise caller should unlock the readlock on the forwards structure if a
* value was returned.
*
* @param fwd: forward storage.
* @param qname: The qname of the query.
* @param qclass: The qclass of the query.
* @param nolock: Skip locking, locking is handled by the caller.
* @return: A delegation point if the query has to be forwarded to that list,
* otherwise null.
*/
struct delegpt* forwards_lookup(struct iter_forwards* fwd,
uint8_t* qname, uint16_t qclass);
struct delegpt* forwards_lookup(struct iter_forwards* fwd,
uint8_t* qname, uint16_t qclass, int nolock);
/**
* Same as forwards_lookup, but for the root only
* @param fwd: forward storage.
* @param qclass: The qclass of the query.
* @param nolock: Skip locking, locking is handled by the caller.
* @return: A delegation point if root forward exists, otherwise null.
*/
struct delegpt* forwards_lookup_root(struct iter_forwards* fwd,
uint16_t qclass);
struct delegpt* forwards_lookup_root(struct iter_forwards* fwd,
uint16_t qclass, int nolock);
/**
* Find next root item in forwards lookup tree.
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a readlock on the forwards structure.
* @param fwd: the forward storage
* @param qclass: class to look at next, or higher.
* @param nolock: Skip locking, locking is handled by the caller.
* @return false if none found, or if true stored in qclass.
*/
int forwards_next_root(struct iter_forwards* fwd, uint16_t* qclass);
int forwards_next_root(struct iter_forwards* fwd, uint16_t* qclass,
int nolock);
/**
* Get memory in use by forward storage
* Locks and unlocks the structure.
* @param fwd: forward storage.
* @return bytes in use
*/
@ -158,42 +182,56 @@ int fwd_cmp(const void* k1, const void* k2);
/**
* Add zone to forward structure. For external use since it recalcs
* the tree parents.
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param dp: delegation point with name and target nameservers for new
* forward zone. malloced.
* @param nolock: Skip locking, locking is handled by the caller.
* @return false on failure (out of memory);
*/
int forwards_add_zone(struct iter_forwards* fwd, uint16_t c,
struct delegpt* dp);
int forwards_add_zone(struct iter_forwards* fwd, uint16_t c,
struct delegpt* dp, int nolock);
/**
* Remove zone from forward structure. For external use since it
* recalcs the tree parents.
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param nm: name of zone (in uncompressed wireformat).
* @param nolock: Skip locking, locking is handled by the caller.
*/
void forwards_delete_zone(struct iter_forwards* fwd, uint16_t c, uint8_t* nm);
void forwards_delete_zone(struct iter_forwards* fwd, uint16_t c,
uint8_t* nm, int nolock);
/**
* Add stub hole (empty entry in forward table, that makes resolution skip
* a forward-zone because the stub zone should override the forward zone).
* Does not add one if not necessary.
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param nm: name of zone (in uncompressed wireformat).
* @param nolock: Skip locking, locking is handled by the caller.
* @return false on failure (out of memory);
*/
int forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c, uint8_t* nm);
int forwards_add_stub_hole(struct iter_forwards* fwd, uint16_t c,
uint8_t* nm, int nolock);
/**
* Remove stub hole, if one exists.
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a writelock on the forwards structure.
* @param fwd: the forward data structure
* @param c: class of zone
* @param nm: name of zone (in uncompressed wireformat).
* @param nolock: Skip locking, locking is handled by the caller.
*/
void forwards_delete_stub_hole(struct iter_forwards* fwd, uint16_t c,
uint8_t* nm);
uint8_t* nm, int nolock);
#endif /* ITERATOR_ITER_FWD_H */
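
The lookup functions documented above return a pointer into the locked tree: with nolock=0 they take the read lock themselves, release it when they return NULL, and leave it held when they return a delegation point, so the caller must unlock after using (or copying) the result. A minimal sketch of that contract, not part of the patch; the helper name copy_forward and the use of delegpt_copy() are illustrative:

/* Sketch: look up a forward, copy it out, then release the read lock. */
#include "config.h"
#include "util/locks.h"
#include "util/regional.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_delegpt.h"

static struct delegpt*
copy_forward(struct iter_forwards* fwd, struct regional* region,
	uint8_t* qname, uint16_t qclass)
{
	int nolock = 0; /* forwards_lookup takes the read lock itself */
	struct delegpt* dp = forwards_lookup(fwd, qname, qclass, nolock);
	if(!dp)
		return NULL; /* lock already released inside forwards_lookup */
	/* dp points into fwd->tree and is only valid while the lock is held */
	dp = delegpt_copy(dp, region);
	lock_rw_unlock(&fwd->lock);
	return dp; /* NULL here means malloc failure during the copy */
}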

View file

@ -57,6 +57,8 @@ hints_create(void)
sizeof(struct iter_hints));
if(!hints)
return NULL;
lock_rw_init(&hints->lock);
lock_protect(&hints->lock, &hints->tree, sizeof(hints->tree));
return hints;
}
@ -83,6 +85,7 @@ hints_delete(struct iter_hints* hints)
{
if(!hints)
return;
lock_rw_destroy(&hints->lock);
hints_del_tree(hints);
free(hints);
}
@ -438,47 +441,70 @@ read_root_hints_list(struct iter_hints* hints, struct config_file* cfg)
int
hints_apply_cfg(struct iter_hints* hints, struct config_file* cfg)
{
int nolock = 1;
lock_rw_wrlock(&hints->lock);
hints_del_tree(hints);
name_tree_init(&hints->tree);
/* read root hints */
if(!read_root_hints_list(hints, cfg))
if(!read_root_hints_list(hints, cfg)) {
lock_rw_unlock(&hints->lock);
return 0;
}
/* read stub hints */
if(!read_stubs(hints, cfg))
if(!read_stubs(hints, cfg)) {
lock_rw_unlock(&hints->lock);
return 0;
}
/* use fallback compiletime root hints */
if(!hints_lookup_root(hints, LDNS_RR_CLASS_IN)) {
if(!hints_find_root(hints, LDNS_RR_CLASS_IN, nolock)) {
struct delegpt* dp = compile_time_root_prime(cfg->do_ip4,
cfg->do_ip6);
verbose(VERB_ALGO, "no config, using builtin root hints.");
if(!dp)
if(!dp) {
lock_rw_unlock(&hints->lock);
return 0;
if(!hints_insert(hints, LDNS_RR_CLASS_IN, dp, 0))
}
if(!hints_insert(hints, LDNS_RR_CLASS_IN, dp, 0)) {
lock_rw_unlock(&hints->lock);
return 0;
}
}
name_tree_init_parents(&hints->tree);
lock_rw_unlock(&hints->lock);
return 1;
}
struct delegpt*
hints_lookup_root(struct iter_hints* hints, uint16_t qclass)
struct delegpt*
hints_find(struct iter_hints* hints, uint8_t* qname, uint16_t qclass,
int nolock)
{
struct iter_hints_stub *stub;
size_t len;
int has_dp;
int labs = dname_count_size_labels(qname, &len);
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_rdlock(&hints->lock); }
stub = (struct iter_hints_stub*)name_tree_find(&hints->tree,
qname, len, labs, qclass);
has_dp = stub && stub->dp;
if(!has_dp && !nolock) { lock_rw_unlock(&hints->lock); }
return has_dp?stub->dp:NULL;
}
struct delegpt*
hints_find_root(struct iter_hints* hints, uint16_t qclass, int nolock)
{
uint8_t rootlab = 0;
struct iter_hints_stub *stub;
stub = (struct iter_hints_stub*)name_tree_find(&hints->tree,
&rootlab, 1, 1, qclass);
if(!stub)
return NULL;
return stub->dp;
return hints_find(hints, &rootlab, qclass, nolock);
}
struct iter_hints_stub*
hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
uint16_t qclass, struct delegpt* cache_dp)
hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
uint16_t qclass, struct delegpt* cache_dp, int nolock)
{
size_t len;
int labs;
@ -486,14 +512,20 @@ hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
/* first lookup the stub */
labs = dname_count_size_labels(qname, &len);
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_rdlock(&hints->lock); }
r = (struct iter_hints_stub*)name_tree_lookup(&hints->tree, qname,
len, labs, qclass);
if(!r) return NULL;
if(!r) {
if(!nolock) { lock_rw_unlock(&hints->lock); }
return NULL;
}
/* If there is no cache (root prime situation) */
if(cache_dp == NULL) {
if(r->dp->namelabs != 1)
return r; /* no cache dp, use any non-root stub */
if(!nolock) { lock_rw_unlock(&hints->lock); }
return NULL;
}
@ -510,12 +542,18 @@ hints_lookup_stub(struct iter_hints* hints, uint8_t* qname,
if(dname_strict_subdomain(r->dp->name, r->dp->namelabs,
cache_dp->name, cache_dp->namelabs))
return r; /* need to prime this stub */
if(!nolock) { lock_rw_unlock(&hints->lock); }
return NULL;
}
int hints_next_root(struct iter_hints* hints, uint16_t* qclass)
int hints_next_root(struct iter_hints* hints, uint16_t* qclass, int nolock)
{
return name_tree_next_root(&hints->tree, qclass);
int ret;
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_rdlock(&hints->lock); }
ret = name_tree_next_root(&hints->tree, qclass);
if(!nolock) { lock_rw_unlock(&hints->lock); }
return ret;
}
size_t
@ -524,39 +562,52 @@ hints_get_mem(struct iter_hints* hints)
size_t s;
struct iter_hints_stub* p;
if(!hints) return 0;
lock_rw_rdlock(&hints->lock);
s = sizeof(*hints);
RBTREE_FOR(p, struct iter_hints_stub*, &hints->tree) {
s += sizeof(*p) + delegpt_get_mem(p->dp);
}
lock_rw_unlock(&hints->lock);
return s;
}
int
hints_add_stub(struct iter_hints* hints, uint16_t c, struct delegpt* dp,
int noprime)
int noprime, int nolock)
{
struct iter_hints_stub *z;
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_wrlock(&hints->lock); }
if((z=(struct iter_hints_stub*)name_tree_find(&hints->tree,
dp->name, dp->namelen, dp->namelabs, c)) != NULL) {
(void)rbtree_delete(&hints->tree, &z->node);
hints_stub_free(z);
}
if(!hints_insert(hints, c, dp, noprime))
if(!hints_insert(hints, c, dp, noprime)) {
if(!nolock) { lock_rw_unlock(&hints->lock); }
return 0;
}
name_tree_init_parents(&hints->tree);
if(!nolock) { lock_rw_unlock(&hints->lock); }
return 1;
}
void
hints_delete_stub(struct iter_hints* hints, uint16_t c, uint8_t* nm)
hints_delete_stub(struct iter_hints* hints, uint16_t c, uint8_t* nm,
int nolock)
{
struct iter_hints_stub *z;
size_t len;
int labs = dname_count_size_labels(nm, &len);
/* lock_() calls are macros that could be nothing, surround in {} */
if(!nolock) { lock_rw_wrlock(&hints->lock); }
if(!(z=(struct iter_hints_stub*)name_tree_find(&hints->tree,
nm, len, labs, c)))
nm, len, labs, c))) {
if(!nolock) { lock_rw_unlock(&hints->lock); }
return; /* nothing to do */
}
(void)rbtree_delete(&hints->tree, &z->node);
hints_stub_free(z);
name_tree_init_parents(&hints->tree);
if(!nolock) { lock_rw_unlock(&hints->lock); }
}

View file

@ -43,6 +43,7 @@
#ifndef ITERATOR_ITER_HINTS_H
#define ITERATOR_ITER_HINTS_H
#include "util/storage/dnstree.h"
#include "util/locks.h"
struct iter_env;
struct config_file;
struct delegpt;
@ -51,6 +52,10 @@ struct delegpt;
* Iterator hints structure
*/
struct iter_hints {
/** lock on the hints tree.
* When grabbing both this lock and the anchors.lock, this lock
* is grabbed first. */
lock_rw_type lock;
/**
* Hints are stored in this tree. Sort order is specially chosen.
* first sorted on qclass. Then on dname in nsec-like order, so that
@ -95,42 +100,70 @@ void hints_delete(struct iter_hints* hints);
int hints_apply_cfg(struct iter_hints* hints, struct config_file* cfg);
/**
* Find root hints for the given class.
* Find hints for the given class.
* The return value is contents of the hints structure.
* Caller should lock and unlock a readlock on the hints structure if nolock
* is set.
* Otherwise caller should unlock the readlock on the hints structure if a
* value was returned.
* @param hints: hint storage.
* @param qname: the qname that generated the delegation point.
* @param qclass: class for which root hints are requested. host order.
* @param nolock: Skip locking, locking is handled by the caller.
* @return: NULL if no hints, or a ptr to stored hints.
*/
struct delegpt* hints_lookup_root(struct iter_hints* hints, uint16_t qclass);
struct delegpt* hints_find(struct iter_hints* hints, uint8_t* qname,
uint16_t qclass, int nolock);
/**
* Same as hints_lookup, but for the root only.
* @param hints: hint storage.
* @param qclass: class for which root hints are requested. host order.
* @param nolock: Skip locking, locking is handled by the caller.
* @return: NULL if no hints, or a ptr to stored hints.
*/
struct delegpt* hints_find_root(struct iter_hints* hints,
uint16_t qclass, int nolock);
/**
* Find next root hints (to cycle through all root hints).
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a readlock on the hints structure.
* @param hints: hint storage
* @param qclass: class for which root hints are sought.
* 0 means give the first available root hints class.
* x means, give class x or a higher class if any.
* returns the found class in this variable.
* @param nolock: Skip locking, locking is handled by the caller.
* @return true if a root hint class is found.
* false if no root hint class is found (qclass may have been changed).
*/
int hints_next_root(struct iter_hints* hints, uint16_t* qclass);
int hints_next_root(struct iter_hints* hints, uint16_t* qclass, int nolock);
/**
* Given a qname/qclass combination, and the delegation point from the cache
* for this qname/qclass, determine if this combination indicates that a
* stub hint exists and must be primed.
* The return value is contents of the hints structure.
* Caller should lock and unlock a readlock on the hints structure if nolock
* is set.
* Otherwise caller should unlock the readlock on the hints structure if a
* value was returned.
*
* @param hints: hint storage.
* @param qname: The qname that generated the delegation point.
* @param qclass: The qclass that generated the delegation point.
* @param dp: The cache generated delegation point.
* @param nolock: Skip locking, locking is handled by the caller.
* @return: A priming delegation point if there is a stub hint that must
* be primed, otherwise null.
*/
struct iter_hints_stub* hints_lookup_stub(struct iter_hints* hints,
uint8_t* qname, uint16_t qclass, struct delegpt* dp);
struct iter_hints_stub* hints_lookup_stub(struct iter_hints* hints,
uint8_t* qname, uint16_t qclass, struct delegpt* dp, int nolock);
/**
* Get memory in use by hints
* Locks and unlocks the structure.
* @param hints: hint storage.
* @return bytes in use
*/
@ -139,23 +172,30 @@ size_t hints_get_mem(struct iter_hints* hints);
/**
* Add stub to hints structure. For external use since it recalcs
* the tree parents.
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a writelock on the hints structure.
* @param hints: the hints data structure
* @param c: class of zone
* @param dp: delegation point with name and target nameservers for new
* hints stub. malloced.
* @param noprime: set noprime option to true or false on new hint stub.
* @param nolock: Skip locking, locking is handled by the caller.
* @return false on failure (out of memory);
*/
int hints_add_stub(struct iter_hints* hints, uint16_t c, struct delegpt* dp,
int noprime);
int noprime, int nolock);
/**
* Remove stub from hints structure. For external use since it
* recalcs the tree parents.
* Handles its own locking unless nolock is set. In that case the caller
* should lock and unlock a writelock on the hints structure.
* @param hints: the hints data structure
* @param c: class of stub zone
* @param nm: name of stub zone (in uncompressed wireformat).
* @param nolock: Skip locking, locking is handled by the caller.
*/
void hints_delete_stub(struct iter_hints* hints, uint16_t c, uint8_t* nm);
void hints_delete_stub(struct iter_hints* hints, uint16_t c,
uint8_t* nm, int nolock);
#endif /* ITERATOR_ITER_HINTS_H */
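
When both structures are consulted together, the struct comments above fix the lock order: the forwards lock is taken before the hints lock, and either one before the trust-anchors lock. A small sketch of an atomic read over both trees under that order, not from the patch; the helper name and its exact-match semantics (hints_find for a stub at qname, forwards_lookup for the closest enclosing forward) are illustrative:

/* Sketch: read stub and forward configuration for one name atomically. */
#include "config.h"
#include "util/locks.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_hints.h"

static int
name_has_stub_or_forward(struct iter_forwards* fwd, struct iter_hints* hints,
	uint8_t* qname, uint16_t qclass)
{
	int nolock = 1, found;
	/* documented order: forwards lock first, then hints lock */
	lock_rw_rdlock(&fwd->lock);
	lock_rw_rdlock(&hints->lock);
	found = (hints_find(hints, qname, qclass, nolock) != NULL)
		|| (forwards_lookup(fwd, qname, qclass, nolock) != NULL);
	lock_rw_unlock(&hints->lock);
	lock_rw_unlock(&fwd->lock);
	return found;
}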

View file

@ -1284,8 +1284,17 @@ iter_get_next_root(struct iter_hints* hints, struct iter_forwards* fwd,
uint16_t* c)
{
uint16_t c1 = *c, c2 = *c;
int r1 = hints_next_root(hints, &c1);
int r2 = forwards_next_root(fwd, &c2);
int r1, r2;
int nolock = 1;
/* prelock both forwards and hints for atomic read. */
lock_rw_rdlock(&fwd->lock);
lock_rw_rdlock(&hints->lock);
r1 = hints_next_root(hints, &c1, nolock);
r2 = forwards_next_root(fwd, &c2, nolock);
lock_rw_unlock(&fwd->lock);
lock_rw_unlock(&hints->lock);
if(!r1 && !r2) /* got none, end of list */
return 0;
else if(!r1) /* got one, return that */
@ -1450,15 +1459,21 @@ int iter_dp_cangodown(struct query_info* qinfo, struct delegpt* dp)
int
iter_stub_fwd_no_cache(struct module_qstate *qstate, struct query_info *qinf,
uint8_t** retdpname, size_t* retdpnamelen)
uint8_t** retdpname, size_t* retdpnamelen, uint8_t* dpname_storage,
size_t dpname_storage_len)
{
struct iter_hints_stub *stub;
struct delegpt *dp;
int nolock = 1;
/* Check for stub. */
/* Lock both forwards and hints for atomic read. */
lock_rw_rdlock(&qstate->env->fwds->lock);
lock_rw_rdlock(&qstate->env->hints->lock);
stub = hints_lookup_stub(qstate->env->hints, qinf->qname,
qinf->qclass, NULL);
dp = forwards_lookup(qstate->env->fwds, qinf->qname, qinf->qclass);
qinf->qclass, NULL, nolock);
dp = forwards_lookup(qstate->env->fwds, qinf->qname, qinf->qclass,
nolock);
/* see if forward or stub is more pertinent */
if(stub && stub->dp && dp) {
@ -1472,7 +1487,9 @@ iter_stub_fwd_no_cache(struct module_qstate *qstate, struct query_info *qinf,
/* check stub */
if (stub != NULL && stub->dp != NULL) {
if(stub->dp->no_cache) {
int stub_no_cache = stub->dp->no_cache;
lock_rw_unlock(&qstate->env->fwds->lock);
if(stub_no_cache) {
char qname[255+1];
char dpname[255+1];
dname_str(qinf->qname, qname);
@ -1480,15 +1497,27 @@ iter_stub_fwd_no_cache(struct module_qstate *qstate, struct query_info *qinf,
verbose(VERB_ALGO, "stub for %s %s has no_cache", qname, dpname);
}
if(retdpname) {
*retdpname = stub->dp->name;
if(stub->dp->namelen > dpname_storage_len) {
verbose(VERB_ALGO, "no cache stub dpname too long");
lock_rw_unlock(&qstate->env->hints->lock);
*retdpname = NULL;
*retdpnamelen = 0;
return stub_no_cache;
}
memmove(dpname_storage, stub->dp->name,
stub->dp->namelen);
*retdpname = dpname_storage;
*retdpnamelen = stub->dp->namelen;
}
return (stub->dp->no_cache);
lock_rw_unlock(&qstate->env->hints->lock);
return stub_no_cache;
}
/* Check for forward. */
if (dp) {
if(dp->no_cache) {
int dp_no_cache = dp->no_cache;
lock_rw_unlock(&qstate->env->hints->lock);
if(dp_no_cache) {
char qname[255+1];
char dpname[255+1];
dname_str(qinf->qname, qname);
@ -1496,11 +1525,22 @@ iter_stub_fwd_no_cache(struct module_qstate *qstate, struct query_info *qinf,
verbose(VERB_ALGO, "forward for %s %s has no_cache", qname, dpname);
}
if(retdpname) {
*retdpname = dp->name;
if(dp->namelen > dpname_storage_len) {
verbose(VERB_ALGO, "no cache dpname too long");
lock_rw_unlock(&qstate->env->fwds->lock);
*retdpname = NULL;
*retdpnamelen = 0;
return dp_no_cache;
}
memmove(dpname_storage, dp->name, dp->namelen);
*retdpname = dpname_storage;
*retdpnamelen = dp->namelen;
}
return (dp->no_cache);
lock_rw_unlock(&qstate->env->fwds->lock);
return dp_no_cache;
}
lock_rw_unlock(&qstate->env->fwds->lock);
lock_rw_unlock(&qstate->env->hints->lock);
if(retdpname) {
*retdpname = NULL;
*retdpnamelen = 0;

View file

@ -407,10 +407,14 @@ int iter_dp_cangodown(struct query_info* qinfo, struct delegpt* dp);
* Used for NXDOMAIN checks, above that it is an nxdomain from a
* different server and zone. You can pass NULL to not get it.
* @param retdpnamelen: returns the length of the dpname.
* @param dpname_storage: this is where the dpname buf is stored, if any.
* So that caller can manage the buffer.
* @param dpname_storage_len: size of dpname_storage buffer.
* @return true if no_cache is set in stub or fwd.
*/
int iter_stub_fwd_no_cache(struct module_qstate *qstate,
struct query_info *qinf, uint8_t** retdpname, size_t* retdpnamelen);
struct query_info *qinf, uint8_t** retdpname, size_t* retdpnamelen,
uint8_t* dpname_storage, size_t dpname_storage_len);
/**
* Set support for IP4 and IP6 depending on outgoing interfaces
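
The dpname_storage parameter exists because iter_stub_fwd_no_cache() no longer hands back a pointer into the (now locked) stub or forward tree: the name is copied into a buffer owned by the caller before the locks are dropped. A minimal calling sketch, mirroring what the cachedb and iterator call sites in this patch do; the wrapper function name is illustrative:

/* Sketch: caller supplies the buffer that receives the stub/forward name. */
#include "config.h"
#include "util/module.h"
#include "iterator/iter_utils.h"
#include "sldns/rrdef.h"

static int
skip_cache_for_stub_fwd(struct module_qstate* qstate)
{
	/* buffer owned by the caller; the name is copied here under the lock */
	uint8_t dpname_storage[LDNS_MAX_DOMAINLEN+1];
	uint8_t* dpname = NULL;
	size_t dpnamelen = 0;
	if(iter_stub_fwd_no_cache(qstate, &qstate->qinfo, &dpname, &dpnamelen,
		dpname_storage, sizeof(dpname_storage)))
		return 1; /* no_cache set on the matching stub/forward */
	/* dpname, if set, points into dpname_storage, not into the trees */
	return 0;
}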

View file

@ -679,30 +679,40 @@ errinf_reply(struct module_qstate* qstate, struct iter_qstate* iq)
/** see if last resort is possible - does config allow queries to parent */
static int
can_have_last_resort(struct module_env* env, uint8_t* nm, size_t nmlen,
uint16_t qclass, struct delegpt** retdp)
can_have_last_resort(struct module_env* env, uint8_t* nm, size_t ATTR_UNUSED(nmlen),
uint16_t qclass, int* have_dp, struct delegpt** retdp,
struct regional* region)
{
struct delegpt* fwddp;
struct iter_hints_stub* stub;
int labs = dname_count_labels(nm);
struct delegpt* dp = NULL;
int nolock = 0;
/* do not process a last resort (the parent side) if a stub
* or forward is configured, because we do not want to go 'above'
* the configured servers */
if(!dname_is_root(nm) && (stub = (struct iter_hints_stub*)
name_tree_find(&env->hints->tree, nm, nmlen, labs, qclass)) &&
if(!dname_is_root(nm) &&
(dp = hints_find(env->hints, nm, qclass, nolock)) &&
/* has_parent side is turned off for stub_first, where we
* are allowed to go to the parent */
stub->dp->has_parent_side_NS) {
if(retdp) *retdp = stub->dp;
dp->has_parent_side_NS) {
if(retdp) *retdp = delegpt_copy(dp, region);
lock_rw_unlock(&env->hints->lock);
if(have_dp) *have_dp = 1;
return 0;
}
if((fwddp = forwards_find(env->fwds, nm, qclass)) &&
if(dp) {
lock_rw_unlock(&env->hints->lock);
dp = NULL;
}
if((dp = forwards_find(env->fwds, nm, qclass, nolock)) &&
/* has_parent_side is turned off for forward_first, where
* we are allowed to go to the parent */
fwddp->has_parent_side_NS) {
if(retdp) *retdp = fwddp;
dp->has_parent_side_NS) {
if(retdp) *retdp = delegpt_copy(dp, region);
lock_rw_unlock(&env->fwds->lock);
if(have_dp) *have_dp = 1;
return 0;
}
/* lock_() calls are macros that could be nothing, surround in {} */
if(dp) { lock_rw_unlock(&env->fwds->lock); }
return 1;
}
@ -878,10 +888,11 @@ prime_root(struct module_qstate* qstate, struct iter_qstate* iq, int id,
{
struct delegpt* dp;
struct module_qstate* subq;
int nolock = 0;
verbose(VERB_DETAIL, "priming . %s NS",
sldns_lookup_by_id(sldns_rr_classes, (int)qclass)?
sldns_lookup_by_id(sldns_rr_classes, (int)qclass)->name:"??");
dp = hints_lookup_root(qstate->env->hints, qclass);
dp = hints_find_root(qstate->env->hints, qclass, nolock);
if(!dp) {
verbose(VERB_ALGO, "Cannot prime due to lack of hints");
return 0;
@ -891,6 +902,7 @@ prime_root(struct module_qstate* qstate, struct iter_qstate* iq, int id,
if(!generate_sub_request((uint8_t*)"\000", 1, LDNS_RR_TYPE_NS,
qclass, qstate, id, iq, QUERYTARGETS_STATE, PRIME_RESP_STATE,
&subq, 0, 0)) {
lock_rw_unlock(&qstate->env->hints->lock);
verbose(VERB_ALGO, "could not prime root");
return 0;
}
@ -901,6 +913,7 @@ prime_root(struct module_qstate* qstate, struct iter_qstate* iq, int id,
* copy dp, it is now part of the root prime query.
* dp was part of in the fixed hints structure. */
subiq->dp = delegpt_copy(dp, subq->region);
lock_rw_unlock(&qstate->env->hints->lock);
if(!subiq->dp) {
log_err("out of memory priming root, copydp");
fptr_ok(fptr_whitelist_modenv_kill_sub(
@ -912,6 +925,8 @@ prime_root(struct module_qstate* qstate, struct iter_qstate* iq, int id,
subiq->num_target_queries = 0;
subiq->dnssec_expected = iter_indicates_dnssec(
qstate->env, subiq->dp, NULL, subq->qinfo.qclass);
} else {
lock_rw_unlock(&qstate->env->hints->lock);
}
/* this module stops, our submodule starts, and does the query. */
@ -942,18 +957,21 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
struct iter_hints_stub* stub;
struct delegpt* stub_dp;
struct module_qstate* subq;
int nolock = 0;
if(!qname) return 0;
stub = hints_lookup_stub(qstate->env->hints, qname, qclass, iq->dp);
stub = hints_lookup_stub(qstate->env->hints, qname, qclass, iq->dp,
nolock);
/* The stub (if there is one) does not need priming. */
if(!stub)
return 0;
if(!stub) return 0;
stub_dp = stub->dp;
/* if we have an auth_zone dp, and stub is equal, don't prime stub
* yet, unless we want to fallback and avoid the auth_zone */
if(!iq->auth_zone_avoid && iq->dp && iq->dp->auth_dp &&
query_dname_compare(iq->dp->name, stub_dp->name) == 0)
query_dname_compare(iq->dp->name, stub_dp->name) == 0) {
lock_rw_unlock(&qstate->env->hints->lock);
return 0;
}
/* is it a noprime stub (always use) */
if(stub->noprime) {
@ -962,13 +980,14 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
/* copy the dp out of the fixed hints structure, so that
* it can be changed when servicing this query */
iq->dp = delegpt_copy(stub_dp, qstate->region);
lock_rw_unlock(&qstate->env->hints->lock);
if(!iq->dp) {
log_err("out of memory priming stub");
errinf(qstate, "malloc failure, priming stub");
(void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
return 1; /* return 1 to make module stop, with error */
}
log_nametypeclass(VERB_DETAIL, "use stub", stub_dp->name,
log_nametypeclass(VERB_DETAIL, "use stub", iq->dp->name,
LDNS_RR_TYPE_NS, qclass);
return r;
}
@ -982,6 +1001,7 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
if(!generate_sub_request(stub_dp->name, stub_dp->namelen,
LDNS_RR_TYPE_NS, qclass, qstate, id, iq,
QUERYTARGETS_STATE, PRIME_RESP_STATE, &subq, 0, 0)) {
lock_rw_unlock(&qstate->env->hints->lock);
verbose(VERB_ALGO, "could not prime stub");
errinf(qstate, "could not generate lookup for stub prime");
(void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
@ -994,6 +1014,7 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
/* Set the initial delegation point to the hint. */
/* make copy to avoid use of stub dp by different qs/threads */
subiq->dp = delegpt_copy(stub_dp, subq->region);
lock_rw_unlock(&qstate->env->hints->lock);
if(!subiq->dp) {
log_err("out of memory priming stub, copydp");
fptr_ok(fptr_whitelist_modenv_kill_sub(
@ -1010,6 +1031,8 @@ prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
subiq->wait_priming_stub = 1;
subiq->dnssec_expected = iter_indicates_dnssec(
qstate->env, subiq->dp, NULL, subq->qinfo.qclass);
} else {
lock_rw_unlock(&qstate->env->hints->lock);
}
/* this module stops, our submodule starts, and does the query. */
@ -1182,7 +1205,7 @@ generate_ns_check(struct module_qstate* qstate, struct iter_qstate* iq, int id)
if(iq->depth == ie->max_dependency_depth)
return;
if(!can_have_last_resort(qstate->env, iq->dp->name, iq->dp->namelen,
iq->qchase.qclass, NULL))
iq->qchase.qclass, NULL, NULL, NULL))
return;
/* is this query the same as the nscheck? */
if(qstate->qinfo.qtype == LDNS_RR_TYPE_NS &&
@ -1295,6 +1318,7 @@ forward_request(struct module_qstate* qstate, struct iter_qstate* iq)
struct delegpt* dp;
uint8_t* delname = iq->qchase.qname;
size_t delnamelen = iq->qchase.qname_len;
int nolock = 0;
if(iq->refetch_glue && iq->dp) {
delname = iq->dp->name;
delnamelen = iq->dp->namelen;
@ -1303,12 +1327,13 @@ forward_request(struct module_qstate* qstate, struct iter_qstate* iq)
if( (iq->qchase.qtype == LDNS_RR_TYPE_DS || iq->refetch_glue)
&& !dname_is_root(iq->qchase.qname))
dname_remove_label(&delname, &delnamelen);
dp = forwards_lookup(qstate->env->fwds, delname, iq->qchase.qclass);
if(!dp)
return 0;
dp = forwards_lookup(qstate->env->fwds, delname, iq->qchase.qclass,
nolock);
if(!dp) return 0;
/* send recursion desired to forward addr */
iq->chase_flags |= BIT_RD;
iq->dp = delegpt_copy(dp, qstate->region);
lock_rw_unlock(&qstate->env->fwds->lock);
/* iq->dp checked by caller */
verbose(VERB_ALGO, "forwarding request");
return 1;
@ -1336,6 +1361,7 @@ static int
processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
struct iter_env* ie, int id)
{
uint8_t dpname_storage[LDNS_MAX_DOMAINLEN+1];
uint8_t* delname, *dpname=NULL;
size_t delnamelen, dpnamelen=0;
struct dns_msg* msg = NULL;
@ -1382,7 +1408,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
if (iq->refetch_glue &&
iq->dp &&
!can_have_last_resort(qstate->env, iq->dp->name,
iq->dp->namelen, iq->qchase.qclass, NULL)) {
iq->dp->namelen, iq->qchase.qclass, NULL, NULL, NULL)) {
iq->refetch_glue = 0;
}
@ -1443,7 +1469,8 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
}
}
if (iter_stub_fwd_no_cache(qstate, &iq->qchase, &dpname, &dpnamelen)) {
if (iter_stub_fwd_no_cache(qstate, &iq->qchase, &dpname, &dpnamelen,
dpname_storage, sizeof(dpname_storage))) {
/* Asked to not query cache. */
verbose(VERB_ALGO, "no-cache set, going to the network");
qstate->no_cache_lookup = 1;
@ -1574,7 +1601,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
}
if(iq->qchase.qtype == LDNS_RR_TYPE_DS || iq->refetch_glue ||
(iq->qchase.qtype == LDNS_RR_TYPE_NS && qstate->prefetch_leeway
&& can_have_last_resort(qstate->env, delname, delnamelen, iq->qchase.qclass, NULL))) {
&& can_have_last_resort(qstate->env, delname, delnamelen, iq->qchase.qclass, NULL, NULL, NULL))) {
/* remove first label from delname, root goes to hints,
* but only to fetch glue, not for qtype=DS. */
/* also when prefetching an NS record, fetch it again from
@ -1603,6 +1630,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
* root priming situation. */
if(iq->dp == NULL) {
int r;
int nolock = 0;
/* if under auth zone, no prime needed */
if(!auth_zone_delegpt(qstate, iq, delname, delnamelen))
return error_response(qstate, id,
@ -1616,12 +1644,13 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
break; /* got noprime-stub-zone, continue */
else if(r)
return 0; /* stub prime request made */
if(forwards_lookup_root(qstate->env->fwds,
iq->qchase.qclass)) {
if(forwards_lookup_root(qstate->env->fwds,
iq->qchase.qclass, nolock)) {
lock_rw_unlock(&qstate->env->fwds->lock);
/* forward zone root, no root prime needed */
/* fill in some dp - safety belt */
iq->dp = hints_lookup_root(qstate->env->hints,
iq->qchase.qclass);
iq->dp = hints_find_root(qstate->env->hints,
iq->qchase.qclass, nolock);
if(!iq->dp) {
log_err("internal error: no hints dp");
errinf(qstate, "no hints for this class");
@ -1629,6 +1658,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
LDNS_RCODE_SERVFAIL);
}
iq->dp = delegpt_copy(iq->dp, qstate->region);
lock_rw_unlock(&qstate->env->hints->lock);
if(!iq->dp) {
log_err("out of memory in safety belt");
errinf(qstate, "malloc failure, in safety belt");
@ -1668,15 +1698,13 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
if(iter_dp_is_useless(&qstate->qinfo, qstate->query_flags,
iq->dp, ie->supports_ipv4, ie->supports_ipv6,
ie->use_nat64)) {
struct delegpt* retdp = NULL;
if(!can_have_last_resort(qstate->env, iq->dp->name, iq->dp->namelen, iq->qchase.qclass, &retdp)) {
if(retdp) {
int have_dp = 0;
if(!can_have_last_resort(qstate->env, iq->dp->name, iq->dp->namelen, iq->qchase.qclass, &have_dp, &iq->dp, qstate->region)) {
if(have_dp) {
verbose(VERB_QUERY, "cache has stub "
"or fwd but no addresses, "
"fallback to config");
iq->dp = delegpt_copy(retdp,
qstate->region);
if(!iq->dp) {
if(have_dp && !iq->dp) {
log_err("out of memory in "
"stub/fwd fallback");
errinf(qstate, "malloc failure, for fallback to config");
@ -1696,10 +1724,11 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
}
if(dname_is_root(iq->dp->name)) {
/* use safety belt */
int nolock = 0;
verbose(VERB_QUERY, "Cache has root NS but "
"no addresses. Fallback to the safety belt.");
iq->dp = hints_lookup_root(qstate->env->hints,
iq->qchase.qclass);
iq->dp = hints_find_root(qstate->env->hints,
iq->qchase.qclass, nolock);
/* note deleg_msg is from previous lookup,
* but RD is on, so it is not used */
if(!iq->dp) {
@ -1708,6 +1737,7 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
LDNS_RCODE_REFUSED);
}
iq->dp = delegpt_copy(iq->dp, qstate->region);
lock_rw_unlock(&qstate->env->hints->lock);
if(!iq->dp) {
log_err("out of memory in safety belt");
errinf(qstate, "malloc failure, in safety belt, for root");
@ -1763,6 +1793,7 @@ processInitRequest2(struct module_qstate* qstate, struct iter_qstate* iq,
delnamelen = iq->qchase.qname_len;
if(iq->refetch_glue) {
struct iter_hints_stub* stub;
int nolock = 0;
if(!iq->dp) {
log_err("internal or malloc fail: no dp for refetch");
errinf(qstate, "malloc failure, no delegation info");
@ -1772,12 +1803,14 @@ processInitRequest2(struct module_qstate* qstate, struct iter_qstate* iq,
* this is above stub without stub-first. */
stub = hints_lookup_stub(
qstate->env->hints, iq->qchase.qname, iq->qchase.qclass,
iq->dp);
iq->dp, nolock);
if(!stub || !stub->dp->has_parent_side_NS ||
dname_subdomain_c(iq->dp->name, stub->dp->name)) {
delname = iq->dp->name;
delnamelen = iq->dp->namelen;
}
/* lock_() calls are macros that could be nothing, surround in {} */
if(stub) { lock_rw_unlock(&qstate->env->hints->lock); }
}
if(iq->qchase.qtype == LDNS_RR_TYPE_DS || iq->refetch_glue) {
if(!dname_is_root(delname))
@ -2081,7 +2114,7 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
log_assert(iq->dp);
if(!can_have_last_resort(qstate->env, iq->dp->name, iq->dp->namelen,
iq->qchase.qclass, NULL)) {
iq->qchase.qclass, NULL, NULL, NULL)) {
/* fail -- no more targets, no more hope of targets, no hope
* of a response. */
errinf(qstate, "all the configured stub or forward servers failed,");
@ -2091,21 +2124,24 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
return error_response_cache(qstate, id, LDNS_RCODE_SERVFAIL);
}
if(!iq->dp->has_parent_side_NS && dname_is_root(iq->dp->name)) {
struct delegpt* p = hints_lookup_root(qstate->env->hints,
iq->qchase.qclass);
if(p) {
struct delegpt* dp;
int nolock = 0;
dp = hints_find_root(qstate->env->hints,
iq->qchase.qclass, nolock);
if(dp) {
struct delegpt_addr* a;
iq->chase_flags &= ~BIT_RD; /* go to authorities */
for(ns = p->nslist; ns; ns=ns->next) {
for(ns = dp->nslist; ns; ns=ns->next) {
(void)delegpt_add_ns(iq->dp, qstate->region,
ns->name, ns->lame, ns->tls_auth_name,
ns->port);
}
for(a = p->target_list; a; a=a->next_target) {
for(a = dp->target_list; a; a=a->next_target) {
(void)delegpt_add_addr(iq->dp, qstate->region,
&a->addr, a->addrlen, a->bogus,
a->lame, a->tls_auth_name, -1, NULL);
}
lock_rw_unlock(&qstate->env->hints->lock);
}
iq->dp->has_parent_side_NS = 1;
} else if(!iq->dp->has_parent_side_NS) {
@ -2183,7 +2219,7 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
if( ((ie->supports_ipv6 && !ns->done_pside6) ||
((ie->supports_ipv4 || ie->use_nat64) && !ns->done_pside4)) &&
!can_have_last_resort(qstate->env, ns->name, ns->namelen,
iq->qchase.qclass, NULL)) {
iq->qchase.qclass, NULL, NULL, NULL)) {
log_nametypeclass(VERB_ALGO, "cannot pside lookup ns "
"because it is also a stub/forward,",
ns->name, LDNS_RR_TYPE_NS, iq->qchase.qclass);
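
The can_have_last_resort() calls in this file gain three trailing arguments. Judging from the fallback hunk above (&have_dp, &iq->dp, qstate->region), the function can now report whether a configured stub or forward exists and hand back a region copy of its delegation point itself, replacing the old retdp-plus-delegpt_copy() pattern in the caller. Below is a sketch of a caller under that assumption, written as if it lived in iterator/iterator.c; fallback_to_configured_dp is a hypothetical name and the exact contract is inferred from the hunk, not from the header.

/* Hypothetical caller of the extended can_have_last_resort(): when it
 * returns 0 and sets *have_dp, it is assumed to have already copied the
 * configured stub/forward delegation point into the given region. */
static int
fallback_to_configured_dp(struct module_qstate* qstate,
	struct iter_qstate* iq)
{
	int have_dp = 0;
	if(can_have_last_resort(qstate->env, iq->dp->name, iq->dp->namelen,
		iq->qchase.qclass, &have_dp, &iq->dp, qstate->region))
		return 1; /* last resort is possible; nothing to replace */
	if(!have_dp)
		return 0; /* no configured stub/forward to fall back to */
	if(!iq->dp)
		return 0; /* copy into the region failed: out of memory */
	return 1; /* iq->dp now points at the region copy */
}
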


@ -53,6 +53,8 @@
#include "util/storage/slabhash.h"
#include "util/edns.h"
#include "sldns/sbuffer.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_hints.h"
int
context_finalize(struct ub_ctx* ctx)
@ -85,6 +87,12 @@ context_finalize(struct ub_ctx* ctx)
if(!auth_zones_apply_cfg(ctx->env->auth_zones, cfg, 1, &is_rpz,
ctx->env, &ctx->mods))
return UB_INITFAIL;
if(!(ctx->env->fwds = forwards_create()) ||
!forwards_apply_cfg(ctx->env->fwds, cfg))
return UB_INITFAIL;
if(!(ctx->env->hints = hints_create()) ||
!hints_apply_cfg(ctx->env->hints, cfg))
return UB_INITFAIL;
if(!edns_strings_apply_cfg(ctx->env->edns_strings, cfg))
return UB_INITFAIL;
if(!slabhash_is_size(ctx->env->msg_cache, cfg->msg_cache_size,


@ -66,6 +66,8 @@
#include "services/authzone.h"
#include "services/listen_dnsport.h"
#include "sldns/sbuffer.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_hints.h"
#ifdef HAVE_PTHREAD
#include <signal.h>
#endif
@ -379,6 +381,8 @@ ub_ctx_delete(struct ub_ctx* ctx)
config_delete(ctx->env->cfg);
edns_known_options_delete(ctx->env);
edns_strings_delete(ctx->env->edns_strings);
forwards_delete(ctx->env->fwds);
hints_delete(ctx->env->hints);
auth_zones_delete(ctx->env->auth_zones);
free(ctx->env);
}
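
The two libunbound hunks above move ownership of the forward and hint sets from the per-thread libworker into the shared library context: context_finalize() now creates and configures them once, and ub_ctx_delete() frees them. A condensed sketch of that ownership, with hypothetical helper names; error handling is reduced to a 0/1 result instead of UB_INITFAIL.

/* Hypothetical condensation of the context_finalize()/ub_ctx_delete()
 * fragments above; needs iterator/iter_fwd.h, iterator/iter_hints.h,
 * util/module.h and util/config_file.h. */
static int
setup_shared_iter_cfg(struct module_env* env, struct config_file* cfg)
{
	if(!(env->fwds = forwards_create()) ||
		!forwards_apply_cfg(env->fwds, cfg))
		return 0; /* caller maps this to UB_INITFAIL */
	if(!(env->hints = hints_create()) ||
		!hints_apply_cfg(env->hints, cfg))
		return 0;
	return 1;
}

static void
teardown_shared_iter_cfg(struct module_env* env)
{
	/* the delete functions are assumed NULL-safe, matching how
	 * ub_ctx_delete() calls them unconditionally above */
	forwards_delete(env->fwds);
	hints_delete(env->hints);
	env->fwds = NULL;
	env->hints = NULL;
}
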


@ -70,8 +70,6 @@
#include "util/data/msgreply.h"
#include "util/data/msgencode.h"
#include "util/tube.h"
#include "iterator/iter_fwd.h"
#include "iterator/iter_hints.h"
#include "sldns/sbuffer.h"
#include "sldns/str2wire.h"
#ifdef USE_DNSTAP
@ -100,8 +98,6 @@ libworker_delete_env(struct libworker* w)
!w->is_bg || w->is_bg_thread);
sldns_buffer_free(w->env->scratch_buffer);
regional_destroy(w->env->scratch);
forwards_delete(w->env->fwds);
hints_delete(w->env->hints);
ub_randfree(w->env->rnd);
free(w->env);
}
@ -159,30 +155,19 @@ libworker_setup(struct ub_ctx* ctx, int is_bg, struct ub_event_base* eb)
}
w->env->scratch = regional_create_custom(cfg->msg_buffer_size);
w->env->scratch_buffer = sldns_buffer_new(cfg->msg_buffer_size);
w->env->fwds = forwards_create();
if(w->env->fwds && !forwards_apply_cfg(w->env->fwds, cfg)) {
forwards_delete(w->env->fwds);
w->env->fwds = NULL;
}
w->env->hints = hints_create();
if(w->env->hints && !hints_apply_cfg(w->env->hints, cfg)) {
hints_delete(w->env->hints);
w->env->hints = NULL;
}
#ifdef HAVE_SSL
w->sslctx = connect_sslctx_create(NULL, NULL,
cfg->tls_cert_bundle, cfg->tls_win_cert);
if(!w->sslctx) {
/* to make the setup fail after unlock */
hints_delete(w->env->hints);
w->env->hints = NULL;
sldns_buffer_free(w->env->scratch_buffer);
w->env->scratch_buffer = NULL;
}
#endif
if(!w->is_bg || w->is_bg_thread) {
lock_basic_unlock(&ctx->cfglock);
}
if(!w->env->scratch || !w->env->scratch_buffer || !w->env->fwds ||
!w->env->hints) {
if(!w->env->scratch || !w->env->scratch_buffer) {
libworker_delete(w);
return NULL;
}


@ -1416,7 +1416,7 @@ struct delegpt* dns_cache_find_delegation(struct module_env* env,
int iter_dp_is_useless(struct query_info* qinfo, uint16_t qflags,
struct delegpt* dp, int supports_ipv4, int supports_ipv6, int use_nat64);
struct iter_hints_stub* hints_lookup_stub(struct iter_hints* hints,
uint8_t* qname, uint16_t qclass, struct delegpt* dp);
uint8_t* qname, uint16_t qclass, struct delegpt* dp, int nolock);
/* Custom function to perform logic similar to the one in daemon/cachedump.c */
struct delegpt* find_delegation(struct module_qstate* qstate, char *nm, size_t nmlen);
@ -1433,6 +1433,7 @@ struct delegpt* find_delegation(struct module_qstate* qstate, char *nm, size_t n
struct query_info qinfo;
struct iter_hints_stub* stub;
uint32_t timenow = *qstate->env->now;
int nolock = 0;
regional_free_all(region);
qinfo.qname = (uint8_t*)nm;
@ -1455,9 +1456,12 @@ struct delegpt* find_delegation(struct module_qstate* qstate, char *nm, size_t n
dname_str((uint8_t*)nm, b);
continue;
}
stub = hints_lookup_stub(qstate->env->hints, qinfo.qname, qinfo.qclass, dp);
stub = hints_lookup_stub(qstate->env->hints, qinfo.qname,
qinfo.qclass, dp, nolock);
if (stub) {
return stub->dp;
struct delegpt* stubdp = delegpt_copy(stub->dp, region);
lock_rw_unlock(&qstate->env->hints->lock);
return stubdp;
} else {
return dp;
}


@ -511,10 +511,10 @@ struct module_env {
/** auth zones */
struct auth_zones* auth_zones;
/** Mapping of forwarding zones to targets.
* iterator forwarder information. per-thread, created by worker */
* iterator forwarder information. */
struct iter_forwards* fwds;
/**
* iterator forwarder information. per-thread, created by worker.
* iterator stub information.
* The hints -- these aren't stored in the cache because they don't
* expire. The hints are always used to "prime" the cache. Note
* that both root hints and stub zone "hints" are stored in this