Mirror of https://github.com/NLnetLabs/unbound.git, synced 2025-12-20 23:00:56 -05:00
- Patch for CVE-2022-3204 Non-Responsive Delegation Attack.
This commit is contained in:
parent bd3c5702a7
commit 137719522a
9 changed files with 76 additions and 1 deletion
@@ -1,3 +1,6 @@
21 September 2022: Wouter
	- Patch for CVE-2022-3204 Non-Responsive Delegation Attack.

1 August 2022: Wouter
	- Fix the novel ghost domain issues CVE-2022-30698 and CVE-2022-30699.
	- Tests for ghost domain fixes.
@@ -78,6 +78,7 @@ struct delegpt* delegpt_copy(struct delegpt* dp, struct regional* region)
	if(!delegpt_add_ns(copy, region, ns->name, ns->lame,
		ns->tls_auth_name, ns->port))
		return NULL;
	copy->nslist->cache_lookup_count = ns->cache_lookup_count;
	copy->nslist->resolved = ns->resolved;
	copy->nslist->got4 = ns->got4;
	copy->nslist->got6 = ns->got6;
@@ -121,6 +122,7 @@ delegpt_add_ns(struct delegpt* dp, struct regional* region, uint8_t* name,
	ns->namelen = len;
	dp->nslist = ns;
	ns->name = regional_alloc_init(region, name, ns->namelen);
	ns->cache_lookup_count = 0;
	ns->resolved = 0;
	ns->got4 = 0;
	ns->got6 = 0;
@@ -620,6 +622,7 @@ int delegpt_add_ns_mlc(struct delegpt* dp, uint8_t* name, uint8_t lame,
	}
	ns->next = dp->nslist;
	dp->nslist = ns;
	ns->cache_lookup_count = 0;
	ns->resolved = 0;
	ns->got4 = 0;
	ns->got6 = 0;
@@ -101,6 +101,8 @@ struct delegpt_ns {
	uint8_t* name;
	/** length of name */
	size_t namelen;
	/** number of cache lookups for the name */
	int cache_lookup_count;
	/**
	 * If the name has been resolved. false if not queried for yet.
	 * true if the A, AAAA queries have been generated.
@@ -1209,6 +1209,9 @@ int iter_lookup_parent_glue_from_cache(struct module_env* env,
	struct delegpt_ns* ns;
	size_t num = delegpt_count_targets(dp);
	for(ns = dp->nslist; ns; ns = ns->next) {
		if(ns->cache_lookup_count > ITERATOR_NAME_CACHELOOKUP_MAX_PSIDE)
			continue;
		ns->cache_lookup_count++;
		/* get cached parentside A */
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_A, qinfo->qclass,
@@ -62,6 +62,15 @@ struct ub_packed_rrset_key;
struct module_stack;
struct outside_network;

/* max number of lookups in the cache for target nameserver names.
 * This stops, for large delegations, N*N lookups in the cache. */
#define ITERATOR_NAME_CACHELOOKUP_MAX 3
/* max number of lookups in the cache for parentside glue for nameserver names
 * This stops, for larger delegations, N*N lookups in the cache.
 * It is a little larger than the nonpside max, so it allows a couple extra
 * lookups of parent side glue. */
#define ITERATOR_NAME_CACHELOOKUP_MAX_PSIDE 5

/**
 * Process config options and set iterator module state.
 * Sets default values if no config is found.
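The two caps above are one half of the fix: each nameserver in a delegation point now carries a cache_lookup_count, and once it passes the cap, further cache lookups for that name are skipped, so a delegation with N nameservers can no longer trigger N*N cache work. A minimal, self-contained sketch of that pattern follows (it is illustrative only, not part of the commit); the toy_ns type and fake_cache_lookup() helper are stand-ins, not Unbound's real delegpt_ns and rrset_cache_lookup() APIs.

/* Illustrative sketch only, not part of the commit: a simplified model of
 * the capped cache-lookup pattern used by cache_fill_missing() and
 * iter_lookup_parent_glue_from_cache() in this patch. */
#include <stdio.h>

#define ITERATOR_NAME_CACHELOOKUP_MAX 3 /* same cap value as the patch */

struct toy_ns {
	const char* name;
	int cache_lookup_count; /* mirrors delegpt_ns.cache_lookup_count */
	struct toy_ns* next;
};

/* stand-in for an address rrset cache lookup that finds nothing */
static int fake_cache_lookup(const char* name)
{
	printf("cache lookup for %s\n", name);
	return 0;
}

static void fill_missing(struct toy_ns* list)
{
	struct toy_ns* ns;
	for(ns = list; ns; ns = ns->next) {
		/* the guard added by the patch: skip over-queried names */
		if(ns->cache_lookup_count > ITERATOR_NAME_CACHELOOKUP_MAX)
			continue;
		ns->cache_lookup_count++;
		(void)fake_cache_lookup(ns->name);
	}
}

int main(void)
{
	struct toy_ns b = { "ns2.example.net", 0, NULL };
	struct toy_ns a = { "ns1.example.net", 0, &b };
	int i;
	/* after a few passes the counters exceed the cap and lookups stop */
	for(i = 0; i < 6; i++)
		fill_missing(&a);
	return 0;
}

As the hunks above show, the counter is copied in delegpt_copy() and reset to zero in the delegpt_add_ns() variants, so the cap survives delegation point copies but starts fresh for newly added nameservers.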
@@ -1218,6 +1218,15 @@ generate_dnskey_prefetch(struct module_qstate* qstate,
		(qstate->query_flags&BIT_RD) && !(qstate->query_flags&BIT_CD)){
		return;
	}
	/* we do not generate this prefetch when the query list is full,
	 * the query is fetched, if needed, when the validator wants it.
	 * At that time the validator waits for it, after spawning it.
	 * This means there is one state that uses cpu and a socket, the
	 * spawned while this one waits, and not several at the same time,
	 * if we had created the lookup here. And this helps to keep
	 * the total load down, but the query still succeeds to resolve. */
	if(mesh_jostle_exceeded(qstate->env->mesh))
		return;

	/* if the DNSKEY is in the cache this lookup will stop quickly */
	log_nametypeclass(VERB_ALGO, "schedule dnskey prefetch",
@@ -1911,6 +1920,14 @@ query_for_targets(struct module_qstate* qstate, struct iter_qstate* iq,
			return 0;
		}
		query_count++;
		/* If the mesh query list is full, exit the loop here.
		 * This makes the routine spawn one query at a time,
		 * and this means there is no query state load
		 * increase, because the spawned state uses cpu and a
		 * socket while this state waits for that spawned
		 * state. Next time we can look up further targets */
		if(mesh_jostle_exceeded(qstate->env->mesh))
			break;
	}
	/* Send the A request. */
	if(ie->supports_ipv4 &&
@@ -1925,6 +1942,9 @@ query_for_targets(struct module_qstate* qstate, struct iter_qstate* iq,
			return 0;
		}
		query_count++;
		/* If the mesh query list is full, exit the loop. */
		if(mesh_jostle_exceeded(qstate->env->mesh))
			break;
	}

	/* mark this target as in progress. */
@@ -2085,6 +2105,15 @@ processLastResort(struct module_qstate* qstate, struct iter_qstate* iq,
			}
			ns->done_pside6 = 1;
			query_count++;
			if(mesh_jostle_exceeded(qstate->env->mesh)) {
				/* Wait for the lookup; do not spawn multiple
				 * lookups at a time. */
				verbose(VERB_ALGO, "try parent-side glue lookup");
				iq->num_target_queries += query_count;
				target_count_increase(iq, query_count);
				qstate->ext_state[id] = module_wait_subquery;
				return 0;
			}
		}
		if(ie->supports_ipv4 && !ns->done_pside4) {
			/* Send the A request. */
@@ -2560,7 +2589,12 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
	if(iq->depth < ie->max_dependency_depth
		&& iq->num_target_queries == 0
		&& (!iq->target_count || iq->target_count[TARGET_COUNT_NX]==0)
		&& iq->sent_count < TARGET_FETCH_STOP) {
		&& iq->sent_count < TARGET_FETCH_STOP
		/* if the mesh query list is full, then do not waste cpu
		 * and sockets to fetch promiscuous targets. They can be
		 * looked up when needed. */
		&& !mesh_jostle_exceeded(qstate->env->mesh)
		) {
		tf_policy = ie->target_fetch_policy[iq->depth];
	}
services/cache/dns.c
@@ -404,6 +404,9 @@ cache_fill_missing(struct module_env* env, uint16_t qclass,
	struct ub_packed_rrset_key* akey;
	time_t now = *env->now;
	for(ns = dp->nslist; ns; ns = ns->next) {
		if(ns->cache_lookup_count > ITERATOR_NAME_CACHELOOKUP_MAX)
			continue;
		ns->cache_lookup_count++;
		akey = rrset_cache_lookup(env->rrset_cache, ns->name,
			ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0);
		if(akey) {
@@ -2240,3 +2240,10 @@ mesh_serve_expired_callback(void* arg)
		mesh_do_callback(mstate, LDNS_RCODE_NOERROR, msg->rep, c, &tv);
	}
}

int mesh_jostle_exceeded(struct mesh_area* mesh)
{
	if(mesh->all.count < mesh->max_reply_states)
		return 0;
	return 1;
}
@@ -685,4 +685,15 @@ struct dns_msg*
mesh_serve_expired_lookup(struct module_qstate* qstate,
	struct query_info* lookup_qinfo);

/**
 * See if the mesh has space for more queries. You can allocate queries
 * anyway, but this checks for the allocated space.
 * @param mesh: mesh area.
 * @return true if the query list is full.
 * It checks the number of all queries, not just number of reply states,
 * that have a client address. So that spawned queries count too,
 * that were created by the iterator, or other modules.
 */
int mesh_jostle_exceeded(struct mesh_area* mesh);

#endif /* SERVICES_MESH_H */
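mesh_jostle_exceeded() is the other half of the fix: before spawning yet another target or glue lookup, the iterator asks whether the mesh query list is already at capacity and, if so, settles for the single query it has already spawned. A small, self-contained sketch of that calling pattern follows (illustrative only, not part of the commit); toy_mesh and spawn_lookup() are stand-ins rather than Unbound's real mesh_area and subquery machinery.

/* Illustrative sketch only, not part of the commit: a simplified model of
 * how the patch's callers use the mesh_jostle_exceeded() check. */
#include <stdio.h>
#include <stddef.h>

struct toy_mesh {
	size_t all_count;        /* all query states, like mesh->all.count */
	size_t max_reply_states; /* the configured limit on reply states */
};

/* same comparison as mesh_jostle_exceeded() in the patch */
static int toy_jostle_exceeded(struct toy_mesh* mesh)
{
	if(mesh->all_count < mesh->max_reply_states)
		return 0;
	return 1;
}

static void spawn_lookup(struct toy_mesh* mesh, const char* name)
{
	printf("spawn lookup for %s\n", name);
	mesh->all_count++; /* the spawned state occupies a query slot */
}

int main(void)
{
	struct toy_mesh mesh = { 9, 10 };
	const char* targets[] = { "ns1.example", "ns2.example", "ns3.example" };
	size_t i;
	for(i = 0; i < 3; i++) {
		spawn_lookup(&mesh, targets[i]);
		/* like query_for_targets(): one query at a time when full */
		if(toy_jostle_exceeded(&mesh))
			break;
	}
	return 0;
}

This mirrors the loop breaks added to query_for_targets() above: when the list is full, one spawned state uses the cpu and socket while the waiting state resumes later, instead of many lookups being in flight at once.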