Mirror of https://github.com/NLnetLabs/unbound.git
message lookup and copy to region.
git-svn-id: file:///svn/unbound/trunk@343 be551aaa-1e26-0410-a405-d3ace91eadb9
parent 5def8556c6
commit 1a9238ca5f
8 changed files with 211 additions and 85 deletions
@@ -357,8 +357,6 @@ answer_from_cache(struct worker* worker, struct lruhash_entry* e, uint16_t id,
 	struct reply_info* rep = (struct reply_info*)e->data;
 	uint32_t timenow = time(0);
 	uint16_t udpsize = edns->udp_size;
-	size_t i;
-	hashvalue_t* h;
 	/* see if it is possible */
 	if(rep->ttl <= timenow) {
 		/* the rrsets may have been updated in the meantime.
@@ -371,23 +369,8 @@ answer_from_cache(struct worker* worker, struct lruhash_entry* e, uint16_t id,
 	edns->udp_size = EDNS_ADVERTISED_SIZE;
 	edns->ext_rcode = 0;
 	edns->bits &= EDNS_DO;
-	if(!(h = (hashvalue_t*)region_alloc(worker->scratchpad,
-		sizeof(hashvalue_t)*rep->rrset_count)))
+	if(!rrset_array_lock(rep->ref, rep->rrset_count, timenow))
 		return 0;
-	/* check rrsets */
-	for(i=0; i<rep->rrset_count; i++) {
-		if(i>0 && rep->ref[i].key == rep->ref[i-1].key)
-			continue; /* only lock items once */
-		lock_rw_rdlock(&rep->ref[i].key->entry.lock);
-		if(rep->ref[i].id != rep->ref[i].key->id ||
-			rep->ttl <= timenow) {
-			/* failure! rollback our readlocks */
-			size_t j;
-			for(j=0; j<=i; j++)
-				lock_rw_unlock(&rep->ref[j].key->entry.lock);
-			return 0;
-		}
-	}
 	/* locked and ids and ttls are OK. */
 	if(!reply_info_answer_encode(&mrentry->key, rep, id, flags,
 		repinfo->c->buffer, timenow, 1, worker->scratchpad,
@@ -397,21 +380,8 @@ answer_from_cache(struct worker* worker, struct lruhash_entry* e, uint16_t id,
 	}
 	/* cannot send the reply right now, because blocking network syscall
 	 * is bad while holding locks. */
-	/* unlock */
-	for(i=0; i<rep->rrset_count; i++) {
-		if(i>0 && rep->ref[i].key == rep->ref[i-1].key)
-			continue; /* only unlock items once */
-		h[i] = rep->ref[i].key->entry.hash;
-		lock_rw_unlock(&rep->ref[i].key->entry.lock);
-	}
-	/* still holding msgreply lock to touch LRU, so cannot send reply yet*/
-	/* LRU touch, with no rrset locks held */
-	for(i=0; i<rep->rrset_count; i++) {
-		if(i>0 && rep->ref[i].key == rep->ref[i-1].key)
-			continue; /* only touch items once */
-		rrset_cache_touch(worker->env.rrset_cache, rep->ref[i].key,
-			h[i], rep->ref[i].id);
-	}
+	rrset_array_unlock_touch(worker->env.rrset_cache, worker->scratchpad,
+		rep->ref, rep->rrset_count);
 	region_free_all(worker->scratchpad);
 	/* go and return this buffer to the client */
 	return 1;
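The net effect in answer_from_cache is that lock acquisition, rollback, unlocking and LRU touching are now one call each. A minimal sketch of the resulting flow; encode_cached_reply() is a hypothetical stand-in for the reply_info_answer_encode call, whose full argument list is truncated above:

	/* sketch: the answer path with the new helpers (error paths trimmed) */
	int ok;
	if(!rrset_array_lock(rep->ref, rep->rrset_count, timenow))
		return 0;	/* an rrset expired or was purged: treat as cache miss */
	/* rrsets are readlocked and id/TTL-verified; safe to encode from them */
	ok = encode_cached_reply();	/* hypothetical stand-in, see note above */
	/* release locks and LRU-touch in one step, before any network I/O */
	rrset_array_unlock_touch(worker->env.rrset_cache, worker->scratchpad,
		rep->ref, rep->rrset_count);
	region_free_all(worker->scratchpad);
	return ok;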
@@ -1,3 +1,7 @@
+29 May 2007: Wouter
+	- routines to lock and unlock array of rrsets moved to cache/rrset.
+	- lookup message from msg cache (and copy to region).
+
 25 May 2007: Wouter
 	- Acknowledge use of unbound-java code in iterator. Nicer readme.
 	- services/cache/dns.c DNS Cache. Hybrid cache uses msgcache and
services/cache/dns.c (112 changed lines)
@@ -233,20 +233,50 @@ dns_cache_find_delegation(struct module_env* env, uint8_t* qname,
 
 /** allocate rrset in region - no more locks needed */
 static struct ub_packed_rrset_key*
-copy_rrset(struct ub_packed_rrset_key* key, struct region* region)
+copy_rrset(struct ub_packed_rrset_key* key, struct region* region,
+	uint32_t now)
 {
-	/* lock, lrutouch rrset in cache */
-	return NULL;
+	struct ub_packed_rrset_key* ck = region_alloc(region,
+		sizeof(struct ub_packed_rrset_key));
+	struct packed_rrset_data* d;
+	struct packed_rrset_data* data = (struct packed_rrset_data*)
+		key->entry.data;
+	size_t dsize, i;
+	if(!ck)
+		return NULL;
+	ck->id = key->id;
+	memset(&ck->entry, 0, sizeof(ck->entry));
+	ck->entry.hash = key->entry.hash;
+	ck->entry.key = ck;
+	ck->rk = key->rk;
+	ck->rk.dname = region_alloc_init(region, key->rk.dname,
+		key->rk.dname_len);
+	if(!ck->rk.dname)
+		return NULL;
+	dsize = packed_rrset_sizeof(data);
+	d = (struct packed_rrset_data*)region_alloc_init(region, data, dsize);
+	if(!d)
+		return NULL;
+	ck->entry.data = d;
+	packed_rrset_ptr_fixup(d);
+	/* make TTLs relative */
+	for(i=0; i<d->count + d->rrsig_count; i++)
+		d->rr_ttl[i] -= now;
+	d->ttl -= now;
+	return ck;
 }
 
 /** allocate dns_msg from query_info and reply_info */
 static struct dns_msg*
-tomsg(struct msgreply_entry* e, struct reply_info* r, struct region* region)
+tomsg(struct module_env* env, struct msgreply_entry* e, struct reply_info* r,
+	struct region* region, uint32_t now, struct region* scratch)
 {
-	struct dns_msg* msg = (struct dns_msg*)region_alloc(region,
-		sizeof(struct dns_msg));
+	struct dns_msg* msg;
 	size_t i;
-	if(!msg)
+	if(now > r->ttl)
 		return NULL;
+	msg = (struct dns_msg*)region_alloc(region, sizeof(struct dns_msg));
+	if(!msg)
+		return NULL;
 	memcpy(&msg->qinfo, &e->key, sizeof(struct query_info));
 	msg->qinfo.qname = region_alloc_init(region, e->key.qname,
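In the cache, d->ttl holds an absolute expiry stamp; copy_rrset turns it into a remaining lifetime for the region copy. A small worked example, with hypothetical values:

	/* cached entry stores absolute expiry (seconds since epoch) */
	/* now = 1000000; d->rr_ttl[0] = 1000300; d->ttl = 1000600 */
	d->rr_ttl[0] -= now;	/* -> 300 seconds left for this RR */
	d->ttl -= now;		/* -> 600 seconds left for the whole rrset */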
@@ -264,46 +294,29 @@ tomsg(struct msgreply_entry* e, struct reply_info* r, struct region* region)
 		msg->rep->rrset_count * sizeof(struct ub_packed_rrset_key*));
 	if(!msg->rep->rrsets)
 		return NULL;
+	/* try to lock all of the rrsets we need */
+	if(!rrset_array_lock(r->ref, r->rrset_count, now))
+		return NULL;
 	for(i=0; i<msg->rep->rrset_count; i++) {
-		msg->rep->rrsets[i] = copy_rrset(r->rrsets[i], region);
-		if(!msg->rep->rrsets[i])
+		msg->rep->rrsets[i] = copy_rrset(r->rrsets[i], region, now);
+		if(!msg->rep->rrsets[i]) {
+			rrset_array_unlock(r->ref, r->rrset_count);
 			return NULL;
+		}
 	}
+	rrset_array_unlock_touch(env->rrset_cache, scratch, r->ref,
+		r->rrset_count);
 	return msg;
 }
 
-/** allocate dns_msg from CNAME record */
-static struct dns_msg*
-cnamemsg(uint8_t* qname, size_t qnamelen, struct ub_packed_rrset_key* rrset,
-	struct packed_rrset_data* d, struct region* region)
-{
-	struct dns_msg* msg = (struct dns_msg*)region_alloc(region,
-		sizeof(struct dns_msg));
-	if(!msg)
-		return NULL;
-	msg->qinfo.qnamesize = rrset->rk.dname_len;
-	msg->qinfo.qname = region_alloc_init(region, rrset->rk.dname,
-		rrset->rk.dname_len);
-	if(!msg->qinfo.qname)
-		return NULL;
-	msg->qinfo.has_cd = (rrset->rk.flags&PACKED_RRSET_CD)?1:0;
-	msg->qinfo.qtype = LDNS_RR_TYPE_CNAME;
-	msg->qinfo.qclass = ntohs(rrset->rk.rrset_class);
-	/* TODO create reply info with the CNAME */
-	return NULL;
-}
-
 struct dns_msg*
 dns_cache_lookup(struct module_env* env,
 	uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
-	int has_cd, struct region* region)
+	int has_cd, struct region* region, struct region* scratch)
 {
 	struct lruhash_entry* e;
 	struct query_info k;
 	hashvalue_t h;
 	uint32_t now = (uint32_t)time(NULL);
-	struct ub_packed_rrset_key* rrset;
 
 	/* lookup first, this has both NXdomains and ANSWER responses */
 	k.qname = qname;
@@ -314,32 +327,33 @@ dns_cache_lookup(struct module_env* env,
 	h = query_info_hash(&k);
 	e = slabhash_lookup(env->msg_cache, h, &k, 0);
 	if(e) {
-		/* check ttl */
 		struct msgreply_entry* key = (struct msgreply_entry*)e->key;
 		struct reply_info* data = (struct reply_info*)e->data;
-		if(now <= data->ttl) {
-			struct dns_msg* msg = tomsg(key, data, region);
-			if(msg) {
-				lock_rw_unlock(&e->lock);
-				return msg;
-			}
-			lock_rw_unlock(&e->lock);
-		}
+		struct dns_msg* msg = tomsg(env, key, data, region, now,
+			scratch);
+		if(msg) {
+			lock_rw_unlock(&e->lock);
+			return msg;
+		}
+		/* could be msg==NULL; due to TTL or not all rrsets available */
+		lock_rw_unlock(&e->lock);
 	}
 
-	/* see if we have a CNAME for this domain */
-	rrset = rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
-		LDNS_RR_TYPE_CNAME, qclass,
-		(uint32_t)(has_cd?PACKED_RRSET_CD:0), now, 0);
-	if(rrset) {
-		struct packed_rrset_data* d = (struct packed_rrset_data*)
-			rrset->entry.data;
-		if(now <= d->ttl) {
-			/* construct CNAME response */
-			struct dns_msg* msg = cnamemsg(qname, qnamelen, rrset,
-				d, region);
-			lock_rw_unlock(&rrset->entry.lock);
-		}
-		lock_rw_unlock(&rrset->entry.lock);
-	}
+	/* see if we have CNAME for this domain */
+	k.qtype = LDNS_RR_TYPE_CNAME;
+	h = query_info_hash(&k);
+	e = slabhash_lookup(env->msg_cache, h, &k, 0);
+	if(e) {
+		struct msgreply_entry* key = (struct msgreply_entry*)e->key;
+		struct reply_info* data = (struct reply_info*)e->data;
+		struct dns_msg* msg = tomsg(env, key, data, region, now,
+			scratch);
+		if(msg) {
+			lock_rw_unlock(&e->lock);
+			return msg;
+		}
+		/* could be msg==NULL; due to TTL or not all rrsets available */
+		lock_rw_unlock(&e->lock);
+	}
 
 	/* construct DS, DNSKEY messages from rrset cache. TODO */
services/cache/dns.h (4 changed lines)
@@ -96,12 +96,14 @@ struct delegpt* dns_cache_find_delegation(struct module_env* env,
  * @param qclass: query class.
  * @param has_cd: if true, CD flag is turned on for lookup.
  * @param region: where to allocate result.
+ * @param scratch: where to allocate temporary data.
  * @return new response message (alloced in region, rrsets do not have IDs).
  * 	or NULL on error or if not found in cache.
+ * 	TTLs are made relative to the current time.
  */
 struct dns_msg* dns_cache_lookup(struct module_env* env,
 	uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
-	int has_cd, struct region* region);
+	int has_cd, struct region* region, struct region* scratch);
 
 /** Find covering DNAME */
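A hypothetical caller now passes two regions: one that owns the returned message, and a scratch region for throwaway hash storage during the LRU touch. A sketch, assuming qinfo, region and scratch variables exist at the call site:

	/* hypothetical caller of the new dns_cache_lookup signature */
	struct dns_msg* msg = dns_cache_lookup(env, qinfo->qname,
		qinfo->qnamesize, qinfo->qtype, qinfo->qclass,
		qinfo->has_cd, region, scratch);
	if(!msg) {
		/* miss, expired, or an rrset was purged: resolve normally */
	}
	/* msg and its rrsets live in region; TTLs are relative to now */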
services/cache/rrset.c (58 changed lines)
@@ -44,6 +44,7 @@
 #include "util/config_file.h"
 #include "util/data/packed_rrset.h"
 #include "util/data/msgreply.h"
+#include "util/region-allocator.h"
 
 struct rrset_cache* rrset_cache_create(struct config_file* cfg,
 	struct alloc_cache* alloc)
@@ -206,3 +207,60 @@ rrset_cache_lookup(struct rrset_cache* r, uint8_t* qname, size_t qnamelen,
 	}
 	return NULL;
 }
+
+int
+rrset_array_lock(struct rrset_ref* ref, size_t count, uint32_t timenow)
+{
+	size_t i;
+	for(i=0; i<count; i++) {
+		if(i>0 && ref[i].key == ref[i-1].key)
+			continue; /* only lock items once */
+		lock_rw_rdlock(&ref[i].key->entry.lock);
+		if(ref[i].id != ref[i].key->id || timenow >
+			((struct reply_info*)(ref[i].key->entry.data))->ttl) {
+			/* failure! rollback our readlocks */
+			rrset_array_unlock(ref, i+1);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+void
+rrset_array_unlock(struct rrset_ref* ref, size_t count)
+{
+	size_t i;
+	for(i=0; i<count; i++) {
+		if(i>0 && ref[i].key == ref[i-1].key)
+			continue; /* only unlock items once */
+		lock_rw_unlock(&ref[i].key->entry.lock);
+	}
+}
+
+void
+rrset_array_unlock_touch(struct rrset_cache* r, struct region* scratch,
+	struct rrset_ref* ref, size_t count)
+{
+	hashvalue_t* h;
+	size_t i;
+	if(!(h = (hashvalue_t*)region_alloc(scratch,
+		sizeof(hashvalue_t)*count)))
+		log_warn("rrset LRU: memory allocation failed");
+	else	/* store hash values */
+		for(i=0; i<count; i++)
+			h[i] = ref[i].key->entry.hash;
+	/* unlock */
+	for(i=0; i<count; i++) {
+		if(i>0 && ref[i].key == ref[i-1].key)
+			continue; /* only unlock items once */
+		lock_rw_unlock(&ref[i].key->entry.lock);
+	}
+	if(h) {
+		/* LRU touch, with no rrset locks held */
+		for(i=0; i<count; i++) {
+			if(i>0 && ref[i].key == ref[i-1].key)
+				continue; /* only touch items once */
+			rrset_cache_touch(r, ref[i].key, h[i], ref[i].id);
+		}
+	}
+}
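The three-phase shape of rrset_array_unlock_touch (copy hashes under lock, unlock everything, then touch) is deliberate: entry.hash is read while the entry readlock is still held, but the LRU touch happens only after all rrset locks are released, matching the old worker.c comment "LRU touch, with no rrset locks held". A typical caller pairs the functions like this; the ref array setup and the cache/scratch variables are hypothetical:

	/* hypothetical caller: guard a borrowed rrset ref array for reading */
	if(rrset_array_lock(ref, count, (uint32_t)time(NULL))) {
		/* ... read the rrset data behind ref[0..count-1] ... */
		rrset_array_unlock_touch(cache, scratch, ref, count);
	}
	/* on failure nothing is left locked; treat it as a cache miss */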
services/cache/rrset.h (38 changed lines)
@@ -47,6 +47,7 @@
 struct config_file;
 struct alloc_cache;
 struct rrset_ref;
+struct region;
 
 /**
  * The rrset cache
@@ -146,4 +147,41 @@ struct ub_packed_rrset_key* rrset_cache_lookup(struct rrset_cache* r,
 	uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
 	uint32_t flags, uint32_t timenow, int wr);
 
+/**
+ * Obtain readlock on a (sorted) list of rrset references.
+ * Checks TTLs and IDs of the rrsets and rolls back the locking if not OK.
+ * @param ref: array of rrset references (key pointer and ID value).
+ *	duplicate references are allowed and handled.
+ * @param count: size of array.
+ * @param timenow: used to compare with TTL.
+ * @return true on success, false on a failure, which can be that some
+ * 	RRsets have timed out, or that they do not exist any more, the
+ *	RRsets have been purged from the cache.
+ *	If true, you hold readlocks on all the ref items.
+ */
+int rrset_array_lock(struct rrset_ref* ref, size_t count, uint32_t timenow);
+
+/**
+ * Unlock array (sorted) of rrset references.
+ * @param ref: array of rrset references (key pointer and ID value).
+ *	duplicate references are allowed and handled.
+ * @param count: size of array.
+ */
+void rrset_array_unlock(struct rrset_ref* ref, size_t count);
+
+/**
+ * Unlock array (sorted) of rrset references and at the same time
+ * touch LRU on the rrsets. It needs the scratch region for temporary
+ * storage as it uses the initial locks to obtain hash values.
+ * @param r: the rrset cache. In this cache LRU is updated.
+ * @param scratch: region for temporary storage of hash values.
+ *	if memory allocation fails, the lru touch fails silently,
+ *	but locks are released. memory errors are logged.
+ * @param ref: array of rrset references (key pointer and ID value).
+ *	duplicate references are allowed and handled.
+ * @param count: size of array.
+ */
+void rrset_array_unlock_touch(struct rrset_cache* r, struct region* scratch,
+	struct rrset_ref* ref, size_t count);
+
 #endif /* SERVICES_CACHE_RRSET_H */
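The "duplicate references are allowed and handled" clause matters because a reply's ref array is sorted, so repeated rrsets sit adjacent and the comparison of ref[i].key with ref[i-1].key skips them. A contrived sketch, with hypothetical cached_key and timenow variables:

	/* two references to the same cached rrset, sorted adjacent */
	struct rrset_ref ref[2];
	ref[0].key = ref[1].key = cached_key;	/* hypothetical key */
	ref[0].id = ref[1].id = cached_key->id;
	if(rrset_array_lock(ref, 2, timenow)) {
		/* the shared entry readlock was taken exactly once */
		rrset_array_unlock(ref, 2);	/* and released exactly once */
	}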
@@ -68,12 +68,20 @@ ub_rrset_sizefunc(void* key, void* data)
 	struct ub_packed_rrset_key* k = (struct ub_packed_rrset_key*)key;
 	struct packed_rrset_data* d = (struct packed_rrset_data*)data;
 	size_t s = sizeof(struct ub_packed_rrset_key) + k->rk.dname_len;
+	s += packed_rrset_sizeof(d);
+	return s;
+}
+
+size_t
+packed_rrset_sizeof(struct packed_rrset_data* d)
+{
+	size_t s;
 	if(d->rrsig_count > 0) {
-		s += ((uint8_t*)d->rr_data[d->count+d->rrsig_count-1] -
+		s = ((uint8_t*)d->rr_data[d->count+d->rrsig_count-1] -
 			(uint8_t*)d) + d->rr_len[d->count+d->rrsig_count-1];
 	} else {
 		log_assert(d->count > 0);
-		s += ((uint8_t*)d->rr_data[d->count-1] - (uint8_t*)d) +
+		s = ((uint8_t*)d->rr_data[d->count-1] - (uint8_t*)d) +
 			d->rr_len[d->count-1];
 	}
 	return s;
@@ -164,3 +172,21 @@ rrset_key_hash(struct packed_rrset_key* key)
 	h = dname_query_hash(key->dname, h);
 	return h;
 }
+
+void
+packed_rrset_ptr_fixup(struct packed_rrset_data* data)
+{
+	size_t i;
+	size_t total = data->count + data->rrsig_count;
+	uint8_t* nextrdata;
+	/* fixup pointers in packed rrset data */
+	data->rr_len = (size_t*)((uint8_t*)data +
+		sizeof(struct packed_rrset_data));
+	data->rr_data = (uint8_t**)&(data->rr_len[total]);
+	data->rr_ttl = (uint32_t*)&(data->rr_data[total]);
+	nextrdata = (uint8_t*)&(data->rr_ttl[total]);
+	for(i=0; i<total; i++) {
+		data->rr_data[i] = nextrdata;
+		nextrdata += data->rr_len[i];
+	}
+}
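packed_rrset_ptr_fixup exists because packed_rrset_data is one contiguous blob: the struct header, then the rr_len[], rr_data[] and rr_ttl[] arrays, then the rdata bytes, with rr_data[] holding absolute pointers into that tail. After a byte copy those pointers still aim at the old blob. A sketch of the intended memcpy-then-fixup use, as in copy_rrset above:

	/* copy the blob, then rewrite its internal pointers */
	size_t dsize = packed_rrset_sizeof(data);
	struct packed_rrset_data* d = (struct packed_rrset_data*)
		region_alloc_init(region, data, dsize);	/* byte-for-byte copy */
	if(d)
		packed_rrset_ptr_fixup(d); /* rr_len/rr_data/rr_ttl now point into d */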
@@ -237,6 +237,13 @@ struct packed_rrset_list {
 void ub_packed_rrset_parsedelete(struct ub_packed_rrset_key* pkey,
 	struct alloc_cache* alloc);
 
+/**
+ * Memory size of rrset data. RRset data must be filled in correctly.
+ * @param data: data to examine.
+ * @return size in bytes.
+ */
+size_t packed_rrset_sizeof(struct packed_rrset_data* data);
+
 /**
  * Calculate memory size of rrset entry. For hash table usage.
  * @param key: struct ub_packed_rrset_key*.
@@ -286,4 +293,11 @@ void rrset_data_delete(void* data, void* userdata);
  */
 hashvalue_t rrset_key_hash(struct packed_rrset_key* key);
 
+/**
+ * Fixup pointers in fixed data packed_rrset_data blob.
+ * After a memcpy of the data for example. Will set internal pointers right.
+ * @param data: rrset data structure. Otherwise correctly filled in.
+ */
+void packed_rrset_ptr_fixup(struct packed_rrset_data* data);
+
 #endif /* UTIL_DATA_PACKED_RRSET_H */