fixup nasty cache overwriting bug.

git-svn-id: file:///svn/unbound/trunk@656 be551aaa-1e26-0410-a405-d3ace91eadb9
This commit is contained in:
Wouter Wijngaards 2007-10-03 19:11:50 +00:00
parent d4614cf1ce
commit 7ccfb10392
3 changed files with 8 additions and 3 deletions

View file

@@ -7,6 +7,8 @@
 	  number with parse errors.
 	- unit test for multiple ENT case.
 	- fix for cname out of validated unsec zone.
+	- fixup nasty id=0 reuse. Also added assertions to detect its
+	  return (the assertion catches in the existing test cases).
 1 October 2007: Wouter
 	- skip F77, CXX, objC tests in configure step.

View file

@@ -177,6 +177,7 @@ rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref,
 	hashvalue_t h = k->entry.hash;
 	uint16_t rrset_type = ntohs(k->rk.type);
 	int equal = 0;
+	log_assert(ref->id != 0 && k->id != 0);
 	/* looks up item with a readlock - no editing! */
 	if((e=slabhash_lookup(&r->table, h, k, 0)) != 0) {
 		/* return id and key as they will be used in the cache
@@ -206,6 +207,7 @@ rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref,
 		/* use insert to update entry to manage lruhash
 		 * cache size values nicely. */
 	}
+	log_assert(ref->key->id != 0);
 	slabhash_insert(&r->table, h, &k->entry, k->entry.data, alloc);
 	if(e) {
 		/* For NSEC, NSEC3, DNAME, when rdata is updated, update
@@ -261,6 +263,7 @@ rrset_array_lock(struct rrset_ref* ref, size_t count, uint32_t timenow)
 		if(i>0 && ref[i].key == ref[i-1].key)
 			continue; /* only lock items once */
 		lock_rw_rdlock(&ref[i].key->entry.lock);
+		log_assert(ref[i].id != 0 && ref[i].key->id != 0);
 		if(ref[i].id != ref[i].key->id || timenow >
 			((struct packed_rrset_data*)(ref[i].key->entry.data))
 			->ttl) {

View file

@@ -596,13 +596,13 @@ repinfo_copy_rrsets(struct reply_info* dest, struct reply_info* from,
 		fk = from->rrsets[i];
 		dk = dest->rrsets[i];
 		fd = (struct packed_rrset_data*)fk->entry.data;
-		dk->id = fk->id;
 		dk->entry.hash = fk->entry.hash;
 		dk->rk = fk->rk;
-		if(region)
+		if(region) {
+			dk->id = fk->id;
 			dk->rk.dname = (uint8_t*)region_alloc_init(region,
 				fk->rk.dname, fk->rk.dname_len);
-		else
+		} else
 			dk->rk.dname = (uint8_t*)memdup(fk->rk.dname,
 				fk->rk.dname_len);
 		if(!dk->rk.dname)