ids and parsing rrs.

git-svn-id: file:///svn/unbound/trunk@241 be551aaa-1e26-0410-a405-d3ace91eadb9
This commit is contained in:
Wouter Wijngaards 2007-04-16 15:21:50 +00:00
parent 558de9982d
commit 4283fec985
10 changed files with 338 additions and 10 deletions

View file

@ -126,7 +126,7 @@ daemon_init()
free(daemon);
return NULL;
}
alloc_init(&daemon->superalloc, NULL);
alloc_init(&daemon->superalloc, NULL, 0);
return daemon;
}

View file

@ -525,7 +525,8 @@ worker_init(struct worker* worker, struct config_file *cfg,
fatal_exit("could not set forwarder address");
}
}
alloc_init(&worker->alloc, &worker->daemon->superalloc);
alloc_init(&worker->alloc, &worker->daemon->superalloc,
worker->thread_num);
return 1;
}

View file

@ -1,3 +1,8 @@
16 April 2007: Wouter
- following a small change in LDNS, parsing code calculates the
memory size to allocate for rrs.
- code to handle ID creation.
13 April 2007: Wouter
- parse routines. Code that parses rrsets, rrs.

View file

@ -5,3 +5,9 @@ o profile memory allocation, and if performance issues, use special memory
allocator. For example, with caches per thread.
o #define BIT_... differently on big-endian and little-endian systems so that
the htons on flags is not needed to send a message from the cache.
o possible optimization with delayed malloc of msgreply after parse
(reuse parse structures and ptrs to packet in meantime).
o possible optimization, so that the precious id number resource is not depleted
by parsing of messages. Delay malloc, as above, or try to release id
numbers in reverse: if the id released is the thread's next_id number,
the thread reuses that id number.

View file

@ -54,9 +54,9 @@ alloc_test() {
struct alloc_cache major, minor1, minor2;
int i;
alloc_init(&major, NULL);
alloc_init(&minor1, &major);
alloc_init(&minor2, &major);
alloc_init(&major, NULL, 0);
alloc_init(&minor1, &major, 0);
alloc_init(&minor2, &major, 1);
t1 = alloc_special_obtain(&minor1);
alloc_clear(&minor1);

View file

@ -43,6 +43,17 @@
#include "util/alloc.h"
#include "util/data/packed_rrset.h"
/** number of bits for ID part of uint64, rest for number of threads. */
#define THRNUM_SHIFT 48 /* for 65k threads, 2^48 rrsets per thr. */
/** setup new special type: zero the entry and initialize its rwlock.
 * @param t: freshly malloc'ed special entry to prepare for use. */
static void
alloc_setup_special(alloc_special_t* t)
{
	memset(t, 0, sizeof(*t));
	lock_rw_init(&t->entry.lock);
}
/** prealloc some entries in the cache. To minimize contention.
* Result is 1 lock per alloc_max newly created entries.
* @param alloc: the structure to fill up.
@ -55,6 +66,7 @@ prealloc(struct alloc_cache* alloc)
for(i=0; i<ALLOC_SPECIAL_MAX; i++) {
if(!(p = (alloc_special_t*)malloc(sizeof(alloc_special_t))))
fatal_exit("prealloc: out of memory");
alloc_setup_special(p);
alloc_set_special_next(p, alloc->quar);
alloc->quar = p;
alloc->num_quar++;
@ -62,10 +74,19 @@ prealloc(struct alloc_cache* alloc)
}
void
alloc_init(struct alloc_cache* alloc, struct alloc_cache* super)
alloc_init(struct alloc_cache* alloc, struct alloc_cache* super,
int thread_num)
{
memset(alloc, 0, sizeof(*alloc));
alloc->super = super;
alloc->thread_num = thread_num;
alloc->next_id = (uint64_t)thread_num; /* in steps, so that type */
alloc->next_id <<= THRNUM_SHIFT; /* of *_id is used. */
alloc->last_id = 1; /* so no 64bit constants, */
alloc->last_id <<= THRNUM_SHIFT; /* or implicit 'int' ops. */
alloc->last_id -= 1; /* for compiler portability. */
alloc->last_id |= alloc->next_id;
alloc->next_id += 1; /* because id=0 is special. */
if(!alloc->super) {
lock_quick_init(&alloc->lock);
lock_protect(&alloc->lock, alloc, sizeof(*alloc));
@ -104,6 +125,18 @@ alloc_clear(struct alloc_cache* alloc)
alloc->num_quar = 0;
}
/** get a new id for a special entry and detach it from any free list.
 * Ids come from this thread's [next_id, last_id] range (set up in
 * alloc_init with the thread number in the top bits).
 * @param alloc: per-thread alloc cache that hands out the id range.
 * @param t: entry that receives the id; its next pointer is cleared. */
static void
alloc_get_id(struct alloc_cache* alloc, alloc_special_t* t)
{
	t->id = alloc->next_id++;
	/* range exhausted: old ids must be invalidated before reuse */
	if(alloc->next_id == alloc->last_id) {
		/* TODO: clear the rrset cache */
		log_warn("Out of ids. Clearing cache.");
	}
	alloc_set_special_next(t, 0);
}
alloc_special_t*
alloc_special_obtain(struct alloc_cache* alloc)
{
@ -114,7 +147,7 @@ alloc_special_obtain(struct alloc_cache* alloc)
p = alloc->quar;
alloc->quar = alloc_special_next(p);
alloc->num_quar--;
alloc_special_clean(p);
alloc_get_id(alloc, p);
return p;
}
/* see if in global cache */
@ -128,7 +161,7 @@ alloc_special_obtain(struct alloc_cache* alloc)
}
lock_quick_unlock(&alloc->super->lock);
if(p) {
alloc_special_clean(p);
alloc_get_id(alloc, p);
return p;
}
}
@ -136,7 +169,8 @@ alloc_special_obtain(struct alloc_cache* alloc)
prealloc(alloc);
if(!(p = (alloc_special_t*)malloc(sizeof(alloc_special_t))))
fatal_exit("alloc_special_obtain: out of memory");
alloc_special_clean(p);
alloc_setup_special(p);
alloc_get_id(alloc, p);
return p;
}

View file

@ -77,6 +77,12 @@ struct alloc_cache {
alloc_special_t* quar;
/** number of items in quarantine. */
size_t num_quar;
/** thread number for id creation */
int thread_num;
/** next id number to pass out */
uint64_t next_id;
/** last id number possible */
uint64_t last_id;
};
/**
@ -84,8 +90,10 @@ struct alloc_cache {
* @param alloc: this parameter is allocated by the caller.
* @param super: super to use (init that before with super_init).
* Pass this argument NULL to init the toplevel alloc structure.
* @param thread_num: thread number for id creation of special type.
*/
void alloc_init(struct alloc_cache* alloc, struct alloc_cache* super);
void alloc_init(struct alloc_cache* alloc, struct alloc_cache* super,
int thread_num);
/**
* Free the alloc. Pushes all the cached items into the super structure.

View file

@ -278,3 +278,25 @@ dname_pkt_hash(ldns_buffer* pkt, uint8_t* dname, hashvalue_t h)
return h;
}
void dname_pkt_copy(ldns_buffer* pkt, uint8_t* to, uint8_t* dname)
{
	/* copy over the dname and decompress it at the same time */
	uint8_t lablen;
	lablen = *dname++;
	while(lablen) {
		if((lablen & 0xc0) == 0xc0) {
			/* follow compression pointer (14-bit offset) */
			dname = ldns_buffer_at(pkt, (lablen&0x3f)<<8 | *dname);
			lablen = *dname++;
			continue;
		}
		log_assert(lablen <= LDNS_MAX_LABELLEN);
		*to++ = lablen;
		memmove(to, dname, lablen);
		dname += lablen;
		to += lablen;
		/* BUGFIX: read the next label length; without this the
		 * loop never advanced past the first label and re-copied
		 * it forever for multi-label names. */
		lablen = *dname++;
	}
	/* copy last \0 */
	*to = 0;
}

View file

@ -111,5 +111,12 @@ hashvalue_t dname_query_hash(uint8_t* dname, hashvalue_t h);
*/
hashvalue_t dname_pkt_hash(ldns_buffer* pkt, uint8_t* dname, hashvalue_t h);
/**
* Copy over a valid dname and decompress it.
* @param pkt: packet to resolve compression pointers.
* @param to: buffer of size from pkt_len function to hold result.
* @param dname: pointer into packet where dname starts.
*/
void dname_pkt_copy(ldns_buffer* pkt, uint8_t* to, uint8_t* dname);
#endif /* UTIL_DATA_DNAME_H */

View file

@ -43,6 +43,7 @@
#include "util/data/msgreply.h"
#include "util/storage/lookup3.h"
#include "util/log.h"
#include "util/alloc.h"
#include "util/netevent.h"
#include "util/net_help.h"
#include "util/data/dname.h"
@ -125,6 +126,8 @@ struct rrset_parse {
uint16_t rrset_class;
/** the flags for the rrset, like for packedrrset */
uint32_t flags;
/** number of RRs in the rr list */
size_t rr_count;
/** linked list of RRs in this rrset. */
struct rr_parse* rr_first;
/** last in list of RRs in this rrset. */
@ -141,6 +144,8 @@ struct rr_parse {
* its dname, type and class are the same and stored for the rrset.
*/
uint8_t* ttl_data;
/** the length of the rdata if allocated (with no dname compression)*/
size_t size;
/** next in list of RRs. */
struct rr_parse* next;
};
@ -377,6 +382,7 @@ new_rrset(struct msg_parse* msg, uint8_t* dname, size_t dnamelen,
p->type = type;
p->rrset_class = dclass;
p->flags = rrset_flags;
p->rr_count = 0;
p->rr_first = 0;
p->rr_last = 0;
return p;
@ -403,6 +409,7 @@ add_rr_to_rrset(struct rrset_parse* rrset, ldns_buffer* pkt,
rrset->rr_last->next = rr;
else rrset->rr_first = rr;
rrset->rr_last = rr;
rrset->rr_count++;
}
/* forwards */
@ -518,6 +525,235 @@ parse_packet(ldns_buffer* pkt, struct msg_parse* msg,
return 0;
}
/** duplicate an uncompressed dname into fresh heap memory.
 * @param dname: source bytes (no compression pointers inside).
 * @param len: number of bytes to copy.
 * @return newly allocated copy, or NULL on malloc failure. */
static uint8_t*
copy_uncompr(uint8_t* dname, size_t len)
{
	uint8_t* copy = malloc(len);
	if(copy)
		memmove(copy, dname, len);
	return copy;
}
/** fill a query_info from the parsed message, allocating the qname.
 * @param msg: parsed message (qname may be absent for qdcount==0).
 * @param qinf: filled in; qname is heap allocated (or NULL).
 * @return 0 on malloc failure, 1 on success. */
static int
parse_create_qinfo(struct msg_parse* msg, struct query_info* qinf)
{
	qinf->qname = 0;
	if(msg->qname) {
		qinf->qname = copy_uncompr(msg->qname, msg->qname_len);
		if(!qinf->qname)
			return 0;
	}
	qinf->qnamesize = msg->qname_len;
	qinf->qtype = msg->qtype;
	qinf->qclass = msg->qclass;
	return 1;
}
/** allocate the reply_info with its trailing ref and rrset-key arrays
 * in a single malloc.
 * @param msg: parsed message supplying counts and flags.
 * @param rep: receives the new structure (NULL on malloc failure).
 * @return 0 on malloc failure, 1 on success. */
static int
parse_create_repinfo(struct msg_parse* msg, struct reply_info** rep)
{
	struct reply_info* r;
	/* rrset_count-1 because the first ref is part of the struct. */
	size_t sz = sizeof(struct reply_info) +
		sizeof(struct rrset_ref) * (msg->rrset_count-1) +
		sizeof(struct ub_packed_rrset_key*) * msg->rrset_count;
	r = malloc(sz);
	*rep = r;
	if(!r)
		return 0;
	r->reply = 0; /* unused */
	r->replysize = 0; /* unused */
	r->flags = msg->flags;
	r->qdcount = msg->qdcount;
	r->ttl = 0;
	r->an_numrrsets = msg->an_rrsets;
	r->ns_numrrsets = msg->ns_rrsets;
	r->ar_numrrsets = msg->ar_rrsets;
	r->rrset_count = msg->rrset_count;
	/* the rrset pointer array starts right after the refs */
	r->rrsets = (struct ub_packed_rrset_key**)&(r->ref[msg->rrset_count]);
	/* zero the arrays to assist cleanup in case of malloc failure */
	memset(r->rrsets, 0,
		sizeof(struct ub_packed_rrset_key*) * msg->rrset_count);
	memset(&r->ref[0], 0,
		sizeof(struct rrset_ref) * msg->rrset_count);
	return 1;
}
/** obtain a special rrset key from the alloc cache for every rrset.
 * On failure the keys already obtained stay in rep, for the caller's
 * cleanup path to release.
 * @param msg: parsed message with rrset_count.
 * @param rep: reply info whose rrsets array is filled in.
 * @param alloc: source of special rrset keys.
 * @return 0 on alloc failure, 1 on success. */
static int
parse_alloc_rrset_keys(struct msg_parse* msg, struct reply_info* rep,
	struct alloc_cache* alloc)
{
	size_t i;
	for(i = 0; i < msg->rrset_count; i++) {
		if(!(rep->rrsets[i] = alloc_special_obtain(alloc)))
			return 0;
		rep->rrsets[i]->entry.data = NULL;
	}
	return 1;
}
/** calculate the allocated size of one rr: the rdatalen field plus the
 * rdata with every compressed dname expanded to its full length.
 * Moves the packet position over the rr's ttl/rdlength.
 * @param pkt: packet buffer for reading the rr.
 * @param type: rr type in host byte order.
 * @param rr: rr being measured; rr->size is filled in.
 * @return 0 on a malformed packet, 1 on success. */
static int
calc_size(ldns_buffer* pkt, uint16_t type, struct rr_parse* rr)
{
	const ldns_rr_descriptor* desc;
	uint16_t pkt_len; /* length of rr inside the packet */
	rr->size = sizeof(uint16_t); /* the rdatalen */
	ldns_buffer_set_position(pkt, (size_t)(rr->ttl_data -
		ldns_buffer_begin(pkt) + 4)); /* skip ttl */
	pkt_len = ldns_buffer_read_u16(pkt);
	if(ldns_buffer_remaining(pkt) < pkt_len)
		return 0;
	desc = ldns_rr_descript(type);
	if(desc->_dname_count > 0) {
		int count = (int)desc->_dname_count;
		int rdf = 0;
		size_t len;
		/* walk the rdata fields until all dnames are measured;
		 * everything after the last dname is copied verbatim. */
		while(count) {
			switch(desc->_wireformat[rdf]) {
			case LDNS_RDF_TYPE_DNAME:
				/* decompress every domain name */
				if((len = pkt_dname_len(pkt)) == 0)
					return 0;
				rr->size += len;
				count--;
				break;
			case LDNS_RDF_TYPE_STR:
				/* length byte plus string contents */
				len = ldns_buffer_current(pkt)[0] + 1;
				rr->size += len;
				ldns_buffer_skip(pkt, (ssize_t)len);
				break;
			case LDNS_RDF_TYPE_CLASS:
			case LDNS_RDF_TYPE_ALG:
			case LDNS_RDF_TYPE_INT8:
				ldns_buffer_skip(pkt, 1);
				rr->size += 1;
				break;
			case LDNS_RDF_TYPE_INT16:
			case LDNS_RDF_TYPE_TYPE:
			case LDNS_RDF_TYPE_CERT_ALG:
				ldns_buffer_skip(pkt, 2);
				rr->size += 2;
				break;
			case LDNS_RDF_TYPE_INT32:
			case LDNS_RDF_TYPE_TIME:
			case LDNS_RDF_TYPE_A:
			case LDNS_RDF_TYPE_PERIOD:
				ldns_buffer_skip(pkt, 4);
				rr->size += 4;
				break;
			case LDNS_RDF_TYPE_TSIGTIME:
				ldns_buffer_skip(pkt, 6);
				rr->size += 6;
				break;
			case LDNS_RDF_TYPE_AAAA:
				ldns_buffer_skip(pkt, 16);
				rr->size += 16;
				/* BUGFIX: break was missing; AAAA fell
				 * through into log_assert(false). */
				break;
			default:
				log_assert(false); /* add type above */
				/* only types that appear before a domain *
				 * name are needed. rest is simply copied. */
			}
			rdf++;
		}
	}
	/* remaining rdata */
	rr->size += pkt_len;
	return 1;
}
/** sum the allocation sizes of all rrs in an rrset.
 * @param pkt: packet buffer for measuring each rr.
 * @param pset: rrset whose rr list is walked; each rr->size is set.
 * @param allocsize: receives the total; partial on parse failure.
 * @return 0 on parse failure, 1 on success. */
static int
parse_rr_size(ldns_buffer* pkt, struct rrset_parse* pset, size_t* allocsize)
{
	struct rr_parse* rr;
	*allocsize = 0;
	for(rr = pset->rr_first; rr; rr = rr->next) {
		if(!calc_size(pkt, ntohs(pset->type), rr))
			return 0;
		*allocsize += rr->size;
	}
	return 1;
}
/** measure the rrs of an rrset and allocate its packed data block.
 * @param pkt: packet buffer for measuring rdata sizes.
 * @param pset: parsed rrset (rr list and rr_count).
 * @param data: receives the malloc'ed packed_rrset_data block.
 * @return 0 on success, LDNS_RCODE_FORMERR on a parse error,
 *	LDNS_RCODE_SERVFAIL when out of memory. */
static int
parse_create_rrset(ldns_buffer* pkt, struct rrset_parse* pset,
	struct packed_rrset_data** data)
{
	size_t allocsize;
	/* per-rr bookkeeping stored after the struct: length,
	 * data pointer and ttl for each rr. */
	size_t perrr = sizeof(size_t) + sizeof(uint8_t*) + sizeof(uint32_t);
	if(!parse_rr_size(pkt, pset, &allocsize))
		return LDNS_RCODE_FORMERR;
	*data = malloc(sizeof(struct packed_rrset_data) +
		pset->rr_count * perrr + allocsize);
	if(!*data)
		return LDNS_RCODE_SERVFAIL;
	return 0;
}
/**
 * Copy and decompress rrs into the reply structures.
 * @param pkt: the packet for compression pointer resolution.
 * @param msg: the parsed message; its rrset list is walked in order.
 * @param rep: reply info to put rrs into; rrset keys must already be
 *	allocated (parse_alloc_rrset_keys).
 * @return 0 or rcode (SERVFAIL on malloc failure, or the rcode from
 *	parse_create_rrset).
 */
static int
parse_copy_decompress(ldns_buffer* pkt, struct msg_parse* msg,
	struct reply_info* rep)
{
	int ret;
	size_t i;
	struct rrset_parse *pset = msg->rrset_first;
	struct packed_rrset_data* data;
	log_assert(rep);
	for(i=0; i<rep->rrset_count; i++) {
		rep->rrsets[i]->rk.flags = pset->flags;
		rep->rrsets[i]->rk.dname_len = pset->dname_len;
		/* +4 bytes: type and class are stored after the dname */
		rep->rrsets[i]->rk.dname = malloc(pset->dname_len + 4);
		if(!rep->rrsets[i]->rk.dname)
			return LDNS_RCODE_SERVFAIL;
		/* copy & decompress dname */
		dname_pkt_copy(pkt, rep->rrsets[i]->rk.dname, pset->dname);
		/* copy over type (at dname_len) and class (2 bytes later) */
		memmove(&rep->rrsets[i]->rk.dname[pset->dname_len],
			&pset->type, sizeof(uint16_t));
		memmove(&rep->rrsets[i]->rk.dname[pset->dname_len+2],
			&pset->rrset_class, sizeof(uint16_t));
		/* read data part. */
		if((ret=parse_create_rrset(pkt, pset, &data)) != 0)
			return ret;
		rep->rrsets[i]->entry.data = (void*)data;
		pset = pset->rrset_all_next;
	}
	return 0;
}
/** allocate and decompress message and rrsets.
 * On failure, partially allocated results remain in qinf/rep for the
 * caller to clean up.
 * @param pkt: packet for dname decompression.
 * @param msg: parsed message to convert.
 * @param alloc: source of special rrset keys.
 * @param qinf: query info, filled in.
 * @param rep: receives the reply info.
 * @return 0 on success, an rcode on failure. */
static int
parse_create_msg(ldns_buffer* pkt, struct msg_parse* msg,
	struct alloc_cache* alloc, struct query_info* qinf,
	struct reply_info** rep)
{
	log_assert(pkt && msg);
	if(!parse_create_qinfo(msg, qinf))
		return LDNS_RCODE_SERVFAIL;
	if(!parse_create_repinfo(msg, rep))
		return LDNS_RCODE_SERVFAIL;
	if(!parse_alloc_rrset_keys(msg, *rep, alloc))
		return LDNS_RCODE_SERVFAIL;
	return parse_copy_decompress(pkt, msg, *rep);
}
int reply_info_parse(ldns_buffer* pkt, struct alloc_cache* alloc,
struct query_info* qinf, struct reply_info** rep)
@ -541,6 +777,15 @@ int reply_info_parse(ldns_buffer* pkt, struct alloc_cache* alloc,
}
/* parse OK, allocate return structures */
/* this also performs dname decompression */
*rep = NULL;
if((ret = parse_create_msg(pkt, msg, alloc, qinf, rep)) != 0) {
query_info_clear(qinf);
reply_info_parsedelete(*rep, alloc);
region_free_all(region);
region_destroy(region);
return ret;
}
/* exit and cleanup */
region_free_all(region);