uses CNAMEs and DNAMEs from the cache.
git-svn-id: file:///svn/unbound/trunk@380 be551aaa-1e26-0410-a405-d3ace91eadb9
commit 2e352bc48c (parent c8b71a8b1e)
5 changed files with 216 additions and 59 deletions
@@ -1,3 +1,8 @@
+12 June 2007: Wouter
+    - num target queries was set to 0 at a bad time. Default it to 0 and
+      increase as target queries are done.
+    - synthesize CNAME and DNAME responses from the cache.
+
 11 June 2007: Wouter
     - replies on TCP queries have the address field set in replyinfo,
       for serviced queries, because the initiator does not know that
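The cache-synthesis item lands in services/cache/dns.c below: a cached DNAME is expanded into a CNAME for the concrete query name. A standalone sketch of just the name splice, under assumptions (splice_dname is a hypothetical helper; 255 is used as a stand-in for LDNS_MAX_DOMAINLEN):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_DOMAINLEN 255 /* stand-in for LDNS_MAX_DOMAINLEN */

    /* newname = qname without the DNAME owner suffix + DNAME target,
     * newlen  = qname_len + target_len - owner_len (as in the commit).
     * Returns 0 when the result would be too long; the cache code
     * answers YXDOMAIN in that case. */
    static size_t
    splice_dname(const uint8_t* qname, size_t qname_len, size_t owner_len,
        const uint8_t* target, size_t target_len, uint8_t* out)
    {
        size_t prefix = qname_len - owner_len; /* labels left of the owner */
        size_t newlen = prefix + target_len;
        if(newlen > MAX_DOMAINLEN)
            return 0;
        memcpy(out, qname, prefix);
        memcpy(out + prefix, target, target_len);
        return newlen;
    }

    int main(void)
    {
        /* query www.example.com, DNAME example.com -> example.net */
        uint8_t qname[] = {3,'w','w','w',7,'e','x','a','m','p','l','e',
            3,'c','o','m',0};                 /* 17 octets */
        uint8_t target[] = {7,'e','x','a','m','p','l','e',
            3,'n','e','t',0};                 /* 13 octets */
        uint8_t out[MAX_DOMAINLEN];
        size_t n = splice_dname(qname, sizeof(qname), 13,
            target, sizeof(target), out);
        printf("synthesized CNAME target is %zu octets\n", n); /* 17 */
        return 0;
    }

Running it prints a 17-octet result: www.example.com rewritten under the DNAME to www.example.net.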
@@ -103,7 +103,7 @@ iter_new(struct module_qstate* qstate, int id)
     iq->prepend_list = NULL;
     iq->prepend_last = NULL;
     iq->dp = NULL;
-    iq->num_target_queries = -1; /* default our targetQueries counter. */
+    iq->num_target_queries = 0;
     iq->num_current_queries = 0;
     iq->query_restart_count = 0;
     iq->referral_count = 0;
@@ -206,7 +206,6 @@ perform_forward(struct module_qstate* qstate, enum module_ev event, int id,
  * Transition to the next state. This can be used to advance a currently
  * processing event. It cannot be used to reactivate a forEvent.
  *
- * @param qstate: query state
  * @param iq: iterator query state
  * @param nextstate The state to transition to.
  * @return true. This is so this can be called as the return value for the
@@ -214,13 +213,12 @@ perform_forward(struct module_qstate* qstate, enum module_ev event, int id,
  * implies further processing).
  */
 static int
-next_state(struct module_qstate* qstate, struct iter_qstate* iq,
-    enum iter_state nextstate)
+next_state(struct iter_qstate* iq, enum iter_state nextstate)
 {
     /* If transitioning to a "response" state, make sure that there is a
      * response */
     if(iter_state_is_responsestate(nextstate)) {
-        if(qstate->reply == NULL || iq->response == NULL) {
+        if(iq->response == NULL) {
             log_err("transitioning to response state sans "
                 "response.");
         }
@@ -237,15 +235,14 @@ next_state(struct module_qstate* qstate, struct iter_qstate* iq,
  *
  * The response is stored in the qstate->buf buffer.
  *
- * @param qstate: query state
  * @param iq: iterator query state
  * @return false. This is so this method can be used as the return value for
  *    the processState methods. (Transitioning to the final state
  */
 static int
-final_state(struct module_qstate* qstate, struct iter_qstate* iq)
+final_state(struct iter_qstate* iq)
 {
-    return next_state(qstate, iq, iq->final_state);
+    return next_state(iq, iq->final_state);
 }

 /**
@@ -494,7 +491,8 @@ generate_sub_request(uint8_t* qname, size_t qnamelen, uint16_t qtype,

     subiq = (struct iter_qstate*)subq->minfo[id];
     memset(subiq, 0, sizeof(*subiq));
-    subiq->num_target_queries = -1; /* default our targetQueries counter. */
+    subiq->num_target_queries = 0;
+    subiq->num_current_queries = 0;
     outbound_list_init(&subiq->outlist);
     subiq->state = initial_state;
     subiq->final_state = final_state;
@@ -693,13 +691,13 @@ processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
             /* This *is* a query restart, even if it is a cheap
              * one. */
             iq->query_restart_count++;
-            return next_state(qstate, iq, INIT_REQUEST_STATE);
+            return next_state(iq, INIT_REQUEST_STATE);
         }

         /* it is an answer, response, to final state */
         verbose(VERB_ALGO, "returning answer from cache.");
         iq->response = msg;
-        return final_state(qstate, iq);
+        return final_state(iq);
     }

     /* TODO attempt to forward the request */
@@ -775,7 +773,7 @@ return nextState(event, req, state, IterEventState.INIT_REQUEST_STATE);

     /* Otherwise, set the current delegation point and move on to the
      * next state. */
-    return next_state(qstate, iq, INIT_REQUEST_2_STATE);
+    return next_state(iq, INIT_REQUEST_2_STATE);
 }

 /**
@@ -809,7 +807,7 @@ processInitRequest2(struct module_qstate* qstate, struct iter_qstate* iq,
     }

     /* most events just get forwarded to the next state. */
-    return next_state(qstate, iq, INIT_REQUEST_3_STATE);
+    return next_state(iq, INIT_REQUEST_3_STATE);
 }

 /**
@@ -831,7 +829,7 @@ processInitRequest3(struct module_qstate* qstate, struct iter_qstate* iq)
      * cached referral as the response. */
     if(!(qstate->query_flags & BIT_RD)) {
         iq->response = iq->deleg_msg;
-        return final_state(qstate, iq);
+        return final_state(iq);
     }

     /* After this point, unset the RD flag -- this query is going to
@@ -839,7 +837,7 @@ processInitRequest3(struct module_qstate* qstate, struct iter_qstate* iq)
     qstate->query_flags &= ~BIT_RD;

     /* Jump to the next state. */
-    return next_state(qstate, iq, QUERYTARGETS_STATE);
+    return next_state(iq, QUERYTARGETS_STATE);
 }

 /**
@@ -985,6 +983,7 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
     verbose(VERB_ALGO, "processQueryTargets: targetqueries %d, "
         "currentqueries %d", iq->num_target_queries,
         iq->num_current_queries);
+    qstate->ext_state[id] = module_wait_reply;

     /* Make sure that we haven't run away */
     /* FIXME: is this check even necessary? */
@@ -1005,12 +1004,11 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
      * query (or queries) for a missing name have been issued,
      * they will not be show up again. */
     if(tf_policy != 0) {
-        if(!query_for_targets(qstate, iq, ie, id, tf_policy,
-            &iq->num_target_queries)) {
+        int extra = 0;
+        if(!query_for_targets(qstate, iq, ie, id, tf_policy, &extra)) {
             return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
         }
-    } else {
-        iq->num_target_queries = 0;
+        iq->num_target_queries += extra;
     }

     /* Add the current set of unused targets to our queue. */
@@ -1036,13 +1034,15 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
              * to distinguish between generating (a) new target
              * query, or failing. */
             if(delegpt_count_missing_targets(iq->dp) > 0) {
+                int qs = 0;
                 verbose(VERB_ALGO, "querying for next "
                     "missing target");
                 if(!query_for_targets(qstate, iq, ie, id,
-                    1, &iq->num_target_queries)) {
+                    1, &qs)) {
                     return error_response(qstate, id,
                         LDNS_RCODE_SERVFAIL);
                 }
+                iq->num_target_queries += qs;
             }
             /* Since a target query might have been made, we
              * need to check again. */
@@ -1086,7 +1086,7 @@ processQueryTargets(struct module_qstate* qstate, struct iter_qstate* iq,
     if(!outq) {
         log_err("error sending query to auth server; skip this address");
         log_addr("error for address:", &target->addr, target->addrlen);
-        return next_state(qstate, iq, QUERYTARGETS_STATE);
+        return next_state(iq, QUERYTARGETS_STATE);
     }
     outbound_list_insert(&iq->outlist, outq);
     iq->num_current_queries++;
@@ -1116,9 +1116,8 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
     iq->num_current_queries--;
     if(iq->response == NULL) {
         verbose(VERB_ALGO, "query response was timeout");
-        return next_state(qstate, iq, QUERYTARGETS_STATE);
+        return next_state(iq, QUERYTARGETS_STATE);
     }
-    log_assert(qstate->reply); /* need addr for lameness cache */
     type = response_type_from_server(iq->response, &qstate->qinfo, iq->dp);
     if(type == RESPONSE_TYPE_ANSWER) {
         /* ANSWER type responses terminate the query algorithm,
@@ -1134,7 +1133,7 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
             return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
         /* close down outstanding requests to be discarded */
         outbound_list_clear(&iq->outlist);
-        return final_state(qstate, iq);
+        return final_state(iq);
     } else if(type == RESPONSE_TYPE_REFERRAL) {
         /* REFERRAL type responses get a reset of the
          * delegation point, and back to the QUERYTARGETS_STATE. */
@@ -1152,7 +1151,7 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
             return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
         delegpt_log(iq->dp);
         iq->num_current_queries = 0;
-        iq->num_target_queries = -1;
+        iq->num_target_queries = 0;
         /* Count this as a referral. */
         iq->referral_count++;

@@ -1162,7 +1161,7 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
          */
         outbound_list_clear(&iq->outlist);
         verbose(VERB_ALGO, "cleared outbound list for next round");
-        return next_state(qstate, iq, QUERYTARGETS_STATE);
+        return next_state(iq, QUERYTARGETS_STATE);
     } else if(type == RESPONSE_TYPE_CNAME) {
         uint8_t* sname = NULL;
         size_t snamelen = 0;
@@ -1191,7 +1190,7 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
         iq->deleg_msg = NULL;
         iq->dp = NULL;
         iq->num_current_queries = 0;
-        iq->num_target_queries = -1;
+        iq->num_target_queries = 0;
         /* Note the query restart. */
         iq->query_restart_count++;

@@ -1202,14 +1201,18 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
         outbound_list_clear(&iq->outlist);
         verbose(VERB_ALGO, "cleared outbound list for query restart");
         /* go to INIT_REQUEST_STATE for new qname. */
-        return next_state(qstate, iq, INIT_REQUEST_STATE);
+        return next_state(iq, INIT_REQUEST_STATE);
     } else if(type == RESPONSE_TYPE_LAME) {
         /* Cache the LAMEness. */
         verbose(VERB_DETAIL, "query response was LAME");
-        if(!infra_set_lame(qstate->env->infra_cache,
-            &qstate->reply->addr, qstate->reply->addrlen,
-            iq->dp->name, iq->dp->namelen, time(NULL)))
-            log_err("mark host lame: out of memory");
+        if(qstate->reply) {
+            /* need addr for lameness cache, but we may have
+             * gotten this from cache, so test to be sure */
+            if(!infra_set_lame(qstate->env->infra_cache,
+                &qstate->reply->addr, qstate->reply->addrlen,
+                iq->dp->name, iq->dp->namelen, time(NULL)))
+                log_err("mark host lame: out of memory");
+        } else log_err("lame response from cache");
     } else if(type == RESPONSE_TYPE_THROWAWAY) {
         /* LAME and THROWAWAY responses are handled the same way.
          * In this case, the event is just sent directly back to
@@ -1224,7 +1227,7 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq,
     /* LAME, THROWAWAY and "unknown" all end up here.
      * Recycle to the QUERYTARGETS state to hopefully try a
      * different target. */
-    return next_state(qstate, iq, QUERYTARGETS_STATE);
+    return next_state(iq, QUERYTARGETS_STATE);
 }

 /**
services/cache/dns.c: 197 changed lines
@@ -131,18 +131,18 @@ copy_rrset(struct ub_packed_rrset_key* key, struct region* region,
     return ck;
 }

-/** find closest NS and returns the rrset (locked) */
+/** find closest NS or DNAME and returns the rrset (locked) */
 static struct ub_packed_rrset_key*
-find_deleg_ns(struct module_env* env, uint8_t* qname, size_t qnamelen,
-    uint16_t qclass, uint32_t now)
+find_closest_of_type(struct module_env* env, uint8_t* qname, size_t qnamelen,
+    uint16_t qclass, uint32_t now, uint16_t searchtype)
 {
     struct ub_packed_rrset_key *rrset;
     uint8_t lablen;

-    /* snip off front part of qname until NS is found */
+    /* snip off front part of qname until the type is found */
     while(qnamelen > 0) {
         if((rrset = rrset_cache_lookup(env->rrset_cache, qname,
-            qnamelen, LDNS_RR_TYPE_NS, qclass, 0, now, 0)))
+            qnamelen, searchtype, qclass, 0, now, 0)))
             return rrset;

         /* snip off front label */
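find_closest_of_type() walks toward the root, snipping one label off the front of the wire-format query name before each cache lookup. A minimal, self-contained illustration of that walk (a sketch, not the function itself):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* www.example.com in wire format */
        uint8_t name[] = {3,'w','w','w',7,'e','x','a','m','p','l','e',
            3,'c','o','m',0};
        uint8_t* qname = name;
        size_t qnamelen = sizeof(name);
        while(qnamelen > 1) {              /* stop at the root label */
            uint8_t lablen = qname[0];
            printf("would look up a name of %zu octets here\n", qnamelen);
            qname += lablen + 1;           /* snip off front label */
            qnamelen -= (size_t)lablen + 1;
        }
        return 0;
    }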
@@ -282,7 +282,8 @@ dns_cache_find_delegation(struct module_env* env, uint8_t* qname,
     struct delegpt* dp;
     uint32_t now = (uint32_t)time(NULL);

-    nskey = find_deleg_ns(env, qname, qnamelen, qclass, now);
+    nskey = find_closest_of_type(env, qname, qnamelen, qclass, now,
+        LDNS_RR_TYPE_NS);
     if(!nskey) /* hope the caller has hints to prime or something */
         return NULL;
     nsdata = (struct packed_rrset_data*)nskey->entry.data;
@@ -319,6 +320,30 @@ dns_cache_find_delegation(struct module_env* env, uint8_t* qname,

+/** allocate dns_msg from query_info and reply_info */
+static struct dns_msg*
+gen_dns_msg(struct region* region, struct query_info* q, size_t num)
+{
+    struct dns_msg* msg = (struct dns_msg*)region_alloc(region,
+        sizeof(struct dns_msg));
+    if(!msg)
+        return NULL;
+    memcpy(&msg->qinfo, q, sizeof(struct query_info));
+    msg->qinfo.qname = region_alloc_init(region, q->qname, q->qname_len);
+    if(!msg->qinfo.qname)
+        return NULL;
+    /* allocate replyinfo struct and rrset key array separately */
+    msg->rep = (struct reply_info*)region_alloc(region,
+        sizeof(struct reply_info) - sizeof(struct rrset_ref));
+    if(!msg->rep)
+        return NULL;
+    msg->rep->rrsets = (struct ub_packed_rrset_key**)region_alloc(region,
+        num * sizeof(struct ub_packed_rrset_key*));
+    if(!msg->rep->rrsets)
+        return NULL;
+    return msg;
+}
+
 /** generate dns_msg from cached message */
 static struct dns_msg*
 tomsg(struct module_env* env, struct msgreply_entry* e, struct reply_info* r,
     struct region* region, uint32_t now, struct region* scratch)
 {
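gen_dns_msg() copies only sizeof(struct reply_info) - sizeof(struct rrset_ref) for the reply header and allocates the rrset pointer array separately. A rough, standalone illustration of that trailing-array idiom with stand-in types (info_stub and ref_stub are inventions for this sketch, not Unbound's definitions):

    #include <stdio.h>
    #include <stdlib.h>

    /* A struct that ends in a one-element bookkeeping array can be
     * copied without that trailing element when the copy does not
     * need it. */
    struct ref_stub { void* key; unsigned hash; };
    struct info_stub {
        unsigned flags;
        size_t rrset_count;
        void** rrsets;            /* allocated separately */
        struct ref_stub ref[1];   /* cache bookkeeping, dropped in copies */
    };

    int main(void)
    {
        size_t hdr = sizeof(struct info_stub) - sizeof(struct ref_stub);
        printf("full struct %zu bytes, header-only copy %zu bytes\n",
            sizeof(struct info_stub), hdr);
        return 0;
    }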
@@ -326,25 +351,16 @@ tomsg(struct module_env* env, struct msgreply_entry* e, struct reply_info* r,
     size_t i;
     if(now > r->ttl)
         return NULL;
-    msg = (struct dns_msg*)region_alloc(region, sizeof(struct dns_msg));
+    msg = gen_dns_msg(region, &e->key, r->rrset_count);
     if(!msg)
         return NULL;
-    memcpy(&msg->qinfo, &e->key, sizeof(struct query_info));
-    msg->qinfo.qname = region_alloc_init(region, e->key.qname,
-        e->key.qname_len);
-    if(!msg->qinfo.qname)
-        return NULL;
-    /* allocate replyinfo struct and rrset key array separately */
-    msg->rep = (struct reply_info*)region_alloc(region,
-        sizeof(struct reply_info) - sizeof(struct rrset_ref));
-    if(!msg->rep)
-        return NULL;
-    memcpy(msg->rep, r,
-        sizeof(struct reply_info) - sizeof(struct rrset_ref));
-    msg->rep->rrsets = (struct ub_packed_rrset_key**)region_alloc(region,
-        msg->rep->rrset_count * sizeof(struct ub_packed_rrset_key*));
-    if(!msg->rep->rrsets)
-        return NULL;
+    msg->rep->flags = r->flags;
+    msg->rep->qdcount = r->qdcount;
+    msg->rep->ttl = r->ttl;
+    msg->rep->an_numrrsets = r->an_numrrsets;
+    msg->rep->ns_numrrsets = r->ns_numrrsets;
+    msg->rep->ar_numrrsets = r->ar_numrrsets;
+    msg->rep->rrset_count = r->rrset_count;
     if(!rrset_array_lock(r->ref, r->rrset_count, now))
         return NULL;
     for(i=0; i<msg->rep->rrset_count; i++) {
@@ -359,6 +375,114 @@ tomsg(struct module_env* env, struct msgreply_entry* e, struct reply_info* r,
     return msg;
 }

+/** synthesize CNAME response from cached CNAME item */
+static struct dns_msg*
+cname_msg(struct ub_packed_rrset_key* rrset, struct region* region,
+    uint32_t now, struct query_info* q)
+{
+    struct dns_msg* msg;
+    struct packed_rrset_data* d = (struct packed_rrset_data*)
+        rrset->entry.data;
+    if(now > d->ttl)
+        return NULL;
+    msg = gen_dns_msg(region, q, 1); /* only the CNAME RRset */
+    if(!msg)
+        return NULL;
+    msg->rep->flags = BIT_QR; /* reply, no AA, no error */
+    msg->rep->qdcount = 1;
+    msg->rep->ttl = d->ttl - now;
+    msg->rep->an_numrrsets = 1;
+    msg->rep->ns_numrrsets = 0;
+    msg->rep->ar_numrrsets = 0;
+    msg->rep->rrset_count = 1;
+    msg->rep->rrsets[0] = copy_rrset(rrset, region, now);
+    if(!msg->rep->rrsets[0]) /* copy CNAME */
+        return NULL;
+    return msg;
+}
+
+/** synthesize DNAME+CNAME response from cached DNAME item */
+static struct dns_msg*
+synth_dname_msg(struct ub_packed_rrset_key* rrset, struct region* region,
+    uint32_t now, struct query_info* q)
+{
+    struct dns_msg* msg;
+    struct ub_packed_rrset_key* ck;
+    struct packed_rrset_data* newd, *d = (struct packed_rrset_data*)
+        rrset->entry.data;
+    uint8_t* newname, *dtarg = NULL;
+    size_t newlen, dtarglen;
+    if(now > d->ttl)
+        return NULL;
+    msg = gen_dns_msg(region, q, 2); /* DNAME + CNAME RRset */
+    if(!msg)
+        return NULL;
+    msg->rep->flags = BIT_QR; /* reply, no AA, no error */
+    msg->rep->qdcount = 1;
+    msg->rep->ttl = d->ttl - now;
+    msg->rep->an_numrrsets = 1;
+    msg->rep->ns_numrrsets = 0;
+    msg->rep->ar_numrrsets = 0;
+    msg->rep->rrset_count = 1;
+    msg->rep->rrsets[0] = copy_rrset(rrset, region, now);
+    if(!msg->rep->rrsets[0]) /* copy DNAME */
+        return NULL;
+    /* synth CNAME rrset */
+    get_cname_target(rrset, &dtarg, &dtarglen);
+    if(!dtarg)
+        return NULL;
+    newlen = q->qname_len + dtarglen - rrset->rk.dname_len;
+    if(newlen > LDNS_MAX_DOMAINLEN) {
+        msg->rep->flags |= LDNS_RCODE_YXDOMAIN;
+        return msg;
+    }
+    newname = (uint8_t*)region_alloc(region, newlen);
+    if(!newname)
+        return NULL;
+    /* new name is concatenation of qname front (without DNAME owner)
+     * and DNAME target name */
+    memcpy(newname, q->qname, q->qname_len-rrset->rk.dname_len);
+    memmove(newname+(q->qname_len-rrset->rk.dname_len), dtarg, dtarglen);
+    /* create rest of CNAME rrset */
+    ck = (struct ub_packed_rrset_key*)region_alloc(region,
+        sizeof(struct ub_packed_rrset_key));
+    if(!ck)
+        return NULL;
+    memset(&ck->entry, 0, sizeof(ck->entry));
+    msg->rep->rrsets[1] = ck;
+    ck->entry.key = ck;
+    ck->rk.type = htons(LDNS_RR_TYPE_CNAME);
+    ck->rk.rrset_class = rrset->rk.rrset_class;
+    ck->rk.flags = 0;
+    ck->rk.dname = region_alloc_init(region, q->qname, q->qname_len);
+    if(!ck->rk.dname)
+        return NULL;
+    ck->rk.dname_len = q->qname_len;
+    ck->entry.hash = rrset_key_hash(&ck->rk);
+    newd = (struct packed_rrset_data*)region_alloc(region,
+        sizeof(struct packed_rrset_data) + sizeof(size_t) +
+        sizeof(uint8_t*) + sizeof(uint32_t) + sizeof(uint16_t)
+        + newlen);
+    if(!newd)
+        return NULL;
+    ck->entry.data = newd;
+    newd->ttl = 0; /* 0 for synthesized CNAME TTL */
+    newd->count = 1;
+    newd->rrsig_count = 0;
+    newd->trust = rrset_trust_ans_noAA;
+    newd->rr_len = (size_t*)((uint8_t*)newd +
+        sizeof(struct packed_rrset_data));
+    newd->rr_len[0] = newlen + sizeof(uint16_t);
+    packed_rrset_ptr_fixup(newd);
+    newd->rr_ttl[0] = newd->ttl;
+    msg->rep->ttl = newd->ttl;
+    ldns_write_uint16(newd->rr_data[0], newlen);
+    memmove(newd->rr_data[0] + sizeof(uint16_t), newname, newlen);
+    msg->rep->an_numrrsets ++;
+    msg->rep->rrset_count ++;
+    return msg;
+}
+
 struct dns_msg*
 dns_cache_lookup(struct module_env* env,
     uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
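synth_dname_msg() above sizes one region allocation for the packed_rrset_data struct plus a single rr_len, rr_data pointer, rr_ttl and the rdata bytes, then relies on packed_rrset_ptr_fixup() to point the internal arrays into that block. A generic, standalone sketch of the same single-allocation idiom (hypothetical blob type; not the real packed_rrset_data layout):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* One allocation: struct, then len[1], then data[1] pointer, then
     * the rdata bytes, with the internal pointers fixed up by hand. */
    struct blob {
        size_t count;
        size_t* len;     /* points just past the struct */
        uint8_t** data;  /* points past the len array */
    };

    static struct blob*
    blob_create_one(const uint8_t* rdata, size_t rdlen)
    {
        struct blob* b = malloc(sizeof(*b) + sizeof(size_t)
            + sizeof(uint8_t*) + rdlen);
        if(!b) return NULL;
        b->count = 1;
        b->len = (size_t*)((uint8_t*)b + sizeof(*b));
        b->data = (uint8_t**)((uint8_t*)b->len + sizeof(size_t));
        b->data[0] = (uint8_t*)(b->data + 1);
        b->len[0] = rdlen;
        memcpy(b->data[0], rdata, rdlen);
        return b;
    }

    int main(void)
    {
        uint8_t rd[] = {7,'e','x','a','m','p','l','e',3,'n','e','t',0};
        struct blob* b = blob_create_one(rd, sizeof(rd));
        if(!b) return 1;
        printf("one allocation holds %zu rdata octets\n", b->len[0]);
        free(b);
        return 0;
    }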
@@ -368,6 +492,7 @@ dns_cache_lookup(struct module_env* env,
     struct query_info k;
     hashvalue_t h;
     uint32_t now = (uint32_t)time(NULL);
+    struct ub_packed_rrset_key* rrset;

     /* lookup first, this has both NXdomains and ANSWER responses */
     k.qname = qname;
@@ -389,8 +514,30 @@ dns_cache_lookup(struct module_env* env,
         lock_rw_unlock(&e->lock);
     }

-    /* see if we have CNAME for this domain TODO */
-    /* or a DNAME exists. Check in RRset cache and synth a message. */
+    /* see if a DNAME exists. Checked for first, to enforce that DNAMEs
+     * are more important, the CNAME is resynthesized and thus
+     * consistent with the DNAME */
+    if( (rrset=find_closest_of_type(env, qname, qnamelen, qclass, now,
+        LDNS_RR_TYPE_DNAME))) {
+        /* synthesize a DNAME+CNAME message based on this */
+        struct dns_msg* msg = synth_dname_msg(rrset, region, now, &k);
+        if(msg) {
+            lock_rw_unlock(&rrset->entry.lock);
+            return msg;
+        }
+        lock_rw_unlock(&rrset->entry.lock);
+    }
+
+    /* see if we have CNAME for this domain */
+    if( (rrset=rrset_cache_lookup(env->rrset_cache, qname, qnamelen,
+        LDNS_RR_TYPE_CNAME, qclass, 0, now, 0))) {
+        struct dns_msg* msg = cname_msg(rrset, region, now, &k);
+        if(msg) {
+            lock_rw_unlock(&rrset->entry.lock);
+            return msg;
+        }
+        lock_rw_unlock(&rrset->entry.lock);
+    }

     /* construct DS, DNSKEY messages from rrset cache. TODO */

@@ -197,7 +197,8 @@ get_cname_target(struct ub_packed_rrset_key* rrset, uint8_t** dname,
 {
     struct packed_rrset_data* d;
     size_t len;
-    if(ntohs(rrset->rk.type) != LDNS_RR_TYPE_CNAME)
+    if(ntohs(rrset->rk.type) != LDNS_RR_TYPE_CNAME &&
+       ntohs(rrset->rk.type) != LDNS_RR_TYPE_DNAME)
         return;
     d = (struct packed_rrset_data*)rrset->entry.data;
     if(d->count < 1)
@@ -309,6 +309,7 @@ void packed_rrset_ttl_add(struct packed_rrset_data* data, uint32_t add);
  * Failsafes; it will change passed dname to a valid dname or do nothing.
  * @param rrset: the rrset structure. Must be a CNAME.
  *    Only first RR is used (multiple RRs are technically illegal anyway).
+ *    Also works on type DNAME. Returns target name.
  * @param dname: this pointer is updated to point into the cname rdata.
  *    If a failsafe fails, nothing happens to the pointer (such as the
  *    rdata was not a valid dname, not a CNAME, ...).