Mirror of https://github.com/NLnetLabs/unbound.git, synced 2025-12-20 23:00:56 -05:00
- Fixes to add integer overflow checks on allocation (defense in depth).

git-svn-id: file:///svn/unbound/trunk@3372 be551aaa-1e26-0410-a405-d3ace91eadb9

commit 6feb8fb6a5 (parent 0a0b37be65)

12 changed files with 66 additions and 8 deletions
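Every hunk below applies the same defense-in-depth pattern: bound an attacker-influenced element count (against RR_COUNT_MAX, introduced later in this commit) before it is multiplied into an allocation size, so the multiplication cannot wrap around and yield a too-small buffer. A minimal standalone sketch of the idea, with illustrative names that are not taken from the Unbound sources:

#include <stddef.h>
#include <stdlib.h>

#define COUNT_MAX 0xffffff /* same order of bound as RR_COUNT_MAX below */

/* Allocate an array of count elements of elem_size bytes each. For the
 * small, fixed element sizes used in the hunks below, bounding the count
 * keeps count*elem_size well below SIZE_MAX, so the multiplication cannot
 * wrap and silently produce a short allocation. */
static void* alloc_array_checked(size_t count, size_t elem_size)
{
	if(count > COUNT_MAX)
		return NULL; /* reject before the multiplication can wrap */
	return malloc(count * elem_size);
}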
@@ -223,6 +223,8 @@ copy_msg(struct regional* region, struct lruhash_entry* e,
 	struct query_info** k, struct reply_info** d)
 {
 	struct reply_info* rep = (struct reply_info*)e->data;
+	if(rep->rrset_count > RR_COUNT_MAX)
+		return 0; /* to protect against integer overflow */
 	*d = (struct reply_info*)regional_alloc_init(region, e->data,
 		sizeof(struct reply_info) +
 		sizeof(struct rrset_ref) * (rep->rrset_count-1) +

@@ -470,6 +472,10 @@ load_rrset(SSL* ssl, sldns_buffer* buf, struct worker* worker)
 		log_warn("bad rrset without contents");
 		return 0;
 	}
+	if(rr_count > RR_COUNT_MAX || rrsig_count > RR_COUNT_MAX) {
+		log_warn("bad rrset with too many rrs");
+		return 0;
+	}
 	d->count = (size_t)rr_count;
 	d->rrsig_count = (size_t)rrsig_count;
 	d->security = (enum sec_status)security;

@@ -649,6 +655,10 @@ load_msg(SSL* ssl, sldns_buffer* buf, struct worker* worker)
 	rep.an_numrrsets = (size_t)an;
 	rep.ns_numrrsets = (size_t)ns;
 	rep.ar_numrrsets = (size_t)ar;
+	if(an > RR_COUNT_MAX || ns > RR_COUNT_MAX || ar > RR_COUNT_MAX) {
+		log_warn("error too many rrsets");
+		return 0; /* protect against integer overflow in alloc */
+	}
 	rep.rrset_count = (size_t)an+(size_t)ns+(size_t)ar;
 	rep.rrsets = (struct ub_packed_rrset_key**)regional_alloc_zero(
 		region, sizeof(struct ub_packed_rrset_key*)*rep.rrset_count);
@@ -590,6 +590,10 @@ dns64_synth_aaaa_data(const struct ub_packed_rrset_key* fk,
 	 * for the RRs themselves. Each RR has a length, TTL, pointer to wireformat
 	 * data, 2 bytes of data length, and 16 bytes of IPv6 address.
 	 */
+	if(fd->count > RR_COUNT_MAX) {
+		*dd_out = NULL;
+		return; /* integer overflow protection in alloc */
+	}
 	if (!(dd = *dd_out = regional_alloc(region,
 			sizeof(struct packed_rrset_data)
 			+ fd->count * (sizeof(size_t) + sizeof(time_t) +

@@ -713,6 +717,8 @@ dns64_adjust_a(int id, struct module_qstate* super, struct module_qstate* qstate
 	if(i<rep->an_numrrsets && fk->rk.type == htons(LDNS_RR_TYPE_A)) {
 		/* also sets dk->entry.hash */
 		dns64_synth_aaaa_data(fk, fd, dk, &dd, super->region, dns64_env);
+		if(!dd)
+			return;
 		/* Delete negative AAAA record from cache stored by
 		 * the iterator module */
 		rrset_cache_remove(super->env->rrset_cache, dk->rk.dname,
@@ -1,3 +1,6 @@
+20 March 2015: Wouter
+	- Fixed to add integer overflow checks on allocation (defense in depth).
+
 19 March 2015: Wouter
 	- Add ip-transparent config option for bind to non-local addresses.
@@ -308,6 +308,8 @@ iter_prepend(struct iter_qstate* iq, struct dns_msg* msg,
 	if(num_an + num_ns == 0)
 		return 1;
 	verbose(VERB_ALGO, "prepending %d rrsets", (int)num_an + (int)num_ns);
+	if(num_an > RR_COUNT_MAX || num_ns > RR_COUNT_MAX ||
+		msg->rep->rrset_count > RR_COUNT_MAX) return 0; /* overflow */
 	sets = regional_alloc(region, (num_an+num_ns+msg->rep->rrset_count) *
 		sizeof(struct ub_packed_rrset_key*));
 	if(!sets)

@@ -2549,6 +2551,12 @@ processClassResponse(struct module_qstate* qstate, int id,
 		/* copy appropriate rcode */
 		to->rep->flags = from->rep->flags;
 		/* copy rrsets */
+		if(from->rep->rrset_count > RR_COUNT_MAX ||
+			to->rep->rrset_count > RR_COUNT_MAX) {
+			log_err("malloc failed (too many rrsets) in collect ANY");
+			foriq->state = FINISHED_STATE;
+			return; /* integer overflow protection */
+		}
 		dest = regional_alloc(forq->region, sizeof(dest[0])*n);
 		if(!dest) {
 			log_err("malloc failed in collect ANY");
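In iter_prepend the protected product is (num_an+num_ns+msg->rep->rrset_count) * sizeof(struct ub_packed_rrset_key*); bounding each of the three counts individually keeps the sum at most 3*RR_COUNT_MAX, so the multiplication by a pointer size stays below SIZE_MAX even on 32-bit platforms. A standalone sketch of that reasoning (illustrative helper, not code from this commit):

#include <stddef.h>
#include <stdlib.h>

#define RR_COUNT_MAX 0xffffff /* as defined in the header hunk of this commit */

/* Allocate a pointer array for three concatenated rrset lists. Each count is
 * checked against RR_COUNT_MAX, so the sum is at most 3*RR_COUNT_MAX
 * (about 50 million); multiplied by sizeof(void*) that is roughly 400 MB,
 * which cannot wrap a 32-bit size_t. */
static void** alloc_prepend_array(size_t num_an, size_t num_ns, size_t existing)
{
	if(num_an > RR_COUNT_MAX || num_ns > RR_COUNT_MAX ||
		existing > RR_COUNT_MAX)
		return NULL; /* overflow guard, mirroring the hunk above */
	return malloc((num_an + num_ns + existing) * sizeof(void*));
}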

services/cache/dns.c (4 changes)

@@ -366,6 +366,8 @@ dns_msg_create(uint8_t* qname, size_t qnamelen, uint16_t qtype,
 		sizeof(struct reply_info)-sizeof(struct rrset_ref));
 	if(!msg->rep)
 		return NULL;
+	if(capacity > RR_COUNT_MAX)
+		return NULL; /* integer overflow protection */
 	msg->rep->flags = BIT_QR; /* with QR, no AA */
 	msg->rep->qdcount = 1;
 	msg->rep->rrsets = (struct ub_packed_rrset_key**)

@@ -453,6 +455,8 @@ gen_dns_msg(struct regional* region, struct query_info* q, size_t num)
 		sizeof(struct reply_info) - sizeof(struct rrset_ref));
 	if(!msg->rep)
 		return NULL;
+	if(num > RR_COUNT_MAX)
+		return NULL; /* integer overflow protection */
 	msg->rep->rrsets = (struct ub_packed_rrset_key**)
 		regional_alloc(region,
 		num * sizeof(struct ub_packed_rrset_key*));

services/cache/rrset.c (2 changes)

@@ -304,7 +304,7 @@ rrset_array_unlock_touch(struct rrset_cache* r, struct regional* scratch,
 {
 	hashvalue_t* h;
 	size_t i;
-	if(!(h = (hashvalue_t*)regional_alloc(scratch,
+	if(count > RR_COUNT_MAX || !(h = (hashvalue_t*)regional_alloc(scratch,
 		sizeof(hashvalue_t)*count)))
 		log_warn("rrset LRU: memory allocation failed");
 	else /* store hash values */
@@ -915,7 +915,10 @@ read_data_chunk(SSL* ssl, size_t len)
 {
 	size_t got = 0;
 	int r;
-	char* data = malloc(len+1);
+	char* data;
+	if(len >= 0xfffffff0)
+		return NULL; /* to protect against integer overflow in malloc*/
+	data = malloc(len+1);
 	if(!data) {
 		if(verb) printf("out of memory\n");
 		return NULL;
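The check in this hunk protects an addition rather than a multiplication: with len taken from the input stream, len+1 could wrap around to a tiny value that malloc happily satisfies, after which reading len bytes into the buffer would run past the allocation. A minimal standalone sketch of that guard (the helper name is illustrative, not from the Unbound sources):

#include <stddef.h>
#include <stdlib.h>

/* Allocate room for len bytes of data plus a terminating byte, refusing
 * lengths so large that len+1 could wrap around to a small value. */
static char* alloc_chunk_checked(size_t len)
{
	if(len >= 0xfffffff0)
		return NULL; /* cap well below SIZE_MAX, as in the hunk above */
	return malloc(len + 1);
}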

util/alloc.c (16 changes)

@@ -367,8 +367,12 @@ void *unbound_stat_malloc(size_t size)
 /** calloc with stats */
 void *unbound_stat_calloc(size_t nmemb, size_t size)
 {
-	size_t s = (nmemb*size==0)?(size_t)1:nmemb*size;
-	void* res = calloc(1, s+16);
+	size_t s;
+	void* res;
+	if(INT_MAX/nmemb < size)
+		return NULL; /* integer overflow check */
+	s = (nmemb*size==0)?(size_t)1:nmemb*size;
+	res = calloc(1, s+16);
 	if(!res) return NULL;
 	log_info("stat %p=calloc(%u, %u)", res+16, (unsigned)nmemb, (unsigned)size);
 	unbound_mem_alloc += s;

@@ -503,8 +507,12 @@ void *unbound_stat_malloc_lite(size_t size, const char* file, int line,
 void *unbound_stat_calloc_lite(size_t nmemb, size_t size, const char* file,
 	int line, const char* func)
 {
-	size_t req = nmemb * size;
-	void* res = malloc(req+lite_pad*2+sizeof(size_t));
+	size_t req;
+	void* res;
+	if(INT_MAX/nmemb < size)
+		return NULL; /* integer overflow check */
+	req = nmemb * size;
+	res = malloc(req+lite_pad*2+sizeof(size_t));
 	if(!res) return NULL;
 	memmove(res, lite_pre, lite_pad);
 	memmove(res+lite_pad, &req, sizeof(size_t));
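The two hunks above take a different route: instead of bounding a count, they predict the overflow with a division. INT_MAX/nmemb < size holds exactly when nmemb*size would exceed INT_MAX, and the division itself cannot overflow. A standalone sketch of that test (an illustrative wrapper, not code from this commit; it additionally guards nmemb == 0 so the division is always defined):

#include <limits.h>
#include <stddef.h>
#include <stdlib.h>

/* calloc-style wrapper: refuse the request if nmemb*size would exceed
 * INT_MAX, detected with a division performed before the multiplication. */
static void* calloc_checked(size_t nmemb, size_t size)
{
	if(nmemb != 0 && (size_t)INT_MAX / nmemb < size)
		return NULL; /* product would overflow the intended bound */
	return calloc(nmemb, size);
}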
@@ -87,7 +87,7 @@ construct_reply_info_base(struct regional* region, uint16_t flags, size_t qd,
 	/* rrset_count-1 because the first ref is part of the struct. */
 	size_t s = sizeof(struct reply_info) - sizeof(struct rrset_ref) +
 		sizeof(struct ub_packed_rrset_key*) * total;
-	if(total >= 0xffffff) return NULL; /* sanity check on numRRS*/
+	if(total >= RR_COUNT_MAX) return NULL; /* sanity check on numRRS*/
 	if(region)
 		rep = (struct reply_info*)regional_alloc(region, s);
 	else rep = (struct reply_info*)malloc(s +

@@ -278,7 +278,11 @@ parse_create_rrset(sldns_buffer* pkt, struct rrset_parse* pset,
 	struct packed_rrset_data** data, struct regional* region)
 {
 	/* allocate */
-	size_t s = sizeof(struct packed_rrset_data) +
+	size_t s;
+	if(pset->rr_count > RR_COUNT_MAX || pset->rrsig_count > RR_COUNT_MAX ||
+		pset->size > RR_COUNT_MAX)
+		return 0; /* protect against integer overflow */
+	s = sizeof(struct packed_rrset_data) +
 		(pset->rr_count + pset->rrsig_count) *
 		(sizeof(size_t)+sizeof(uint8_t*)+sizeof(time_t)) +
 		pset->size;
@@ -58,6 +58,12 @@ typedef uint64_t rrset_id_t;
  * from the SOA in the answer section from a direct SOA query or ANY query. */
 #define PACKED_RRSET_SOA_NEG 0x4
 
+/** number of rrs and rrsets for integer overflow protection. More than
+ * this is not really possible (64K packet has much less RRs and RRsets) in
+ * a message. And this is small enough that also multiplied there is no
+ * integer overflow. */
+#define RR_COUNT_MAX 0xffffff
+
 /**
  * The identifying information for an RRset.
  */
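The comment spells out why 0xffffff is a safe bound: it is about 16.7 million, larger than any count a 64KB DNS message can produce, yet small enough that multiplying it by the few machine words allocated per entry stays well within even a 32-bit size_t. A compile-time sketch of that arithmetic (the 64-byte per-entry figure is a deliberately generous assumption for illustration, not a size taken from the Unbound structs):

#include <assert.h>
#include <stdint.h>

#define RR_COUNT_MAX 0xffffff /* as defined in the hunk above */

/* Even with a generous 64 bytes of bookkeeping per RR, the total
 * (0xffffff * 64 = 1,073,741,760) fits in 32 bits with room to spare. */
static_assert((uint64_t)RR_COUNT_MAX * 64 <= UINT32_MAX,
	"RR_COUNT_MAX times a small per-entry size must not overflow 32 bits");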
@@ -1079,6 +1079,8 @@ int rrset_canonical_equal(struct regional* region,
 	fd.rr_data = fdata;
 	rbtree_init(&sortree1, &canonical_tree_compare);
 	rbtree_init(&sortree2, &canonical_tree_compare);
+	if(d1->count > RR_COUNT_MAX || d2->count > RR_COUNT_MAX)
+		return 1; /* protection against integer overflow */
 	rrs1 = regional_alloc(region, sizeof(struct canon_rr)*d1->count);
 	rrs2 = regional_alloc(region, sizeof(struct canon_rr)*d2->count);
 	if(!rrs1 || !rrs2) return 1; /* alloc failure */

@@ -1135,6 +1137,8 @@ rrset_canonical(struct regional* region, sldns_buffer* buf,
 		sizeof(rbtree_t));
 	if(!*sortree)
 		return 0;
+	if(d->count > RR_COUNT_MAX)
+		return 0; /* integer overflow protection */
 	rrs = regional_alloc(region, sizeof(struct canon_rr)*d->count);
 	if(!rrs) {
 		*sortree = NULL;
@@ -226,6 +226,8 @@ val_new_getmsg(struct module_qstate* qstate, struct val_qstate* vq)
 		sizeof(struct reply_info) - sizeof(struct rrset_ref));
 	if(!vq->chase_reply)
 		return NULL;
+	if(vq->orig_msg->rep->rrset_count > RR_COUNT_MAX)
+		return NULL; /* protect against integer overflow */
 	vq->chase_reply->rrsets = regional_alloc_init(qstate->region,
 		vq->orig_msg->rep->rrsets, sizeof(struct ub_packed_rrset_key*)
 		* vq->orig_msg->rep->rrset_count);