diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..63c390606 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,16 @@ +sudo: false +language: c +compiler: + - gcc +addons: + apt: + packages: + - libssl-dev + - libevent-dev + - libexpat-dev + - clang +script: + - ./configure --enable-debug --disable-flto + - make + - make test + - (cd testdata; ../testcode/mini_tdir.sh exe clang-analysis.tdir ; if grep -e "warning:" -e "error:" result.clang-analysis ; then exit 1; else exit 0; fi) diff --git a/config.h.in b/config.h.in index 7c3309683..74c14d161 100644 --- a/config.h.in +++ b/config.h.in @@ -86,6 +86,10 @@ if you don't. */ #undef HAVE_DECL_ARC4RANDOM_UNIFORM +/* Define to 1 if you have the declaration of `evsignal_assign', and to 0 if + you don't. */ +#undef HAVE_DECL_EVSIGNAL_ASSIGN + /* Define to 1 if you have the declaration of `inet_ntop', and to 0 if you don't. */ #undef HAVE_DECL_INET_NTOP @@ -166,6 +170,9 @@ /* Define to 1 if you have the `ERR_load_crypto_strings' function. */ #undef HAVE_ERR_LOAD_CRYPTO_STRINGS +/* Define to 1 if you have the `event_assign' function. */ +#undef HAVE_EVENT_ASSIGN + /* Define to 1 if you have the `event_base_free' function. */ #undef HAVE_EVENT_BASE_FREE diff --git a/configure b/configure index b4c91f8d5..777649c66 100755 --- a/configure +++ b/configure @@ -19013,6 +19013,35 @@ _ACEOF fi done # only in libev. (tested on 4.00) + for ac_func in event_assign +do : + ac_fn_c_check_func "$LINENO" "event_assign" "ac_cv_func_event_assign" +if test "x$ac_cv_func_event_assign" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_EVENT_ASSIGN 1 +_ACEOF + +fi +done + # in libevent, for thread-safety + ac_fn_c_check_decl "$LINENO" "evsignal_assign" "ac_cv_have_decl_evsignal_assign" "$ac_includes_default +#ifdef HAVE_EVENT_H +# include +#else +# include \"event2/event.h\" +#endif + +" +if test "x$ac_cv_have_decl_evsignal_assign" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_EVSIGNAL_ASSIGN $ac_have_decl +_ACEOF + PC_LIBEVENT_DEPENDENCY="libevent" if test -n "$BAK_LDFLAGS_SET"; then diff --git a/configure.ac b/configure.ac index ea1783b70..abbecf0ba 100644 --- a/configure.ac +++ b/configure.ac @@ -1200,6 +1200,14 @@ large outgoing port ranges. ]) AC_CHECK_FUNCS([event_base_get_method]) # only in libevent 1.4.3 and later AC_CHECK_FUNCS([ev_loop]) # only in libev. (tested on 3.51) AC_CHECK_FUNCS([ev_default_loop]) # only in libev. 
(tested on 4.00) + AC_CHECK_FUNCS([event_assign]) # in libevent, for thread-safety + AC_CHECK_DECLS([evsignal_assign], [], [], [AC_INCLUDES_DEFAULT +#ifdef HAVE_EVENT_H +# include <event.h> +#else +# include "event2/event.h" +#endif + ]) PC_LIBEVENT_DEPENDENCY="libevent" AC_SUBST(PC_LIBEVENT_DEPENDENCY) if test -n "$BAK_LDFLAGS_SET"; then diff --git a/daemon/unbound.c b/daemon/unbound.c index 4a508c138..6cc8225f5 100644 --- a/daemon/unbound.c +++ b/daemon/unbound.c @@ -443,7 +443,8 @@ perform_setup(struct daemon* daemon, struct config_file* cfg, int debug_mode, } } #endif - if(cfg->tls_session_ticket_keys.first) { + if(cfg->tls_session_ticket_keys.first && + cfg->tls_session_ticket_keys.first->str[0] != 0) { if(!listen_sslctx_setup_ticket_keys(daemon->listen_sslctx, cfg->tls_session_ticket_keys.first)) { fatal_exit("could not set session ticket SSL_CTX"); } diff --git a/daemon/worker.c b/daemon/worker.c index 433b96fd5..fc93817a0 100644 --- a/daemon/worker.c +++ b/daemon/worker.c @@ -660,10 +660,7 @@ answer_from_cache(struct worker* worker, struct query_info* qinfo, if(!reply_check_cname_chain(qinfo, rep)) { /* cname chain invalid, redo iterator steps */ verbose(VERB_ALGO, "Cache reply: cname chain broken"); - bail_out: - rrset_array_unlock_touch(worker->env.rrset_cache, - worker->scratchpad, rep->ref, rep->rrset_count); - return 0; + goto bail_out; } } /* check security status of the cached answer */ @@ -758,6 +755,11 @@ answer_from_cache(struct worker* worker, struct query_info* qinfo, } /* go and return this buffer to the client */ return 1; + +bail_out: + rrset_array_unlock_touch(worker->env.rrset_cache, + worker->scratchpad, rep->ref, rep->rrset_count); + return 0; } /** Reply to client and perform prefetch to keep cache up to date. diff --git a/doc/Changelog b/doc/Changelog index 7a07b999a..5c06e446e 100644 --- a/doc/Changelog +++ b/doc/Changelog @@ -1,3 +1,72 @@ +25 April 2019: Wouter + - Fix wrong query name in local zone redirect answers with a CNAME, + the copy of the local alias is in unpacked form. + +18 April 2019: Ralph + - Scrub RRs from answer section when reusing NXDOMAIN message for + subdomain answers. + - For harden-below-nxdomain: do not consider a name to be non-existent + when message contains a CNAME record. + +18 April 2019: Wouter + - travis build file. + +16 April 2019: Wouter + - Better braces in if statement in TCP fastopen code. + - iana portlist updated. + +15 April 2019: Wouter + - Fix tls write event for read state change to re-call SSL_write and + not resume the TLS handshake. + +11 April 2019: George + - Update python documentation for init_standard(). + - Typos. + +11 April 2019: Wouter + - Fix that auth zone uses correct network type for sockets for + SOA serial probes. This fixes that probes fail because earlier + probe addresses are unreachable. + - Fix that auth zone fails over to next master for timeout in tcp. + - Squelch SSL read and write connection reset by peer and broken pipe + messages. Verbosity 2 and higher enables them. + +8 April 2019: Wouter + - Fix to use event_assign with libevent for thread-safety. + - verbose information about auth zone lookup process, also lookup + start, timeout and fail. + - Fix #17: Add python module example from Jan Janak, that is a + plugin for the Unbound DNS resolver to resolve DNS records in + multicast DNS [RFC 6762] via Avahi. The plugin communicates + with Avahi via DBus. The comment section at the beginning of + the file contains detailed documentation.
+ - Fix to wipe ssl ticket keys from memory with explicit_bzero, + if available. + +5 April 2019: Wouter + - Fix to reinit event structure for accepted TCP (and TLS) sockets. + +4 April 2019: Wouter + - Fix spelling error in log output for event method. + +3 April 2019: Wouter + - Move goto label in answer_from_cache to the end of the function + where it is more visible. + - Fix auth-zone NSEC3 response for wildcard nodata answers, + include the closest encloser in the answer. + +2 April 2019: Wouter + - Fix auth-zone NSEC3 response for empty nonterminals with exact + match nsec3 records. + - Fix for out of bounds integers, thanks to OSTIF audit. It is in + allocation debug code. + - Fix for auth zone nsec3 ent fix for wildcard nodata. + +25 March 2019: Wouter + - Fix that tls-session-ticket-keys: "" on its own in unbound.conf + disables the tls session ticket key calls into the OpenSSL API. + - Fix crash if tls-service-pem not filled in when necessary. + 21 March 2019: Wouter + - Fix #4240: Fix whitespace cleanup in example.conf. diff --git a/doc/unbound.conf.5.in b/doc/unbound.conf.5.in index 326e6fcb2..c14ee2785 100644 --- a/doc/unbound.conf.5.in +++ b/doc/unbound.conf.5.in @@ -828,7 +828,7 @@ Can be given multiple times, for different domains. .TP .B qname\-minimisation: \fI Send minimum amount of information to upstream servers to enhance privacy. -Only sent minimum required labels of the QNAME and set QTYPE to A when +Only send minimum required labels of the QNAME and set QTYPE to A when possible. Best effort approach; full QNAME and original QTYPE will be sent when upstream replies with a RCODE other than NOERROR, except when receiving NXDOMAIN from a DNSSEC signed zone. Default is yes. diff --git a/iterator/iter_utils.c b/iterator/iter_utils.c index be7965a60..2ab55ceb4 100644 --- a/iterator/iter_utils.c +++ b/iterator/iter_utils.c @@ -1211,6 +1211,19 @@ iter_scrub_ds(struct dns_msg* msg, struct ub_packed_rrset_key* ns, uint8_t* z) } } +void +iter_scrub_nxdomain(struct dns_msg* msg) +{ + if(msg->rep->an_numrrsets == 0) + return; + + memmove(msg->rep->rrsets, msg->rep->rrsets+msg->rep->an_numrrsets, + sizeof(struct ub_packed_rrset_key*) * + (msg->rep->rrset_count-msg->rep->an_numrrsets)); + msg->rep->rrset_count -= msg->rep->an_numrrsets; + msg->rep->an_numrrsets = 0; +} + void iter_dec_attempts(struct delegpt* dp, int d) { struct delegpt_addr* a; diff --git a/iterator/iter_utils.h b/iterator/iter_utils.h index ccfb28022..f771930bb 100644 --- a/iterator/iter_utils.h +++ b/iterator/iter_utils.h @@ -334,6 +334,13 @@ int iter_get_next_root(struct iter_hints* hints, struct iter_forwards* fwd, void iter_scrub_ds(struct dns_msg* msg, struct ub_packed_rrset_key* ns, uint8_t* z); +/** + * Prepare an NXDOMAIN message to be used for a subdomain answer by removing all + * RRs from the ANSWER section. + * @param msg: the response to scrub. + */ +void iter_scrub_nxdomain(struct dns_msg* msg); + /** + * Remove query attempts from all available ips. For 0x20. + * @param dp: delegpt.
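The new iter_scrub_nxdomain() above drops the answer-section RRsets by sliding the remaining RRset pointers to the front of the array and shrinking the counts, so a cached NXDOMAIN that still carries, for example, a CNAME in its answer section can be reused for subdomain denials without leaking those records. A self-contained sketch of that compaction idiom, using toy strings instead of Unbound's ub_packed_rrset_key pointers:

/* compact_answer.c: toy model of the pointer-array compaction in
 * iter_scrub_nxdomain(). The string array stands in for rep->rrsets,
 * "count" for rep->rrset_count and "an" for rep->an_numrrsets. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *rrsets[] = { "CNAME (answer)", "SOA (auth)",
		"NSEC3 (auth)", "RRSIG (auth)" };
	size_t count = 4;
	size_t an = 1;	/* answer-section entries to scrub */

	if(an != 0) {
		/* slide the non-answer pointers over the answer ones */
		memmove(rrsets, rrsets + an,
			sizeof(rrsets[0]) * (count - an));
		count -= an;
		an = 0;	/* nothing left in the answer section */
	}
	for(size_t i = 0; i < count; i++)
		printf("kept: %s\n", rrsets[i]);
	return 0;
}

Doing the scrub with one memmove keeps the operation in place on the existing reply, so no reallocation of the message is needed.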
diff --git a/iterator/iterator.c b/iterator/iterator.c index c73fb5177..c906c2714 100644 --- a/iterator/iterator.c +++ b/iterator/iterator.c @@ -2718,8 +2718,15 @@ processQueryResponse(struct module_qstate* qstate, struct iter_qstate* iq, && !(iq->chase_flags & BIT_RD)) { if(FLAGS_GET_RCODE(iq->response->rep->flags) != LDNS_RCODE_NOERROR) { - if(qstate->env->cfg->qname_minimisation_strict) - return final_state(iq); + if(qstate->env->cfg->qname_minimisation_strict) { + if(FLAGS_GET_RCODE(iq->response->rep->flags) == + LDNS_RCODE_NXDOMAIN) { + iter_scrub_nxdomain(iq->response); + return final_state(iq); + } + return error_response(qstate, id, + LDNS_RCODE_SERVFAIL); + } /* Best effort qname-minimisation. * Stop minimising and send full query when * RCODE is not NOERROR. */ diff --git a/pythonmod/doc/examples/example0-1.py b/pythonmod/doc/examples/example0-1.py index 5ae48d166..7904f73a5 100644 --- a/pythonmod/doc/examples/example0-1.py +++ b/pythonmod/doc/examples/example0-1.py @@ -1,8 +1,11 @@ - def init(id, cfg): log_info("pythonmod: init called, module id is %d port: %d script: %s" % (id, cfg.port, cfg.python_script)) return True +def init_standard(id, env): + log_info("pythonmod: init called, module id is %d port: %d script: %s" % (id, env.cfg.port, env.cfg.python_script)) + return True + def deinit(id): log_info("pythonmod: deinit called, module id is %d" % id) return True diff --git a/pythonmod/doc/examples/example0.rst b/pythonmod/doc/examples/example0.rst index 8fff41f33..693972a14 100644 --- a/pythonmod/doc/examples/example0.rst +++ b/pythonmod/doc/examples/example0.rst @@ -54,6 +54,25 @@ Script file must contain four compulsory functions: return True +.. function:: init_standard(id, env) + + Initialize module internals, like database etc. + Called just once on module load. + + *Preferred* over the init() function above as this function's signature is the + same as the C counterpart and allows for extra functionality during init. + The previously accessible configuration options can now be found in env.cfg. + + :param id: module identifier (integer) + :param env: :class:`module_env` module environment + +:: + + def init_standard(id, env): + log_info("pythonmod: init called, module id is %d port: %d script: %s" % (id, env.cfg.port, env.cfg.python_script)) + return True + + .. function:: deinit(id) Deinitialize module internals. diff --git a/pythonmod/examples/avahi-resolver.py b/pythonmod/examples/avahi-resolver.py new file mode 100644 index 000000000..b1d4e36fc --- /dev/null +++ b/pythonmod/examples/avahi-resolver.py @@ -0,0 +1,567 @@ +#!/usr/bin/env python3 +# +# A plugin for the Unbound DNS resolver to resolve DNS records in +# multicast DNS [RFC 6762] via Avahi. +# +# Copyright (C) 2018-2019 Internet Real-Time Lab, Columbia University +# http://www.cs.columbia.edu/irt/ +# +# Written by Jan Janak +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation files +# (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, +# publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, +# subject to the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# +# +# Dependendies: +# Unbound with pythonmodule configured for Python 3 +# dnspython [http://www.dnspython.org] +# pydbus [https://github.com/LEW21/pydbus] +# +# To enable Python 3 support, configure Unbound as follows: +# PYTHON_VERSION=3 ./configure --with-pythonmodule +# +# The plugin in meant to be used as a fallback resolver that resolves +# records in multicast DNS if the upstream server cannot be reached or +# provides no answer (NXDOMAIN). +# +# mDNS requests for negative records, i.e., records for which Avahi +# returns no answer (NXDOMAIN), are expensive. Since there is no +# single authoritative server in mDNS, such requests terminate only +# via a timeout. The timeout is about a second (if MDNS_TIMEOUT is not +# configured), or the value configured via MDNS_TIMEOUT. The +# corresponding Unbound thread will be blocked for this amount of +# time. For this reason, it is important to configure an appropriate +# number of threads in unbound.conf and limit the RR types and names +# that will be resolved via Avahi via the environment variables +# described later. +# +# An example unbound.conf with the plugin enabled: +# +# | server: +# | module-config: "validator python iterator" +# | num-threads: 32 +# | cache-max-negative-ttl: 60 +# | cache-max-ttl: 60 +# +# +# The plugin can also be run interactively. Provide the name and +# record type to be resolved as command line arguments and the +# resolved record will be printed to standard output: +# +# $ ./avahi-resolver.py voip-phx4.phxnet.org A +# voip-phx4.phxnet.org. 120 IN A 10.4.3.2 +# +# +# The behavior of the plugin can be controlled via the following +# environment variables: +# +# DBUS_SYSTEM_BUS_ADDRESS +# +# The address of the system DBus bus, in the format expected by DBus, +# e.g., unix:path=/run/avahi/system-bus.sock +# +# +# DEBUG +# +# Set this environment variable to "yes", "true", "on", or "1" to +# enable debugging. In debugging mode, the plugin will output a lot +# more information about what it is doing either to the standard +# output (when run interactively) or to Unbound via log_info and +# log_error. +# +# By default debugging is disabled. +# +# +# MDNS_TTL +# +# Avahi does not provide the TTL value for the records it returns. +# This environment variable can be used to configure the TTL value for +# such records. +# +# The default value is 120 seconds. +# +# +# MDNS_TIMEOUT +# +# The maximum amount of time (in milliseconds) an Avahi request is +# allowed to run. This value sets the time it takes to resolve +# negative (non-existent) records in Avahi. If unset, the request +# terminates when Avahi sends the "AllForNow" signal, telling the +# client that more records are unlikely to arrive. This takes roughly +# about one second. You may need to configure a longer value here on +# slower networks, e.g., networks that relay mDNS packets such as +# MANETs. +# +# +# MDNS_GETONE +# +# If set to "true", "1", or "on", an Avahi request will terminate as +# soon as at least one record has been found. 
If there are multiple +# nodes in the mDNS network publishing the same record, only one (or +# subset) will be returned. +# +# If set to "false", "0", or "off", the plugin will gather records for +# MDNS_TIMEOUT and return all records found. This is only useful in +# networks where multiple nodes are known to publish different records +# under the same name and the client needs to be able to obtain them +# all. When configured this way, all Avahi requests will always take +# MDNS_TIMEOUT to complete! +# +# This option is set to true by default. +# +# +# MDNS_REJECT_TYPES +# +# A comma-separated list of record types that will NOT be resolved in +# mDNS via Avahi. Use this environment variable to prevent specific +# record types from being resolved via Avahi. For example, if your +# network does not support IPv6, you can put AAAA on this list. +# +# The default value is an empty list. +# +# Example: MDNS_REJECT_TYPES=aaaa,mx,soa +# +# +# MDNS_ACCEPT_TYPES +# +# If set, a record type will be resolved via Avahi if and only if it +# is present on this comma-separated list. In other words, this is a +# whitelist. +# +# The default value is an empty list which means all record types will +# be resolved via Avahi. +# +# Example: MDNS_ACCEPT_TYPES=a,ptr,txt,srv,aaaa,cname +# +# +# MDNS_REJECT_NAMES +# +# If the name being resolved matches the regular expression in this +# environment variable, the name will NOT be resolved via Avahi. In +# other words, this environment variable provides a blacklist. +# +# The default value is empty--no names will be reject. +# +# Example: MDNS_REJECT_NAMES=(^|\.)example\.com\.$ +# +# +# MDNS_ACCEPT_NAMES +# +# If set to a regular expression, a name will be resolved via Avahi if +# and only if it matches the regular expression. In other words, this +# variable provides a whitelist. +# +# The default value is empty--all names will be resolved via Avahi. +# +# Example: MDNS_ACCEPT_NAMES=^.*\.example\.com\.$ +# + +import os +import re +import array +import threading +import traceback +import dns.rdata +import dns.rdatatype +import dns.rdataclass +from queue import Queue +from gi.repository import GLib +from pydbus import SystemBus + + +IF_UNSPEC = -1 +PROTO_UNSPEC = -1 + +sysbus = None +avahi = None +trampoline = dict() +thread_local = threading.local() +dbus_thread = None +dbus_loop = None + + +def str2bool(v): + if v.lower() in ['false', 'no', '0', 'off', '']: + return False + return True + + +def dbg(msg): + if DEBUG != False: + log_info('avahi-resolver: %s' % msg) + + +# +# Although pydbus has an internal facility for handling signals, we +# cannot use that with Avahi. When responding from an internal cache, +# Avahi sends the first signal very quickly, before pydbus has had a +# chance to subscribe for the signal. This will result in lost signal +# and missed data: +# +# https://github.com/LEW21/pydbus/issues/87 +# +# As a workaround, we subscribe to all signals before creating a +# record browser and do our own signal matching and dispatching via +# the following function. 
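The plugin's own dispatcher, signal_dispatcher(), follows directly below; the pattern is a small trampoline in which every RecordBrowser registers itself in a dict keyed by its DBus object path, and one global subscription routes each incoming signal to the matching object by path and signal name. A DBus-free sketch of that dispatch pattern (the class and values here are invented for the sketch, not part of the plugin):

# toy_trampoline.py: dispatch-by-object-path without DBus. Each browser
# registers itself under its path; one dispatcher routes (path, signal,
# args) to the right object, mirroring what signal_dispatcher() does
# with real Avahi signals.
trampoline = {}

class ToyBrowser:
    def __init__(self, path):
        self.path = path
        self.records = []
        trampoline[path] = self

    def itemNew(self, name, rdata):
        self.records.append((name, rdata))

    def allForNow(self):
        print('%s: %d record(s)' % (self.path, len(self.records)))
        del trampoline[self.path]

def dispatch(path, signal, *args):
    obj = trampoline.get(path)
    if obj is None:
        return              # signal for a browser we no longer track
    getattr(obj, signal)(*args)

b = ToyBrowser('/Client1/RecordBrowser1')
dispatch('/Client1/RecordBrowser1', 'itemNew', 'printer.local', b'\xc0\xa8\x01\x07')
dispatch('/Client1/RecordBrowser1', 'allForNow')
dispatch('/Client9/RecordBrowser9', 'itemNew', 'ignored.local', b'')   # silently dropped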
+# +def signal_dispatcher(connection, sender, path, interface, name, args): + o = trampoline.get(path, None) + if o is None: + return + + if name == 'ItemNew': o.itemNew(*args) + elif name == 'ItemRemove': o.itemRemove(*args) + elif name == 'AllForNow': o.allForNow(*args) + elif name == 'Failure': o.failure(*args) + + +class RecordBrowser: + def __init__(self, callback, name, type_, timeout=None, getone=True): + self.callback = callback + self.records = [] + self.error = None + self.getone = getone + + self.timer = None if timeout is None else GLib.timeout_add(timeout, self.timedOut) + + self.browser_path = avahi.RecordBrowserNew(IF_UNSPEC, PROTO_UNSPEC, name, dns.rdataclass.IN, type_, 0) + trampoline[self.browser_path] = self + self.browser = sysbus.get('.Avahi', self.browser_path) + self.dbg('Created RecordBrowser(name=%s, type=%s, getone=%s, timeout=%s)' + % (name, dns.rdatatype.to_text(type_), getone, timeout)) + + def dbg(self, msg): + dbg('[%s] %s' % (self.browser_path, msg)) + + def _done(self): + del trampoline[self.browser_path] + self.dbg('Freeing') + self.browser.Free() + + if self.timer is not None: + self.dbg('Removing timer') + GLib.source_remove(self.timer) + + self.callback(self.records, self.error) + + def itemNew(self, interface, protocol, name, class_, type_, rdata, flags): + self.dbg('Got signal ItemNew') + self.records.append((name, class_, type_, rdata)) + if self.getone: + self._done() + + def itemRemove(self, interface, protocol, name, class_, type_, rdata, flags): + self.dbg('Got signal ItemRemove') + self.records.remove((name, class_, type_, rdata)) + + def failure(self, error): + self.dbg('Got signal Failure') + self.error = Exception(error) + self._done() + + def allForNow(self): + self.dbg('Got signal AllForNow') + if self.timer is None: + self._done() + + def timedOut(self): + self.dbg('Timed out') + self._done() + return False + + +# +# This function runs the main event loop for DBus (GLib). This +# function must be run in a dedicated worker thread. +# +def dbus_main(): + global sysbus, avahi, dbus_loop + + dbg('Connecting to system DBus') + sysbus = SystemBus() + + dbg('Subscribing to .Avahi.RecordBrowser signals') + sysbus.con.signal_subscribe('org.freedesktop.Avahi', + 'org.freedesktop.Avahi.RecordBrowser', + None, None, None, 0, signal_dispatcher) + + avahi = sysbus.get('.Avahi', '/') + + dbg("Connected to Avahi Daemon: %s (API %s) [%s]" + % (avahi.GetVersionString(), avahi.GetAPIVersion(), avahi.GetHostNameFqdn())) + + dbg('Starting DBus main loop') + dbus_loop = GLib.MainLoop() + dbus_loop.run() + + +# +# This function must be run in the DBus worker thread. It creates a +# new RecordBrowser instance and once it has finished doing it thing, +# it will send the result back to the original thread via the queue. +# +def start_resolver(queue, *args, **kwargs): + try: + RecordBrowser(lambda *v: queue.put_nowait(v), *args, **kwargs) + except Exception as e: + queue.put_nowait((None, e)) + + return False + + +# +# To resolve a request, we setup a queue, post a task to the DBus +# worker thread, and wait for the result (or error) to arrive over the +# queue. If the worker thread reports an error, raise the error as an +# exception. 
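The resolve() helper described above comes next in the file. The handoff it performs — post a callable to the thread that owns the event loop, then block on a queue until a (records, error) pair comes back — can be sketched with plain threading; the names below (tasks, lookup) are invented for this sketch, while the plugin itself uses GLib.idle_add and a per-thread queue:

# toy_handoff.py: the resolve() handoff with plain threading instead of
# GLib; "tasks" stands in for the GLib main loop's idle queue and
# "lookup" for resolve().
import threading
from queue import Queue

tasks = Queue()

def loop_thread():
    # plays the role of the DBus/GLib main loop thread
    while True:
        work = tasks.get()
        if work is None:
            break
        work()

def lookup(name):
    reply = Queue()                 # the plugin keeps one queue per thread
    def work():
        try:
            # in the plugin this is where the RecordBrowser is created
            reply.put_nowait(([(name, 'IN', 'A', '192.0.2.1')], None))
        except Exception as e:
            reply.put_nowait((None, e))
    tasks.put(work)                 # plugin: GLib.idle_add(...)
    records, error = reply.get()    # block until the loop thread answers
    if error is not None:
        raise error
    return records

t = threading.Thread(target=loop_thread, daemon=True)
t.start()
print(lookup('printer.local'))
tasks.put(None)
t.join()

Keeping one reply queue per calling thread, as the plugin does with thread_local.queue, lets several Unbound worker threads wait on Avahi concurrently without mixing up answers.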
+# +def resolve(*args, **kwargs): + try: + queue = thread_local.queue + except AttributeError: + dbg('Creating new per-thread queue') + queue = Queue() + thread_local.queue = queue + + GLib.idle_add(lambda: start_resolver(queue, *args, **kwargs)) + + records, error = queue.get() + queue.task_done() + + if error is not None: + raise error + + return records + + +def parse_type_list(lst): + return list(map(dns.rdatatype.from_text, [v.strip() for v in lst.split(',') if len(v)])) + + +def init(*args, **kwargs): + global dbus_thread, DEBUG + global MDNS_TTL, MDNS_GETONE, MDNS_TIMEOUT + global MDNS_REJECT_TYPES, MDNS_ACCEPT_TYPES + global MDNS_REJECT_NAMES, MDNS_ACCEPT_NAMES + + DEBUG = str2bool(os.environ.get('DEBUG', str(False))) + + MDNS_TTL = int(os.environ.get('MDNS_TTL', 120)) + dbg("TTL for records from Avahi: %d" % MDNS_TTL) + + MDNS_REJECT_TYPES = parse_type_list(os.environ.get('MDNS_REJECT_TYPES', '')) + if MDNS_REJECT_TYPES: + dbg('Types NOT resolved via Avahi: %s' % MDNS_REJECT_TYPES) + + MDNS_ACCEPT_TYPES = parse_type_list(os.environ.get('MDNS_ACCEPT_TYPES', '')) + if MDNS_ACCEPT_TYPES: + dbg('ONLY resolving the following types via Avahi: %s' % MDNS_ACCEPT_TYPES) + + v = os.environ.get('MDNS_REJECT_NAMES', None) + MDNS_REJECT_NAMES = re.compile(v, flags=re.I | re.S) if v is not None else None + if MDNS_REJECT_NAMES is not None: + dbg('Names NOT resolved via Avahi: %s' % MDNS_REJECT_NAMES.pattern) + + v = os.environ.get('MDNS_ACCEPT_NAMES', None) + MDNS_ACCEPT_NAMES = re.compile(v, flags=re.I | re.S) if v is not None else None + if MDNS_ACCEPT_NAMES is not None: + dbg('ONLY resolving the following names via Avahi: %s' % MDNS_ACCEPT_NAMES.pattern) + + v = os.environ.get('MDNS_TIMEOUT', None) + MDNS_TIMEOUT = int(v) if v is not None else None + if MDNS_TIMEOUT is not None: + dbg('Avahi request timeout: %s' % MDNS_TIMEOUT) + + MDNS_GETONE = str2bool(os.environ.get('MDNS_GETONE', str(True))) + dbg('Terminate Avahi requests on first record: %s' % MDNS_GETONE) + + dbus_thread = threading.Thread(target=dbus_main) + dbus_thread.daemon = True + dbus_thread.start() + + +def deinit(*args, **kwargs): + dbus_loop.quit() + dbus_thread.join() + return True + + +def inform_super(id, qstate, superqstate, qdata): + return True + + +def get_rcode(msg): + if not msg: + return RCODE_SERVFAIL + + return msg.rep.flags & 0xf + + +def rr2text(rec, ttl): + name, class_, type_, rdata = rec + wire = array.array('B', rdata).tostring() + return '%s. %d %s %s %s' % ( + name, + ttl, + dns.rdataclass.to_text(class_), + dns.rdatatype.to_text(type_), + dns.rdata.from_wire(class_, type_, wire, 0, len(wire), None)) + + +def operate(id, event, qstate, qdata): + qi = qstate.qinfo + name = qi.qname_str + type_ = qi.qtype + type_str = dns.rdatatype.to_text(type_) + class_ = qi.qclass + class_str = dns.rdataclass.to_text(class_) + rc = get_rcode(qstate.return_msg) + + if event == MODULE_EVENT_NEW or event == MODULE_EVENT_PASS: + qstate.ext_state[id] = MODULE_WAIT_MODULE + return True + + if event != MODULE_EVENT_MODDONE: + log_err("avahi-resolver: Unexpected event %d" % event) + qstate.ext_state[id] = MODULE_ERROR + return True + + qstate.ext_state[id] = MODULE_FINISHED + + # Only resolve via Avahi if we got NXDOMAIn from the upstream DNS + # server, or if we could not reach the upstream DNS server. If we + # got some records for the name from the upstream DNS server + # already, do not resolve the record in Avahi. 
+ if rc != RCODE_NXDOMAIN and rc != RCODE_SERVFAIL: + return True + + dbg("Got request for '%s %s %s'" % (name, class_str, type_str)) + + # Avahi only supports the IN class + if class_ != RR_CLASS_IN: + dbg('Rejected, Avahi only supports the IN class') + return True + + # Avahi does not support meta queries (e.g., ANY) + if dns.rdatatype.is_metatype(type_): + dbg('Rejected, Avahi does not support the type %s' % type_str) + return True + + # If we have a type blacklist and the requested type is on the + # list, reject it. + if MDNS_REJECT_TYPES and type_ in MDNS_REJECT_TYPES: + dbg('Rejected, type %s is on the blacklist' % type_str) + return True + + # If we have a type whitelist and if the requested type is not on + # the list, reject it. + if MDNS_ACCEPT_TYPES and type_ not in MDNS_ACCEPT_TYPES: + dbg('Rejected, type %s is not on the whitelist' % type_str) + return True + + # If we have a name blacklist and if the requested name matches + # the blacklist, reject it. + if MDNS_REJECT_NAMES is not None: + if MDNS_REJECT_NAMES.search(name): + dbg('Rejected, name %s is on the blacklist' % name) + return True + + # If we have a name whitelist and if the requested name does not + # match the whitelist, reject it. + if MDNS_ACCEPT_NAMES is not None: + if not MDNS_ACCEPT_NAMES.search(name): + dbg('Rejected, name %s is not on the whitelist' % name) + return True + + dbg("Resolving '%s %s %s' via Avahi" % (name, class_str, type_str)) + + recs = resolve(name, type_, getone=MDNS_GETONE, timeout=MDNS_TIMEOUT) + + if not recs: + dbg('Result: Not found (NXDOMAIN)') + qstate.return_rcode = RCODE_NXDOMAIN + return True + + m = DNSMessage(name, type_, class_, PKT_QR | PKT_RD | PKT_RA) + for r in recs: + s = rr2text(r, MDNS_TTL) + dbg('Result: %s' % s) + m.answer.append(s) + + if not m.set_return_msg(qstate): + raise Exception("Error in set_return_msg") + + if not storeQueryInCache(qstate, qstate.return_msg.qinfo, qstate.return_msg.rep, 0): + raise Exception("Error in storeQueryInCache") + + qstate.return_msg.rep.security = 2 + qstate.return_rcode = RCODE_NOERROR + return True + + +# +# It does not appear to be sufficient to check __name__ to determine +# whether we are being run in interactive mode. As a workaround, try +# to import module unboundmodule and if that fails, assume we're being +# run in interactive mode. +# +try: + import unboundmodule + embedded = True +except ImportError: + embedded = False + +if __name__ == '__main__' and not embedded: + import sys + + def log_info(msg): + print(msg) + + def log_err(msg): + print('ERROR: %s' % msg, file=sys.stderr) + + if len(sys.argv) != 3: + print('Usage: %s ' % sys.argv[0]) + sys.exit(2) + + name = sys.argv[1] + type_str = sys.argv[2] + + try: + type_ = dns.rdatatype.from_text(type_str) + except dns.rdatatype.UnknownRdatatype: + log_err('Unsupported DNS record type "%s"' % type_str) + sys.exit(2) + + if dns.rdatatype.is_metatype(type_): + log_err('Meta record type "%s" cannot be resolved via Avahi' % type_str) + sys.exit(2) + + init() + try: + recs = resolve(name, type_, getone=MDNS_GETONE, timeout=MDNS_TIMEOUT) + if not len(recs): + print('%s not found (NXDOMAIN)' % name) + sys.exit(1) + + for r in recs: + print(rr2text(r, MDNS_TTL)) + finally: + deinit() diff --git a/pythonmod/examples/edns.py b/pythonmod/examples/edns.py index 37ce9e279..ca1bb8da7 100644 --- a/pythonmod/examples/edns.py +++ b/pythonmod/examples/edns.py @@ -78,7 +78,7 @@ def init_standard(id, env): extra functionality during init. 
..note:: This function is preferred by unbound over the old init function. ..note:: The previously accessible configuration options can now be found in - env.cgf. + env.cfg. """ log_info("python: inited script {}".format(env.cfg.python_script)) diff --git a/pythonmod/examples/inplace_callbacks.py b/pythonmod/examples/inplace_callbacks.py index 776add8c2..768c2d013 100644 --- a/pythonmod/examples/inplace_callbacks.py +++ b/pythonmod/examples/inplace_callbacks.py @@ -275,7 +275,7 @@ def init_standard(id, env): ..note:: This function is preferred by unbound over the old init function. ..note:: The previously accessible configuration options can now be found in - env.cgf. + env.cfg. """ log_info("python: inited script {}".format(env.cfg.python_script)) diff --git a/services/authzone.c b/services/authzone.c index 2649867ed..cabb00a84 100644 --- a/services/authzone.c +++ b/services/authzone.c @@ -2042,11 +2042,13 @@ auth_xfer_delete(struct auth_xfer* xfr) if(xfr->task_probe) { auth_free_masters(xfr->task_probe->masters); comm_point_delete(xfr->task_probe->cp); + comm_timer_delete(xfr->task_probe->timer); free(xfr->task_probe); } if(xfr->task_transfer) { auth_free_masters(xfr->task_transfer->masters); comm_point_delete(xfr->task_transfer->cp); + comm_timer_delete(xfr->task_transfer->timer); if(xfr->task_transfer->chunks_first) { auth_chunks_delete(xfr->task_transfer); } @@ -2746,6 +2748,7 @@ az_nsec3_insert(struct auth_zone* z, struct regional* region, * that is an exact match that should exist for it. * If that does not exist, a higher exact match + nxproof is enabled * (for some sort of opt-out empty nonterminal cases). + * nodataproof: search for exact match and include that instead. * ceproof: include ce proof NSEC3 (omitted for wildcard replies). * nxproof: include denial of the qname. * wcproof: include denial of wildcard (wildcard.ce). @@ -2753,7 +2756,8 @@ az_nsec3_insert(struct auth_zone* z, struct regional* region, static int az_add_nsec3_proof(struct auth_zone* z, struct regional* region, struct dns_msg* msg, uint8_t* cenm, size_t cenmlen, uint8_t* qname, - size_t qname_len, int ceproof, int nxproof, int wcproof) + size_t qname_len, int nodataproof, int ceproof, int nxproof, + int wcproof) { int algo; size_t iter, saltlen; @@ -2764,6 +2768,19 @@ az_add_nsec3_proof(struct auth_zone* z, struct regional* region, /* find parameters of nsec3 proof */ if(!az_nsec3_param(z, &algo, &iter, &salt, &saltlen)) return 1; /* no nsec3 */ + if(nodataproof) { + /* see if the node has a hash of itself for the nodata + * proof nsec3, this has to be an exact match nsec3. */ + struct auth_data* match; + match = az_nsec3_find_exact(z, qname, qname_len, algo, + iter, salt, saltlen); + if(match) { + if(!az_nsec3_insert(z, region, msg, match)) + return 0; + /* only nodata NSEC3 needed, no CE or others. 
*/ + return 1; + } + } /* find ce that has an NSEC3 */ if(ceproof) { node = az_nsec3_find_ce(z, &cenm, &cenmlen, &no_exact_ce, @@ -2916,7 +2933,7 @@ az_generate_notype_answer(struct auth_zone* z, struct regional* region, /* DNSSEC denial NSEC3 */ if(!az_add_nsec3_proof(z, region, msg, node->name, node->namelen, msg->qinfo.qname, - msg->qinfo.qname_len, 1, 0, 0)) + msg->qinfo.qname_len, 1, 1, 0, 0)) return 0; } return 1; @@ -2943,7 +2960,7 @@ az_generate_referral_answer(struct auth_zone* z, struct regional* region, } else { if(!az_add_nsec3_proof(z, region, msg, ce->name, ce->namelen, msg->qinfo.qname, - msg->qinfo.qname_len, 1, 0, 0)) + msg->qinfo.qname_len, 1, 1, 0, 0)) return 0; } } @@ -2982,6 +2999,7 @@ az_generate_wildcard_answer(struct auth_zone* z, struct query_info* qinfo, struct auth_data* wildcard, struct auth_data* node) { struct auth_rrset* rrset, *nsec; + int insert_ce = 0; if((rrset=az_domain_rrset(wildcard, qinfo->qtype)) != NULL) { /* wildcard has type, add it */ if(!msg_add_rrset_an(z, region, msg, wildcard, rrset)) @@ -3008,6 +3026,10 @@ az_generate_wildcard_answer(struct auth_zone* z, struct query_info* qinfo, /* call other notype routine for dnssec notype denials */ if(!az_generate_notype_answer(z, region, msg, wildcard)) return 0; + /* because the notype, there is no positive data with an + * RRSIG that indicates the wildcard position. Thus the + * wildcard qname denial needs to have a CE nsec3. */ + insert_ce = 1; } /* ce and node for dnssec denial of wildcard original name */ @@ -3019,7 +3041,7 @@ az_generate_wildcard_answer(struct auth_zone* z, struct query_info* qinfo, dname_remove_label(&wildup, &wilduplen); if(!az_add_nsec3_proof(z, region, msg, wildup, wilduplen, msg->qinfo.qname, - msg->qinfo.qname_len, 0, 1, 0)) + msg->qinfo.qname_len, 0, insert_ce, 1, 0)) return 0; } @@ -3045,7 +3067,7 @@ az_generate_nxdomain_answer(struct auth_zone* z, struct regional* region, } else if(ce) { if(!az_add_nsec3_proof(z, region, msg, ce->name, ce->namelen, msg->qinfo.qname, - msg->qinfo.qname_len, 1, 1, 1)) + msg->qinfo.qname_len, 0, 1, 1, 1)) return 0; } return 1; @@ -4953,6 +4975,9 @@ xfr_process_chunk_list(struct auth_xfer* xfr, struct module_env* env, static void xfr_transfer_disown(struct auth_xfer* xfr) { + /* remove timer (from this worker's event base) */ + comm_timer_delete(xfr->task_transfer->timer); + xfr->task_transfer->timer = NULL; /* remove the commpoint */ comm_point_delete(xfr->task_transfer->cp); xfr->task_transfer->cp = NULL; @@ -5035,6 +5060,8 @@ xfr_transfer_init_fetch(struct auth_xfer* xfr, struct module_env* env) socklen_t addrlen = 0; struct auth_master* master = xfr->task_transfer->master; char *auth_name = NULL; + struct timeval t; + int timeout; if(!master) return 0; if(master->allow_notify) return 0; /* only for notify */ @@ -5060,25 +5087,46 @@ xfr_transfer_init_fetch(struct auth_xfer* xfr, struct module_env* env) comm_point_delete(xfr->task_transfer->cp); xfr->task_transfer->cp = NULL; } + if(!xfr->task_transfer->timer) { + xfr->task_transfer->timer = comm_timer_create(env->worker_base, + auth_xfer_transfer_timer_callback, xfr); + if(!xfr->task_transfer->timer) { + log_err("malloc failure"); + return 0; + } + } + timeout = AUTH_TRANSFER_TIMEOUT; +#ifndef S_SPLINT_S + t.tv_sec = timeout/1000; + t.tv_usec = (timeout%1000)*1000; +#endif if(master->http) { /* perform http fetch */ /* store http port number into sockaddr, * unless someone used unbound's host@port notation */ + xfr->task_transfer->on_ixfr = 0; if(strchr(master->host, '@') == NULL) 
sockaddr_store_port(&addr, addrlen, master->port); xfr->task_transfer->cp = outnet_comm_point_for_http( env->outnet, auth_xfer_transfer_http_callback, xfr, - &addr, addrlen, AUTH_TRANSFER_TIMEOUT, master->ssl, - master->host, master->file); + &addr, addrlen, -1, master->ssl, master->host, + master->file); if(!xfr->task_transfer->cp) { - char zname[255+1]; + char zname[255+1], as[256]; dname_str(xfr->name, zname); + addr_to_str(&addr, addrlen, as, sizeof(as)); verbose(VERB_ALGO, "cannot create http cp " - "connection for %s to %s", zname, - master->host); + "connection for %s to %s", zname, as); return 0; } + comm_timer_set(xfr->task_transfer->timer, &t); + if(verbosity >= VERB_ALGO) { + char zname[255+1], as[256]; + dname_str(xfr->name, zname); + addr_to_str(&addr, addrlen, as, sizeof(as)); + verbose(VERB_ALGO, "auth zone %s transfer next HTTP fetch from %s started", zname, as); + } return 1; } @@ -5092,15 +5140,24 @@ xfr_transfer_init_fetch(struct auth_xfer* xfr, struct module_env* env) /* connect on fd */ xfr->task_transfer->cp = outnet_comm_point_for_tcp(env->outnet, auth_xfer_transfer_tcp_callback, xfr, &addr, addrlen, - env->scratch_buffer, AUTH_TRANSFER_TIMEOUT, + env->scratch_buffer, -1, auth_name != NULL, auth_name); if(!xfr->task_transfer->cp) { - char zname[255+1]; - dname_str(xfr->name, zname); + char zname[255+1], as[256]; + dname_str(xfr->name, zname); + addr_to_str(&addr, addrlen, as, sizeof(as)); verbose(VERB_ALGO, "cannot create tcp cp connection for " - "xfr %s to %s", zname, master->host); + "xfr %s to %s", zname, as); return 0; } + comm_timer_set(xfr->task_transfer->timer, &t); + if(verbosity >= VERB_ALGO) { + char zname[255+1], as[256]; + dname_str(xfr->name, zname); + addr_to_str(&addr, addrlen, as, sizeof(as)); + verbose(VERB_ALGO, "auth zone %s transfer next %s fetch from %s started", zname, + (xfr->task_transfer->on_ixfr?"IXFR":"AXFR"), as); + } return 1; } @@ -5118,6 +5175,11 @@ xfr_transfer_nexttarget_or_end(struct auth_xfer* xfr, struct module_env* env) * and we may then get an instant cache response, * and that calls the callback just like a full * lookup and lookup failures also call callback */ + if(verbosity >= VERB_ALGO) { + char zname[255+1]; + dname_str(xfr->name, zname); + verbose(VERB_ALGO, "auth zone %s transfer next target lookup", zname); + } lock_basic_unlock(&xfr->lock); return; } @@ -5136,6 +5198,11 @@ xfr_transfer_nexttarget_or_end(struct auth_xfer* xfr, struct module_env* env) /* failed to fetch, next master */ xfr_transfer_nextmaster(xfr); } + if(verbosity >= VERB_ALGO) { + char zname[255+1]; + dname_str(xfr->name, zname); + verbose(VERB_ALGO, "auth zone %s transfer failed, wait", zname); + } /* we failed to fetch the zone, move to wait task * use the shorter retry timeout */ @@ -5233,7 +5300,25 @@ void auth_xfer_transfer_lookup_callback(void* arg, int rcode, sldns_buffer* buf, if(answer) { xfr_master_add_addrs(xfr->task_transfer-> lookup_target, answer, wanted_qtype); + } else { + if(verbosity >= VERB_ALGO) { + char zname[255+1]; + dname_str(xfr->name, zname); + verbose(VERB_ALGO, "auth zone %s host %s type %s transfer lookup has nodata", zname, xfr->task_transfer->lookup_target->host, (xfr->task_transfer->lookup_aaaa?"AAAA":"A")); + } } + } else { + if(verbosity >= VERB_ALGO) { + char zname[255+1]; + dname_str(xfr->name, zname); + verbose(VERB_ALGO, "auth zone %s host %s type %s transfer lookup has no answer", zname, xfr->task_transfer->lookup_target->host, (xfr->task_transfer->lookup_aaaa?"AAAA":"A")); + } + } + } else { + if(verbosity >= 
VERB_ALGO) { + char zname[255+1]; + dname_str(xfr->name, zname); + verbose(VERB_ALGO, "auth zone %s host %s type %s transfer lookup failed", zname, xfr->task_transfer->lookup_target->host, (xfr->task_transfer->lookup_aaaa?"AAAA":"A")); } } if(xfr->task_transfer->lookup_target->list && @@ -5618,6 +5703,46 @@ process_list_end_transfer(struct auth_xfer* xfr, struct module_env* env) xfr_transfer_nexttarget_or_end(xfr, env); } +/** callback for the task_transfer timer */ +void +auth_xfer_transfer_timer_callback(void* arg) +{ + struct auth_xfer* xfr = (struct auth_xfer*)arg; + struct module_env* env; + int gonextonfail = 1; + log_assert(xfr->task_transfer); + lock_basic_lock(&xfr->lock); + env = xfr->task_transfer->env; + if(env->outnet->want_to_quit) { + lock_basic_unlock(&xfr->lock); + return; /* stop on quit */ + } + + verbose(VERB_ALGO, "xfr stopped, connection timeout to %s", + xfr->task_transfer->master->host); + + /* see if IXFR caused the failure, if so, try AXFR */ + if(xfr->task_transfer->on_ixfr) { + xfr->task_transfer->ixfr_possible_timeout_count++; + if(xfr->task_transfer->ixfr_possible_timeout_count >= + NUM_TIMEOUTS_FALLBACK_IXFR) { + verbose(VERB_ALGO, "xfr to %s, fallback " + "from IXFR to AXFR (because of timeouts)", + xfr->task_transfer->master->host); + xfr->task_transfer->ixfr_fail = 1; + gonextonfail = 0; + } + } + + /* delete transferred data from list */ + auth_chunks_delete(xfr->task_transfer); + comm_point_delete(xfr->task_transfer->cp); + xfr->task_transfer->cp = NULL; + if(gonextonfail) + xfr_transfer_nextmaster(xfr); + xfr_transfer_nexttarget_or_end(xfr, env); +} + /** callback for task_transfer tcp connections */ int auth_xfer_transfer_tcp_callback(struct comm_point* c, void* arg, int err, @@ -5634,6 +5759,8 @@ auth_xfer_transfer_tcp_callback(struct comm_point* c, void* arg, int err, lock_basic_unlock(&xfr->lock); return 0; /* stop on quit */ } + /* stop the timer */ + comm_timer_disable(xfr->task_transfer->timer); if(err != NETEVENT_NOERROR) { /* connection failed, closed, or timeout */ @@ -5714,6 +5841,8 @@ auth_xfer_transfer_http_callback(struct comm_point* c, void* arg, int err, return 0; /* stop on quit */ } verbose(VERB_ALGO, "auth zone transfer http callback"); + /* stop the timer */ + comm_timer_disable(xfr->task_transfer->timer); if(err != NETEVENT_NOERROR && err != NETEVENT_DONE) { /* connection failed, closed, or timeout */ @@ -5853,14 +5982,26 @@ xfr_probe_send_probe(struct auth_xfer* xfr, struct module_env* env, xfr->task_probe->id = (uint16_t)(ub_random(env->rnd)&0xffff); xfr_create_soa_probe_packet(xfr, env->scratch_buffer, xfr->task_probe->id); + /* we need to remove the cp if we have a different ip4/ip6 type now */ + if(xfr->task_probe->cp && + ((xfr->task_probe->cp_is_ip6 && !addr_is_ip6(&addr, addrlen)) || + (!xfr->task_probe->cp_is_ip6 && addr_is_ip6(&addr, addrlen))) + ) { + comm_point_delete(xfr->task_probe->cp); + xfr->task_probe->cp = NULL; + } if(!xfr->task_probe->cp) { + if(addr_is_ip6(&addr, addrlen)) + xfr->task_probe->cp_is_ip6 = 1; + else xfr->task_probe->cp_is_ip6 = 0; xfr->task_probe->cp = outnet_comm_point_for_udp(env->outnet, auth_xfer_probe_udp_callback, xfr, &addr, addrlen); if(!xfr->task_probe->cp) { - char zname[255+1]; + char zname[255+1], as[256]; dname_str(xfr->name, zname); + addr_to_str(&addr, addrlen, as, sizeof(as)); verbose(VERB_ALGO, "cannot create udp cp for " - "probe %s to %s", zname, master->host); + "probe %s to %s", zname, as); return 0; } } @@ -5876,12 +6017,20 @@ xfr_probe_send_probe(struct auth_xfer* xfr, 
struct module_env* env, /* send udp packet */ if(!comm_point_send_udp_msg(xfr->task_probe->cp, env->scratch_buffer, (struct sockaddr*)&addr, addrlen)) { - char zname[255+1]; + char zname[255+1], as[256]; dname_str(xfr->name, zname); + addr_to_str(&addr, addrlen, as, sizeof(as)); verbose(VERB_ALGO, "failed to send soa probe for %s to %s", - zname, master->host); + zname, as); return 0; } + if(verbosity >= VERB_ALGO) { + char zname[255+1], as[256]; + dname_str(xfr->name, zname); + addr_to_str(&addr, addrlen, as, sizeof(as)); + verbose(VERB_ALGO, "auth zone %s soa probe sent to %s", zname, + as); + } xfr->task_probe->timeout = timeout; #ifndef S_SPLINT_S t.tv_sec = timeout/1000; @@ -5906,6 +6055,11 @@ auth_xfer_probe_timer_callback(void* arg) return; /* stop on quit */ } + if(verbosity >= VERB_ALGO) { + char zname[255+1]; + dname_str(xfr->name, zname); + verbose(VERB_ALGO, "auth zone %s soa probe timeout", zname); + } if(xfr->task_probe->timeout <= AUTH_PROBE_TIMEOUT_STOP) { /* try again with bigger timeout */ if(xfr_probe_send_probe(xfr, env, xfr->task_probe->timeout*2)) { @@ -6093,6 +6247,11 @@ xfr_probe_send_or_end(struct auth_xfer* xfr, struct module_env* env) * and we may then get an instant cache response, * and that calls the callback just like a full * lookup and lookup failures also call callback */ + if(verbosity >= VERB_ALGO) { + char zname[255+1]; + dname_str(xfr->name, zname); + verbose(VERB_ALGO, "auth zone %s probe next target lookup", zname); + } lock_basic_unlock(&xfr->lock); return; } @@ -6101,9 +6260,19 @@ xfr_probe_send_or_end(struct auth_xfer* xfr, struct module_env* env) /* probe of list has ended. Create or refresh the list of of * allow_notify addrs */ probe_copy_masters_for_allow_notify(xfr); + if(verbosity >= VERB_ALGO) { + char zname[255+1]; + dname_str(xfr->name, zname); + verbose(VERB_ALGO, "auth zone %s probe: notify addrs updated", zname); + } if(xfr->task_probe->only_lookup) { /* only wanted lookups for copy, stop probe and start wait */ xfr->task_probe->only_lookup = 0; + if(verbosity >= VERB_ALGO) { + char zname[255+1]; + dname_str(xfr->name, zname); + verbose(VERB_ALGO, "auth zone %s probe: finished only_lookup", zname); + } xfr_probe_disown(xfr); if(xfr->task_nextprobe->worker == NULL) xfr_set_timeout(xfr, env, 0, 0); @@ -6125,13 +6294,22 @@ xfr_probe_send_or_end(struct auth_xfer* xfr, struct module_env* env) /* done with probe sequence, wait */ if(xfr->task_probe->have_new_lease) { /* if zone not updated, start the wait timer again */ - verbose(VERB_ALGO, "auth_zone unchanged, new lease, wait"); + if(verbosity >= VERB_ALGO) { + char zname[255+1]; + dname_str(xfr->name, zname); + verbose(VERB_ALGO, "auth_zone %s unchanged, new lease, wait", zname); + } xfr_probe_disown(xfr); if(xfr->have_zone) xfr->lease_time = *env->now; if(xfr->task_nextprobe->worker == NULL) xfr_set_timeout(xfr, env, 0, 0); } else { + if(verbosity >= VERB_ALGO) { + char zname[255+1]; + dname_str(xfr->name, zname); + verbose(VERB_ALGO, "auth zone %s soa probe failed, wait to retry", zname); + } /* we failed to send this as well, move to the wait task, * use the shorter retry timeout */ xfr_probe_disown(xfr); @@ -6176,7 +6354,25 @@ void auth_xfer_probe_lookup_callback(void* arg, int rcode, sldns_buffer* buf, if(answer) { xfr_master_add_addrs(xfr->task_probe-> lookup_target, answer, wanted_qtype); + } else { + if(verbosity >= VERB_ALGO) { + char zname[255+1]; + dname_str(xfr->name, zname); + verbose(VERB_ALGO, "auth zone %s host %s type %s probe lookup has nodata", zname, 
xfr->task_probe->lookup_target->host, (xfr->task_probe->lookup_aaaa?"AAAA":"A")); + } } + } else { + if(verbosity >= VERB_ALGO) { + char zname[255+1]; + dname_str(xfr->name, zname); + verbose(VERB_ALGO, "auth zone %s host %s type %s probe lookup has no address", zname, xfr->task_probe->lookup_target->host, (xfr->task_probe->lookup_aaaa?"AAAA":"A")); + } + } + } else { + if(verbosity >= VERB_ALGO) { + char zname[255+1]; + dname_str(xfr->name, zname); + verbose(VERB_ALGO, "auth zone %s host %s type %s probe lookup failed", zname, xfr->task_probe->lookup_target->host, (xfr->task_probe->lookup_aaaa?"AAAA":"A")); } } if(xfr->task_probe->lookup_target->list && diff --git a/services/authzone.h b/services/authzone.h index 4706803a8..a695bd029 100644 --- a/services/authzone.h +++ b/services/authzone.h @@ -327,6 +327,8 @@ struct auth_probe { /** the SOA probe udp event. * on the workers event base. */ struct comm_point* cp; + /** is the cp for ip6 or ip4 */ + int cp_is_ip6; /** timeout for packets. * on the workers event base. */ struct comm_timer* timer; @@ -398,6 +400,9 @@ struct auth_transfer { /** the transfer (TCP) to the master. * on the workers event base. */ struct comm_point* cp; + /** timeout for the transfer. + * on the workers event base. */ + struct comm_timer* timer; }; /** list of addresses */ @@ -647,6 +652,8 @@ int auth_xfer_transfer_http_callback(struct comm_point* c, void* arg, int err, struct comm_reply* repinfo); /** xfer probe timeout callback, part of task_probe */ void auth_xfer_probe_timer_callback(void* arg); +/** xfer transfer timeout callback, part of task_transfer */ +void auth_xfer_transfer_timer_callback(void* arg); /** mesh callback for task_probe on lookup of host names */ void auth_xfer_probe_lookup_callback(void* arg, int rcode, struct sldns_buffer* buf, enum sec_status sec, char* why_bogus, diff --git a/services/cache/dns.c b/services/cache/dns.c index 088efbcf8..aa4efec73 100644 --- a/services/cache/dns.c +++ b/services/cache/dns.c @@ -40,6 +40,7 @@ */ #include "config.h" #include "iterator/iter_delegpt.h" +#include "iterator/iter_utils.h" #include "validator/val_nsec.h" #include "validator/val_utils.h" #include "services/cache/dns.h" @@ -914,12 +915,15 @@ dns_cache_lookup(struct module_env* env, struct dns_msg* msg; if(FLAGS_GET_RCODE(data->flags) == LDNS_RCODE_NXDOMAIN && data->security == sec_status_secure + && (data->an_numrrsets == 0 || + ntohs(data->rrsets[0]->rk.type) != LDNS_RR_TYPE_CNAME) && (msg=tomsg(env, &k, data, region, now, scratch))){ lock_rw_unlock(&e->lock); msg->qinfo.qname=qname; msg->qinfo.qname_len=qnamelen; /* check that DNSSEC really works out */ msg->rep->security = sec_status_unchecked; + iter_scrub_nxdomain(msg); return msg; } lock_rw_unlock(&e->lock); diff --git a/services/listen_dnsport.c b/services/listen_dnsport.c index e74d1abcf..b73a0cc95 100644 --- a/services/listen_dnsport.c +++ b/services/listen_dnsport.c @@ -851,13 +851,16 @@ create_tcp_accept_sock(struct addrinfo *addr, int v6only, int* noproto, #ifdef ENOPROTOOPT /* squelch ENOPROTOOPT: freebsd server mode with kernel support disabled, except when verbosity enabled for debugging */ - if(errno != ENOPROTOOPT || verbosity >= 3) + if(errno != ENOPROTOOPT || verbosity >= 3) { #endif if(errno == EPERM) { log_warn("Setting TCP Fast Open as server failed: %s ; this could likely be because sysctl net.inet.tcp.fastopen.enabled, net.inet.tcp.fastopen.server_enable, or net.ipv4.tcp_fastopen is disabled", strerror(errno)); } else { log_err("Setting TCP Fast Open as server failed: %s", 
strerror(errno)); } +#ifdef ENOPROTOOPT + } +#endif } #endif return s; diff --git a/services/mesh.c b/services/mesh.c index bee0f76a4..d96289e3a 100644 --- a/services/mesh.c +++ b/services/mesh.c @@ -1341,21 +1341,14 @@ int mesh_state_add_reply(struct mesh_state* s, struct edns_data* edns, log_assert(qinfo->local_alias->rrset->rk.dname == sldns_buffer_at(rep->c->buffer, LDNS_HEADER_SIZE)); - d = regional_alloc_init(s->s.region, dsrc, - sizeof(struct packed_rrset_data) - + sizeof(size_t) + sizeof(uint8_t*) + sizeof(time_t)); + /* the rrset is not packed, like in the cache, but it is + * individualy allocated with an allocator from localzone. */ + d = regional_alloc_zero(s->s.region, sizeof(*d)); if(!d) return 0; r->local_alias->rrset->entry.data = d; - d->rr_len = (size_t*)((uint8_t*)d + - sizeof(struct packed_rrset_data)); - d->rr_data = (uint8_t**)&(d->rr_len[1]); - d->rr_ttl = (time_t*)&(d->rr_data[1]); - d->rr_len[0] = dsrc->rr_len[0]; - d->rr_ttl[0] = dsrc->rr_ttl[0]; - d->rr_data[0] = regional_alloc_init(s->s.region, - dsrc->rr_data[0], d->rr_len[0]); - if(!d->rr_data[0]) + if(!rrset_insert_rr(s->s.region, d, dsrc->rr_data[0], + dsrc->rr_len[0], dsrc->rr_ttl[0], "CNAME local alias")) return 0; } else r->local_alias = NULL; diff --git a/testdata/auth_nsec3_ent.rpl b/testdata/auth_nsec3_ent.rpl new file mode 100644 index 000000000..5730c8d52 --- /dev/null +++ b/testdata/auth_nsec3_ent.rpl @@ -0,0 +1,224 @@ +; config options +server: + target-fetch-policy: "0 0 0 0 0" + +auth-zone: + name: "unbound-auth-test.nlnetlabs.nl." + ## zonefile (or none). + ## zonefile: "example.com.zone" + ## master by IP address or hostname + ## can list multiple masters, each on one line. + ## master: + ## url for http fetch + ## url: + ## queries from downstream clients get authoritative answers. + ## for-downstream: yes + for-downstream: yes + ## queries are used to fetch authoritative answers from this zone, + ## instead of unbound itself sending queries there. + ## for-upstream: yes + for-upstream: yes + ## on failures with for-upstream, fallback to sending queries to + ## the authority servers + ## fallback-enabled: no + + ## this line generates zonefile: \n"/tmp/xxx.example.com"\n + zonefile: +TEMPFILE_NAME unbound-auth-test.nlnetlabs.nl + ## this is the inline file /tmp/xxx.unbound-auth-test.nlnetlabs.nl + ## the tempfiles are deleted when the testrun is over. +TEMPFILE_CONTENTS unbound-auth-test.nlnetlabs.nl +;; Zone: unbound-auth-test.nlnetlabs.nl. +; +unbound-auth-test.nlnetlabs.nl. 3600 IN SOA ns.nlnetlabs.nl. ralph.nlnetlabs.nl. 1554201247 14400 3600 604800 3600 +unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG SOA 13 3 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. NLFcC2oet+HC+1dhT4D/2JJFIcMiRtTM81KwvT7u8ybF3iDE4bnyrILvQk8DsizpYKwk+D3J3tMC3TV5+//qFw== +; +unbound-auth-test.nlnetlabs.nl. 3600 IN NS ns.nlnetlabs.nl. +unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG NS 13 3 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. Gm0UF77ljiInG4/HZ6Tkzx7z9N45WwwmbBt9KxeN3z1BkdBLiy10Du71ZBFLP71b+USs1rv5SJQ0hteZFbl8sg== +unbound-auth-test.nlnetlabs.nl. 3600 IN DNSKEY 256 3 13 S3Da9HqpFj0pEbI8WXOdkvN3vgZ6qxNSz4XyKkmWWAG28kq5T+/lWp36DUDvnMI9wJNuixzUHtgZ6oZoAaVrPg== ;{id = 15486 (zsk), size = 256b} +unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG DNSKEY 13 3 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. 1cLFaDb6kP8KnRJujW1ieHUdS5Tgdv59TCZ+FloCRJMJBwQAow6UKAIY7HHlTb8IHTajyUrjlxX/dN8S/5VwuA== +unbound-auth-test.nlnetlabs.nl. 
3600 IN NSEC3PARAM 1 0 1 - +unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG NSEC3PARAM 13 3 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. GWgtJArNpfJ4ifoinUBUVRTlkk0CMemdozhMKY13dk3EQMP0jb4g49PcTAgEP2dBUs9efttQVQQpmFPyTGfN1w== +tvdhfml24jp7cott1qijj9812qu9ibh3.unbound-auth-test.nlnetlabs.nl. 3600 IN NSEC3 1 0 1 - 41pcah2j3fr8k99gj5pveh4igrjfc871 NS SOA RRSIG DNSKEY NSEC3PARAM ;{ flags: -, from: unbound-auth-test.nlnetlabs.nl. to: b.b.unbound-auth-test.nlnetlabs.nl.} +tvdhfml24jp7cott1qijj9812qu9ibh3.unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG NSEC3 13 4 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. DzwQTaZj4j29eHXEKllIFcq4yNWA7VMqkh8+gCrBO+GEek9+hGxL6ANsU0Hv6glyBmPDeYUZcy4xy0EEj1R4hQ== +; +;; Empty nonterminal: b.unbound-auth-test.nlnetlabs.nl. +apejmh1fqds9gir0nnsf4d5gtno10tg1.unbound-auth-test.nlnetlabs.nl. 3600 IN NSEC3 1 0 1 - dbs0aj50410urbvt3ghfr644n7h06gs5 ;{ flags: -, from: b.unbound-auth-test.nlnetlabs.nl. to: c.b.unbound-auth-test.nlnetlabs.nl.} +apejmh1fqds9gir0nnsf4d5gtno10tg1.unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG NSEC3 13 4 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. m9B0W8xDZF6ml/m8OujrZZBiF1O0wAeKciK/5FMT/hCjHR0hMrbXBPg/ZntpVJD/Pko2HcBvWKu87U721yTHyQ== +; +;; Empty nonterminal: a.b.unbound-auth-test.nlnetlabs.nl. +toqivctpt4pdcp5g19neqt19fvtgbgeu.unbound-auth-test.nlnetlabs.nl. 3600 IN NSEC3 1 0 1 - tvdhfml24jp7cott1qijj9812qu9ibh3 ;{ flags: -, from: a.b.unbound-auth-test.nlnetlabs.nl. to: unbound-auth-test.nlnetlabs.nl.} +toqivctpt4pdcp5g19neqt19fvtgbgeu.unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG NSEC3 13 4 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. Jr1oPPs+DGBVV13n4gG4AGVFsleItluLbtCIyQDcYZEA+e5JMkrLzfW3rXqXaUSUauR4iEu5FmTfs4GTsumdUw== +; +*.a.b.unbound-auth-test.nlnetlabs.nl. 3600 IN TXT "*.a.b" +*.a.b.unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG TXT 13 5 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. NrMUaNzZp88lXit/HLL/iDBHspDSfoM//K+/0VwUYRZjmVJQQHCHtHBGgR4NgrLi3ffvCAWq2LNGxDm+YMSl3g== +jrtu61ssgd18lfjglqrbbs5b2vmbh6cl.unbound-auth-test.nlnetlabs.nl. 3600 IN NSEC3 1 0 1 - k8r2bchsbehs5dbu5d6ivjfnmjb3jc8s TXT RRSIG ;{ flags: -, from: *.a.b.unbound-auth-test.nlnetlabs.nl. to: *.c.b.unbound-auth-test.nlnetlabs.nl.} +jrtu61ssgd18lfjglqrbbs5b2vmbh6cl.unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG NSEC3 13 4 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. kLIhE9+iz1OybJwXbtRJZst+Mk5u4OAtpZGWSwJUfqD6dXAk+h6msKAR18jpPeL7cCjXjIAKIv3x4oYRkl+uKw== +; +;; Empty nonterminal: b.b.unbound-auth-test.nlnetlabs.nl. +41pcah2j3fr8k99gj5pveh4igrjfc871.unbound-auth-test.nlnetlabs.nl. 3600 IN NSEC3 1 0 1 - apejmh1fqds9gir0nnsf4d5gtno10tg1 ;{ flags: -, from: b.b.unbound-auth-test.nlnetlabs.nl. to: b.unbound-auth-test.nlnetlabs.nl.} +41pcah2j3fr8k99gj5pveh4igrjfc871.unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG NSEC3 13 4 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. XlIjnuF313w0GXn6vymrAcsyuxZSaN6IShFjxQ5T2HUFePHBNvtRkL+TtMQZNlR8nTR3+MWcON0cOZIGjVCCjg== +; +*.b.b.unbound-auth-test.nlnetlabs.nl. 3600 IN TXT "*.b.b" +*.b.b.unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG TXT 13 5 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. FkS3ceWpoHyOKaa8OtywIl148Bwo0vkzBd263vqYe0puhuRa6IvNEk5ERdwfWt9eNEq+6IlizPT/dYxA2fXYXA== +ft7dasbom0copm9e2ak9k151dj08kjfs.unbound-auth-test.nlnetlabs.nl. 
3600 IN NSEC3 1 0 1 - jrtu61ssgd18lfjglqrbbs5b2vmbh6cl TXT RRSIG ;{ flags: -, from: *.b.b.unbound-auth-test.nlnetlabs.nl. to: *.a.b.unbound-auth-test.nlnetlabs.nl.} +ft7dasbom0copm9e2ak9k151dj08kjfs.unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG NSEC3 13 4 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. 5QhLGohTRLQSGC8vstzDjqcwfrbOnLUG2OelSjvsZFy1smsWUxJBCQXQdx1+JX7xamZHlZESQtS+cELuZUqpvA== +; +;; Empty nonterminal: c.b.unbound-auth-test.nlnetlabs.nl. +dbs0aj50410urbvt3ghfr644n7h06gs5.unbound-auth-test.nlnetlabs.nl. 3600 IN NSEC3 1 0 1 - ft7dasbom0copm9e2ak9k151dj08kjfs ;{ flags: -, from: c.b.unbound-auth-test.nlnetlabs.nl. to: *.b.b.unbound-auth-test.nlnetlabs.nl.} +dbs0aj50410urbvt3ghfr644n7h06gs5.unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG NSEC3 13 4 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. hjk1foJWW68JK3O1Ktf0ZogoXVrMDw3mHVBBYTrpaBKX1gWR5icmJiOCYZWYx3z88PUnGkfH+kx4oDUjioqN+Q== +; +*.c.b.unbound-auth-test.nlnetlabs.nl. 3600 IN TXT "*.c.b" +*.c.b.unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG TXT 13 5 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. b7rFR5tlx5Y5SQqNdYBtfD6DrkNx9h79GCmnZfWrUzRz+A256k2v08IPRJDK+WxEHuYHjfNnVWxjRr9M1OW2Iw== +k8r2bchsbehs5dbu5d6ivjfnmjb3jc8s.unbound-auth-test.nlnetlabs.nl. 3600 IN NSEC3 1 0 1 - toqivctpt4pdcp5g19neqt19fvtgbgeu TXT RRSIG ;{ flags: -, from: *.c.b.unbound-auth-test.nlnetlabs.nl. to: a.b.unbound-auth-test.nlnetlabs.nl.} +k8r2bchsbehs5dbu5d6ivjfnmjb3jc8s.unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG NSEC3 13 4 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. 34BS1ajedCNdfXgUfxTyiAK1ichfFLshhJ3TnfplrUps0UsZaQLEG+EIlP4wTBtro2c6V8YCSmOuxuce4gYoDw== +; +TEMPFILE_END + +stub-zone: + name: "." + stub-addr: 193.0.14.129 # K.ROOT-SERVERS.NET. +CONFIG_END + +SCENARIO_BEGIN Test authority zone with NSEC3 empty nonterminal +; with exact match NSEC3 in existence (eg. not a CE-proof) + +; K.ROOT-SERVERS.NET. +RANGE_BEGIN 0 100 + ADDRESS 193.0.14.129 +ENTRY_BEGIN +MATCH opcode qtype qname +ADJUST copy_id +REPLY QR NOERROR +SECTION QUESTION +. IN NS +SECTION ANSWER +. IN NS K.ROOT-SERVERS.NET. +SECTION ADDITIONAL +K.ROOT-SERVERS.NET. IN A 193.0.14.129 +ENTRY_END + +ENTRY_BEGIN +MATCH opcode subdomain +ADJUST copy_id copy_query +REPLY QR NOERROR +SECTION QUESTION +com. IN NS +SECTION AUTHORITY +com. IN NS a.gtld-servers.net. +SECTION ADDITIONAL +a.gtld-servers.net. IN A 192.5.6.30 +ENTRY_END +RANGE_END + +; a.gtld-servers.net. +RANGE_BEGIN 0 100 + ADDRESS 192.5.6.30 +ENTRY_BEGIN +MATCH opcode qtype qname +ADJUST copy_id +REPLY QR NOERROR +SECTION QUESTION +com. IN NS +SECTION ANSWER +com. IN NS a.gtld-servers.net. +SECTION ADDITIONAL +a.gtld-servers.net. IN A 192.5.6.30 +ENTRY_END + +ENTRY_BEGIN +MATCH opcode subdomain +ADJUST copy_id copy_query +REPLY QR NOERROR +SECTION QUESTION +example.com. IN NS +SECTION AUTHORITY +example.com. IN NS ns.example.com. +SECTION ADDITIONAL +ns.example.com. IN A 1.2.3.44 +ENTRY_END +RANGE_END + +; ns.example.net. +RANGE_BEGIN 0 100 + ADDRESS 1.2.3.44 +ENTRY_BEGIN +MATCH opcode qtype qname +ADJUST copy_id +REPLY QR NOERROR +SECTION QUESTION +example.net. IN NS +SECTION ANSWER +example.net. IN NS ns.example.net. +SECTION ADDITIONAL +ns.example.net. IN A 1.2.3.44 +ENTRY_END + +ENTRY_BEGIN +MATCH opcode qtype qname +ADJUST copy_id +REPLY QR NOERROR +SECTION QUESTION +ns.example.net. IN A +SECTION ANSWER +ns.example.net. IN A 1.2.3.44 +SECTION AUTHORITY +example.net. IN NS ns.example.net. 
+ENTRY_END + +ENTRY_BEGIN +MATCH opcode qtype qname +ADJUST copy_id +REPLY QR NOERROR +SECTION QUESTION +ns.example.net. IN AAAA +SECTION AUTHORITY +example.net. IN NS ns.example.net. +SECTION ADDITIONAL +www.example.net. IN A 1.2.3.44 +ENTRY_END + +ENTRY_BEGIN +MATCH opcode qtype qname +ADJUST copy_id +REPLY QR NOERROR +SECTION QUESTION +example.com. IN NS +SECTION ANSWER +example.com. IN NS ns.example.net. +ENTRY_END + +ENTRY_BEGIN +MATCH opcode qtype qname +ADJUST copy_id +REPLY QR NOERROR +SECTION QUESTION +www.example.com. IN A +SECTION ANSWER +www.example.com. IN A 10.20.30.40 +ENTRY_END +RANGE_END + +STEP 1 QUERY +ENTRY_BEGIN +REPLY RD DO +SECTION QUESTION +a.b.unbound-auth-test.nlnetlabs.nl. IN TXT +ENTRY_END + +; recursion happens here. +STEP 20 CHECK_ANSWER +ENTRY_BEGIN +MATCH all +REPLY QR AA RD RA DO NOERROR +SECTION QUESTION +a.b.unbound-auth-test.nlnetlabs.nl. IN TXT +SECTION ANSWER +SECTION AUTHORITY +unbound-auth-test.nlnetlabs.nl. 3600 IN SOA ns.nlnetlabs.nl. ralph.nlnetlabs.nl. 1554201247 14400 3600 604800 3600 +unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG SOA 13 3 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. NLFcC2oet+HC+1dhT4D/2JJFIcMiRtTM81KwvT7u8ybF3iDE4bnyrILv Qk8DsizpYKwk+D3J3tMC3TV5+//qFw== +toqivctpt4pdcp5g19neqt19fvtgbgeu.unbound-auth-test.nlnetlabs.nl. 3600 IN NSEC3 1 0 1 - TVDHFML24JP7COTT1QIJJ9812QU9IBH3 +toqivctpt4pdcp5g19neqt19fvtgbgeu.unbound-auth-test.nlnetlabs.nl. 3600 IN RRSIG NSEC3 13 4 3600 20190430103407 20190402103407 15486 unbound-auth-test.nlnetlabs.nl. Jr1oPPs+DGBVV13n4gG4AGVFsleItluLbtCIyQDcYZEA+e5JMkrLzfW3 rXqXaUSUauR4iEu5FmTfs4GTsumdUw== +ENTRY_END + +SCENARIO_END diff --git a/testdata/auth_nsec3_wild.rpl b/testdata/auth_nsec3_wild.rpl index acfe63bae..1aeeebacc 100644 --- a/testdata/auth_nsec3_wild.rpl +++ b/testdata/auth_nsec3_wild.rpl @@ -200,4 +200,31 @@ i6pi4e3o98e7vtkpjfhqn7g77d3mjcnv.test-ns-signed.dev.internet.nl. 3600 IN NSEC3 1 i6pi4e3o98e7vtkpjfhqn7g77d3mjcnv.test-ns-signed.dev.internet.nl. 3600 IN RRSIG NSEC3 8 5 3600 20190205132351 20190108132351 32784 test-ns-signed.dev.internet.nl. xLysIqn3r3rdHE3GvwVjZwUyuFClhkhgrQdwyc66RuHKE3MfSuhVr9cHTCJzhipF5TwQTbUpLOr74r99bzdiIY8Xkgjy2M0nc76v1ObSGJdPPjGTevbhDOnavUURwOR/q0NqqO2iPrgFjOVMZ+8uwRJtCty2iAVZfVG+qDzs8hU= ENTRY_END +; Check that the reply for a wildcard nodata answer contains the NSEC3s. +; qname denial NSEC3, closest encloser NSEC3, and type bitmap NSEC3. +STEP 30 QUERY +ENTRY_BEGIN +REPLY RD DO +SECTION QUESTION +something.a.b.test-ns-signed.dev.internet.nl. IN AAAA +ENTRY_END + +STEP 40 CHECK_ANSWER +ENTRY_BEGIN +MATCH all +REPLY QR AA RD RA DO NOERROR +SECTION QUESTION +something.a.b.test-ns-signed.dev.internet.nl. IN AAAA +SECTION ANSWER +SECTION AUTHORITY +test-ns-signed.dev.internet.nl. 3600 IN SOA ns.nlnetlabs.nl. ralph.nlnetlabs.nl. 4 14400 3600 604800 3600 +test-ns-signed.dev.internet.nl. 3600 IN RRSIG SOA 8 4 3600 20190205132351 20190108132351 32784 test-ns-signed.dev.internet.nl. ybb0Hc7NC+QOFEEv4cX2+Umlk+miiOAHmeP2Uwvg6lqfxkk+3g7yWBEKMinXjLKz0odWZ6fki6M/3yBPQX8SV0OCRY5gYvAHAjbxAIHozIM+5iwOkRQhNF1DRgQ3BLjL93f6T5e5Z4y1812iOpu4GYswXW/UTOZACXz2UiaCPAg= ;{id = 32784} +7ag3p2pfrvq09dpn63cvga8ub1rnrrg1.test-ns-signed.dev.internet.nl. 3600 IN NSEC3 1 0 1 - 93stp7o7i5n9gb83uu7vv6h8qltk14ig TXT RRSIG +7ag3p2pfrvq09dpn63cvga8ub1rnrrg1.test-ns-signed.dev.internet.nl. 3600 IN RRSIG NSEC3 8 5 3600 20190205132351 20190108132351 32784 test-ns-signed.dev.internet.nl. 
gtxoiTa3FRUqoRLvkWSxmWQ+DfijVd26gpKH3+GmGIcNB/sr/Cf8kERRwVVHvgzYIcvdJcys5b2LUXnZJwcdAlx7efZPWgNZzWxJrw6ES25LCWJOrp31isWn9FlAZGIbnpyEXxD2apBSmtyPnKbTgU6lHHS9jrsYHu4G8Zouv3k= ;{id = 32784} +fee0c2kfhi6bnljce6vehaenqq3pbupu.test-ns-signed.dev.internet.nl. 3600 IN NSEC3 1 0 1 - i6pi4e3o98e7vtkpjfhqn7g77d3mjcnv +fee0c2kfhi6bnljce6vehaenqq3pbupu.test-ns-signed.dev.internet.nl. 3600 IN RRSIG NSEC3 8 5 3600 20190205132351 20190108132351 32784 test-ns-signed.dev.internet.nl. WIb3ISP1nlafbyWoWa4z7sG5IS+V86PyvEMHdD/64hgsFkrCu483XK7VNnBz28SL/631JXA1R19O+UxeWhTUyctp8QSt6cEZcMPY8b7yG97rNFNvhSw75rSXXt+JwgIYHPHQV5oqPtVmEpQM5SfJd+hs+Nn1bJcWB3UaESNNAMQ= ;{id = 32784} +i6pi4e3o98e7vtkpjfhqn7g77d3mjcnv.test-ns-signed.dev.internet.nl. 3600 IN NSEC3 1 0 1 - kl94uofq16t2vlq0bmampf6e4o9k5hbi A AAAA RRSIG +i6pi4e3o98e7vtkpjfhqn7g77d3mjcnv.test-ns-signed.dev.internet.nl. 3600 IN RRSIG NSEC3 8 5 3600 20190205132351 20190108132351 32784 test-ns-signed.dev.internet.nl. xLysIqn3r3rdHE3GvwVjZwUyuFClhkhgrQdwyc66RuHKE3MfSuhVr9cHTCJzhipF5TwQTbUpLOr74r99bzdiIY8Xkgjy2M0nc76v1ObSGJdPPjGTevbhDOnavUURwOR/q0NqqO2iPrgFjOVMZ+8uwRJtCty2iAVZfVG+qDzs8hU= ;{id = 32784} +ENTRY_END + SCENARIO_END diff --git a/util/alloc.c b/util/alloc.c index 908b1f423..7e9618931 100644 --- a/util/alloc.c +++ b/util/alloc.c @@ -376,6 +376,7 @@ void *unbound_stat_malloc(size_t size) { void* res; if(size == 0) size = 1; + log_assert(size <= SIZE_MAX-16); res = malloc(size+16); if(!res) return NULL; unbound_mem_alloc += size; @@ -398,6 +399,7 @@ void *unbound_stat_calloc(size_t nmemb, size_t size) if(nmemb != 0 && INT_MAX/nmemb < size) return NULL; /* integer overflow check */ s = (nmemb*size==0)?(size_t)1:nmemb*size; + log_assert(s <= SIZE_MAX-16); res = calloc(1, s+16); if(!res) return NULL; log_info("stat %p=calloc(%u, %u)", res+16, (unsigned)nmemb, (unsigned)size); @@ -447,6 +449,7 @@ void *unbound_stat_realloc(void *ptr, size_t size) /* nothing changes */ return ptr; } + log_assert(size <= SIZE_MAX-16); res = malloc(size+16); if(!res) return NULL; unbound_mem_alloc += size; @@ -521,7 +524,9 @@ void *unbound_stat_malloc_lite(size_t size, const char* file, int line, const char* func) { /* [prefix .. len .. actual data .. suffix] */ - void* res = malloc(size+lite_pad*2+sizeof(size_t)); + void* res; + log_assert(size <= SIZE_MAX-(lite_pad*2+sizeof(size_t))); + res = malloc(size+lite_pad*2+sizeof(size_t)); if(!res) return NULL; memmove(res, lite_pre, lite_pad); memmove(res+lite_pad, &size, sizeof(size_t)); @@ -538,6 +543,7 @@ void *unbound_stat_calloc_lite(size_t nmemb, size_t size, const char* file, if(nmemb != 0 && INT_MAX/nmemb < size) return NULL; /* integer overflow check */ req = nmemb * size; + log_assert(req <= SIZE_MAX-(lite_pad*2+sizeof(size_t))); res = malloc(req+lite_pad*2+sizeof(size_t)); if(!res) return NULL; memmove(res, lite_pre, lite_pad); diff --git a/util/data/msgreply.h b/util/data/msgreply.h index a455c4d2b..8d75f9b12 100644 --- a/util/data/msgreply.h +++ b/util/data/msgreply.h @@ -157,7 +157,7 @@ struct reply_info { time_t prefetch_ttl; /** - * Reply TTL extended with serve exipred TTL, to limit time to serve + * Reply TTL extended with serve expired TTL, to limit time to serve * expired message. 
*/ time_t serve_expired_ttl; diff --git a/util/fptr_wlist.c b/util/fptr_wlist.c index 02f85e8dc..94d23fa3a 100644 --- a/util/fptr_wlist.c +++ b/util/fptr_wlist.c @@ -127,6 +127,7 @@ fptr_whitelist_comm_timer(void (*fptr)(void*)) #endif else if(fptr == &auth_xfer_timer) return 1; else if(fptr == &auth_xfer_probe_timer_callback) return 1; + else if(fptr == &auth_xfer_transfer_timer_callback) return 1; return 0; } diff --git a/util/iana_ports.inc b/util/iana_ports.inc index 5ead47f0f..6873ca9ab 100644 --- a/util/iana_ports.inc +++ b/util/iana_ports.inc @@ -4864,6 +4864,7 @@ 8805, 8807, 8808, +8809, 8873, 8880, 8883, diff --git a/util/net_help.c b/util/net_help.c index 2b1be9246..13bcdf808 100644 --- a/util/net_help.c +++ b/util/net_help.c @@ -802,6 +802,16 @@ void* listen_sslctx_create(char* key, char* pem, char* verifypem) log_crypto_err("could not SSL_CTX_new"); return NULL; } + if(!key || key[0] == 0) { + log_err("error: no tls-service-key file specified"); + SSL_CTX_free(ctx); + return NULL; + } + if(!pem || pem[0] == 0) { + log_err("error: no tls-service-pem file specified"); + SSL_CTX_free(ctx); + return NULL; + } if(!listen_sslctx_setup(ctx)) { SSL_CTX_free(ctx); return NULL; @@ -1235,7 +1245,12 @@ listen_sslctx_delete_ticket_keys(void) struct tls_session_ticket_key *key; if(!ticket_keys) return; for(key = ticket_keys; key->key_name != NULL; key++) { - memset(key->key_name, 0xdd, 80); /* wipe key data from memory*/ + /* wipe key data from memory*/ +#ifdef HAVE_EXPLICIT_BZERO + explicit_bzero(key->key_name, 80); +#else + memset(key->key_name, 0xdd, 80); +#endif free(key->key_name); } free(ticket_keys); diff --git a/util/netevent.c b/util/netevent.c index f33e44058..b8b2a0900 100644 --- a/util/netevent.c +++ b/util/netevent.c @@ -178,7 +178,7 @@ comm_base_create(int sigs) } ub_comm_base_now(b); ub_get_event_sys(b->eb->base, &evnm, &evsys, &evmethod); - verbose(VERB_ALGO, "%s %s user %s method.", evnm, evsys, evmethod); + verbose(VERB_ALGO, "%s %s uses %s method.", evnm, evsys, evmethod); return b; } @@ -926,6 +926,14 @@ comm_point_tcp_accept_callback(int fd, short event, void* arg) } /* accept incoming connection. 
*/ c_hdl = c->tcp_free; + /* clear leftover flags from previous use, and then set the + * correct event base for the event structure for libevent */ + ub_event_free(c_hdl->ev->ev); + c_hdl->ev->ev = ub_event_new(c_hdl->ev->base->eb->base, -1, UB_EV_PERSIST | UB_EV_READ | UB_EV_TIMEOUT, comm_point_tcp_handle_callback, c_hdl); + if(!c_hdl->ev->ev) { + log_warn("could not ub_event_new, dropped tcp"); + return; + } log_assert(fd != -1); (void)fd; new_fd = comm_point_perform_accept(c, &c_hdl->repinfo.addr, @@ -1184,6 +1192,10 @@ ssl_handle_read(struct comm_point* c) comm_point_listen_for_rw(c, 0, 1); return 1; } else if(want == SSL_ERROR_SYSCALL) { +#ifdef ECONNRESET + if(errno == ECONNRESET && verbosity < 2) + return 0; /* silence reset by peer */ +#endif if(errno != 0) log_err("SSL_read syscall: %s", strerror(errno)); @@ -1228,6 +1240,10 @@ ssl_handle_read(struct comm_point* c) comm_point_listen_for_rw(c, 0, 1); return 1; } else if(want == SSL_ERROR_SYSCALL) { +#ifdef ECONNRESET + if(errno == ECONNRESET && verbosity < 2) + return 0; /* silence reset by peer */ +#endif if(errno != 0) log_err("SSL_read syscall: %s", strerror(errno)); @@ -1288,13 +1304,17 @@ ssl_handle_write(struct comm_point* c) if(want == SSL_ERROR_ZERO_RETURN) { return 0; /* closed */ } else if(want == SSL_ERROR_WANT_READ) { - c->ssl_shake_state = comm_ssl_shake_read; + c->ssl_shake_state = comm_ssl_shake_hs_read; comm_point_listen_for_rw(c, 1, 0); return 1; /* wait for read condition */ } else if(want == SSL_ERROR_WANT_WRITE) { ub_winsock_tcp_wouldblock(c->ev->ev, UB_EV_WRITE); return 1; /* write more later */ } else if(want == SSL_ERROR_SYSCALL) { +#ifdef EPIPE + if(errno == EPIPE && verbosity < 2) + return 0; /* silence 'broken pipe' */ +#endif if(errno != 0) log_err("SSL_write syscall: %s", strerror(errno)); @@ -1322,13 +1342,17 @@ ssl_handle_write(struct comm_point* c) if(want == SSL_ERROR_ZERO_RETURN) { return 0; /* closed */ } else if(want == SSL_ERROR_WANT_READ) { - c->ssl_shake_state = comm_ssl_shake_read; + c->ssl_shake_state = comm_ssl_shake_hs_read; comm_point_listen_for_rw(c, 1, 0); return 1; /* wait for read condition */ } else if(want == SSL_ERROR_WANT_WRITE) { ub_winsock_tcp_wouldblock(c->ev->ev, UB_EV_WRITE); return 1; /* write more later */ } else if(want == SSL_ERROR_SYSCALL) { +#ifdef EPIPE + if(errno == EPIPE && verbosity < 2) + return 0; /* silence 'broken pipe' */ +#endif if(errno != 0) log_err("SSL_write syscall: %s", strerror(errno)); @@ -1738,6 +1762,16 @@ comm_point_tcp_handle_callback(int fd, short event, void* arg) } #endif + if(event&UB_EV_TIMEOUT) { + verbose(VERB_QUERY, "tcp took too long, dropped"); + reclaim_tcp_handler(c); + if(!c->tcp_do_close) { + fptr_ok(fptr_whitelist_comm_point(c->callback)); + (void)(*c->callback)(c, c->cb_arg, + NETEVENT_TIMEOUT, NULL); + } + return; + } if(event&UB_EV_READ) { int has_tcpq = (c->tcp_req_info != NULL); if(!comm_point_tcp_handle_read(fd, c, 0)) { @@ -1768,16 +1802,6 @@ comm_point_tcp_handle_callback(int fd, short event, void* arg) tcp_req_info_read_again(fd, c); return; } - if(event&UB_EV_TIMEOUT) { - verbose(VERB_QUERY, "tcp took too long, dropped"); - reclaim_tcp_handler(c); - if(!c->tcp_do_close) { - fptr_ok(fptr_whitelist_comm_point(c->callback)); - (void)(*c->callback)(c, c->cb_arg, - NETEVENT_TIMEOUT, NULL); - } - return; - } log_err("Ignored event %d for tcphdl.", event); } @@ -1826,6 +1850,10 @@ ssl_http_read_more(struct comm_point* c) comm_point_listen_for_rw(c, 0, 1); return 1; } else if(want == SSL_ERROR_SYSCALL) { +#ifdef ECONNRESET + 
if(errno == ECONNRESET && verbosity < 2) + return 0; /* silence reset by peer */ +#endif if(errno != 0) log_err("SSL_read syscall: %s", strerror(errno)); @@ -2268,12 +2296,16 @@ ssl_http_write_more(struct comm_point* c) if(want == SSL_ERROR_ZERO_RETURN) { return 0; /* closed */ } else if(want == SSL_ERROR_WANT_READ) { - c->ssl_shake_state = comm_ssl_shake_read; + c->ssl_shake_state = comm_ssl_shake_hs_read; comm_point_listen_for_rw(c, 1, 0); return 1; /* wait for read condition */ } else if(want == SSL_ERROR_WANT_WRITE) { return 1; /* write more later */ } else if(want == SSL_ERROR_SYSCALL) { +#ifdef EPIPE + if(errno == EPIPE && verbosity < 2) + return 0; /* silence 'broken pipe' */ +#endif if(errno != 0) log_err("SSL_write syscall: %s", strerror(errno)); @@ -2382,6 +2414,16 @@ comm_point_http_handle_callback(int fd, short event, void* arg) log_assert(c->type == comm_http); ub_comm_base_now(c->ev->base); + if(event&UB_EV_TIMEOUT) { + verbose(VERB_QUERY, "http took too long, dropped"); + reclaim_http_handler(c); + if(!c->tcp_do_close) { + fptr_ok(fptr_whitelist_comm_point(c->callback)); + (void)(*c->callback)(c, c->cb_arg, + NETEVENT_TIMEOUT, NULL); + } + return; + } if(event&UB_EV_READ) { if(!comm_point_http_handle_read(fd, c)) { reclaim_http_handler(c); @@ -2406,16 +2448,6 @@ comm_point_http_handle_callback(int fd, short event, void* arg) } return; } - if(event&UB_EV_TIMEOUT) { - verbose(VERB_QUERY, "http took too long, dropped"); - reclaim_http_handler(c); - if(!c->tcp_do_close) { - fptr_ok(fptr_whitelist_comm_point(c->callback)); - (void)(*c->callback)(c, c->cb_arg, - NETEVENT_TIMEOUT, NULL); - } - return; - } log_err("Ignored event %d for httphdl.", event); } @@ -3138,8 +3170,8 @@ comm_point_stop_listening(struct comm_point* c) void comm_point_start_listening(struct comm_point* c, int newfd, int msec) { - verbose(VERB_ALGO, "comm point start listening %d", - c->fd==-1?newfd:c->fd); + verbose(VERB_ALGO, "comm point start listening %d (%d msec)", + c->fd==-1?newfd:c->fd, msec); if(c->type == comm_tcp_accept && !c->tcp_free) { /* no use to start listening no free slots. */ return; diff --git a/util/ub_event.c b/util/ub_event.c index 78481a982..e097fbc40 100644 --- a/util/ub_event.c +++ b/util/ub_event.c @@ -295,11 +295,18 @@ ub_event_new(struct ub_event_base* base, int fd, short bits, if (!ev) return NULL; +#ifndef HAVE_EVENT_ASSIGN event_set(ev, fd, NATIVE_BITS(bits), NATIVE_BITS_CB(cb), arg); if (event_base_set(AS_EVENT_BASE(base), ev) != 0) { free(ev); return NULL; } +#else + if (event_assign(ev, AS_EVENT_BASE(base), fd, bits, cb, arg) != 0) { + free(ev); + return NULL; + } +#endif return AS_UB_EVENT(ev); } @@ -312,11 +319,18 @@ ub_signal_new(struct ub_event_base* base, int fd, if (!ev) return NULL; +#if !HAVE_DECL_EVSIGNAL_ASSIGN signal_set(ev, fd, NATIVE_BITS_CB(cb), arg); if (event_base_set(AS_EVENT_BASE(base), ev) != 0) { free(ev); return NULL; } +#else + if (evsignal_assign(ev, AS_EVENT_BASE(base), fd, cb, arg) != 0) { + free(ev); + return NULL; + } +#endif return AS_UB_EVENT(ev); }
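For reference, a minimal standalone sketch of the event-creation fallback used in the ub_event.c hunk above, assuming libevent 2.x headers. It is illustrative only and not part of the patch: make_read_event and its arguments are hypothetical names, and HAVE_EVENT_ASSIGN stands in for the configure-detected macro added earlier in this change set. The point of the change is that event_assign() binds the callback, fd, flags and the owning event_base in a single call (the thread-safe initialization path), whereas the legacy event_set() + event_base_set() pair first initializes the event against the default base and only then re-attaches it.

/* Minimal sketch (illustrative, not part of the patch): create a libevent
 * read event bound to a specific event_base, preferring event_assign()
 * when configure detected it, else falling back to the legacy pair. */
#include <stdlib.h>
#include <event2/event.h>
#include <event2/event_struct.h>   /* struct event definition for sizeof */
#include <event2/event_compat.h>   /* legacy event_set()/event_base_set() */

static struct event *
make_read_event(struct event_base *base, evutil_socket_t fd,
	event_callback_fn cb, void *arg)
{
	struct event *ev = calloc(1, sizeof(*ev));
	if(!ev)
		return NULL;
#ifdef HAVE_EVENT_ASSIGN
	/* one call sets callback, fd, flags and base; returns 0 on success */
	if(event_assign(ev, base, fd, EV_READ|EV_PERSIST, cb, arg) != 0) {
		free(ev);
		return NULL;
	}
#else
	/* older API: initialize the event, then attach it to the base */
	event_set(ev, fd, EV_READ|EV_PERSIST, cb, arg);
	if(event_base_set(base, ev) != 0) {
		free(ev);
		return NULL;
	}
#endif
	return ev;
}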