# bin/tests/system/mirror/tests.sh

#!/bin/sh
#
# Copyright (C) Internet Systems Consortium, Inc. ("ISC")
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# See the COPYRIGHT file distributed with this work for additional
# information regarding copyright ownership.
# Test environment boilerplate: locate and source the shared system-test
# configuration (defines $DIG, $RNDC, $PORT, echo_i, nextpart*, etc.).
SYSTEMTESTTOP=..
. $SYSTEMTESTTOP/conf.sh
# Common dig options: query from 10.53.0.1, request DNSSEC data, short
# timeouts so a dead server fails fast.
DIGOPTS="-p ${PORT} -b 10.53.0.1 +dnssec +time=2 +tries=1 +multi"
# rndc invocation helper; callers append the target server address after -s.
RNDCCMD="$RNDC -c $SYSTEMTESTTOP/common/rndc.conf -p ${CONTROLPORT} -s"
# Wait until the transfer of the given zone to ns3 either completes
# successfully or is aborted by a verification failure or a REFUSED response
# from the master.  Matching on an arbitrary transfer status is deliberately
# avoided because some checks performed by this test cause transfer attempts
# to end with the "IXFR failed" status, which is followed by an AXFR retry,
# and it is the outcome of that retry which must be examined.
# Sets ret=1 if no matching result appears within ~10 seconds.
wait_for_transfer() {
	zone=$1
	tries=0
	while [ $tries -lt 10 ]; do
		# Only consider log lines that appear *after* one of the
		# transfer results we are looking for, by waiting for the
		# subsequent "freeing transfer context" message.  This
		# prevents races when checking for "mirror zone is now in
		# use" messages later on.
		if nextpartpeek ns3/named.run | \
			awk "matched; /'$zone\/IN'.*Transfer status: (success|verify failure|REFUSED)/ {matched=1}" | \
			grep "'$zone/IN'.*freeing transfer context" > /dev/null
		then
			return
		fi
		sleep 1
		tries=$((tries + 1))
	done
	echo_i "exceeded time limit waiting for proof of '$zone' being transferred to appear in ns3/named.run"
	ret=1
}
# Wait until loading the given zone ($1) on the server whose log is $3 either
# completes successfully for the specified serial number ($2) or fails.
# Sets ret=1 if neither outcome is observed within ~10 seconds.
wait_for_load() {
	zone=$1
	serial=$2
	log=$3
	tries=0
	while [ $tries -lt 10 ]; do
		# Only consider log lines that appear *after* one of the
		# loading-related messages we are looking for, by waiting for
		# the subsequent "zone_postload: (...): done" message.  This
		# prevents races when checking for "mirror zone is now in
		# use" messages later on.
		if nextpartpeek $log | \
			awk "matched; /$zone.*(loaded serial $serial|unable to load)/ {matched=1}" | \
			grep "zone_postload: zone $zone/IN: done" > /dev/null
		then
			return
		fi
		sleep 1
		tries=$((tries + 1))
	done
	echo_i "exceeded time limit waiting for proof of '$zone' being loaded to appear in $log"
	ret=1
}
# Trigger a reload of ns2 and wait until loading the given zone ($1) either
# completes with the given serial number ($2) or fails.
# (wait_for_load itself assigns the zone/serial globals, so the arguments are
# simply forwarded.)
reload_zone() {
	rndc_reload ns2 10.53.0.2
	wait_for_load "$1" "$2" ns2/named.run
}
# Accumulated failure count and per-test counter.
status=0
n=0
# Derive the serial numbers used below from the SOA RDATA of the zone
# template: "BAD" is the serial of the incorrectly signed updates, "GOOD"
# that of the correctly signed ones.
ORIGINAL_SERIAL=`awk '$2 == "SOA" {print $5}' ns2/verify.db.in`
UPDATED_SERIAL_BAD=`expr ${ORIGINAL_SERIAL} + 1`
UPDATED_SERIAL_GOOD=`expr ${ORIGINAL_SERIAL} + 2`
n=`expr $n + 1`
echo_i "checking that an unsigned mirror zone is rejected ($n)"
ret=0
wait_for_transfer verify-unsigned
$DIG $DIGOPTS @10.53.0.3 +norec verify-unsigned SOA > dig.out.ns3.test$n 2>&1 || ret=1
# The zone must not be served: expect an empty answer section and no SOA
# carrying the original serial.
grep "ANSWER: 0" dig.out.ns3.test$n > /dev/null || ret=1
grep "${ORIGINAL_SERIAL}.*; serial" dig.out.ns3.test$n > /dev/null && ret=1
# (removed: git commit-log text accidentally captured by the web scrape)
# The rejection reason must be logged, and a rejected mirror zone must never
# be announced as coming into use.
nextpartpeek ns3/named.run | grep "verify-unsigned.*Zone contains no DNSSEC keys" > /dev/null || ret=1
nextpartpeek ns3/named.run | grep "verify-unsigned.*mirror zone is now in use" > /dev/null && ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "checking that a mirror zone signed using an untrusted key is rejected ($n)"
ret=0
nextpartreset ns3/named.run
wait_for_transfer verify-untrusted
$DIG $DIGOPTS @10.53.0.3 +norec verify-untrusted SOA > dig.out.ns3.test$n 2>&1 || ret=1
# The zone must not be served and the verification failure must be logged.
grep "ANSWER: 0" dig.out.ns3.test$n > /dev/null || ret=1
grep "${ORIGINAL_SERIAL}.*; serial" dig.out.ns3.test$n > /dev/null && ret=1
nextpartpeek ns3/named.run | grep "verify-untrusted.*No trusted DNSKEY found" > /dev/null || ret=1
# (removed: git commit-log text accidentally captured by the web scrape)
# A rejected mirror zone must never be announced as coming into use.
nextpartpeek ns3/named.run | grep "verify-untrusted.*mirror zone is now in use" > /dev/null && ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "checking that a mirror zone signed using a CSK without the SEP bit set is accepted ($n)"
ret=0
nextpartreset ns3/named.run
wait_for_transfer verify-csk
$DIG $DIGOPTS @10.53.0.3 +norec verify-csk SOA > dig.out.ns3.test$n 2>&1 || ret=1
# This zone must be served: expect a non-empty answer carrying the original
# serial, plus the log message announcing the mirror zone coming into use.
grep "ANSWER: 0" dig.out.ns3.test$n > /dev/null && ret=1
grep "${ORIGINAL_SERIAL}.*; serial" dig.out.ns3.test$n > /dev/null || ret=1
nextpartpeek ns3/named.run | grep "verify-csk.*mirror zone is now in use" > /dev/null || ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "checking that an AXFR of an incorrectly signed mirror zone is rejected ($n)"
ret=0
nextpartreset ns3/named.run
wait_for_transfer verify-axfr
$DIG $DIGOPTS @10.53.0.3 +norec verify-axfr SOA > dig.out.ns3.test$n 2>&1 || ret=1
# The badly signed version must not be served.
grep "ANSWER: 0" dig.out.ns3.test$n > /dev/null || ret=1
grep "${UPDATED_SERIAL_BAD}.*; serial" dig.out.ns3.test$n > /dev/null && ret=1
# (removed: git commit-log text accidentally captured by the web scrape)
# Expect the verification failure to be logged and no "now in use" message.
nextpartpeek ns3/named.run | grep "No correct RSASHA256 signature for verify-axfr SOA" > /dev/null || ret=1
nextpartpeek ns3/named.run | grep "verify-axfr.*mirror zone is now in use" > /dev/null && ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "checking that an AXFR of an updated, correctly signed mirror zone is accepted ($n)"
ret=0
nextpart ns3/named.run > /dev/null
# Publish the corrected zone contents on ns2, then force ns3 to retransfer.
cat ns2/verify-axfr.db.good.signed > ns2/verify-axfr.db.signed
reload_zone verify-axfr ${UPDATED_SERIAL_GOOD}
$RNDCCMD 10.53.0.3 retransfer verify-axfr > /dev/null 2>&1
wait_for_transfer verify-axfr
$DIG $DIGOPTS @10.53.0.3 +norec verify-axfr SOA > dig.out.ns3.test$n 2>&1 || ret=1
# The corrected version must be served with the updated serial.
grep "ANSWER: 0" dig.out.ns3.test$n > /dev/null && ret=1
grep "${UPDATED_SERIAL_GOOD}.*; serial" dig.out.ns3.test$n > /dev/null || ret=1
# (removed: git commit-log text accidentally captured by the web scrape)
# The mirror zone had no usable database before this transfer, so it must now
# be announced as coming into use.
nextpartpeek ns3/named.run | grep "verify-axfr.*mirror zone is now in use" > /dev/null || ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "checking that an IXFR of an incorrectly signed mirror zone is rejected ($n)"
nextpartreset ns3/named.run
ret=0
wait_for_transfer verify-ixfr
# Sanity check: the initial, properly signed version of the zone should have
# been announced as coming into effect.
nextpart ns3/named.run | grep "verify-ixfr.*mirror zone is now in use" > /dev/null || ret=1
# Make a copy of the original zone file for reuse in journal tests below.
cp ns2/verify-ixfr.db.signed ns3/verify-journal.db.mirror
# Wait 1 second so that the zone file timestamp changes and the subsequent
# invocation of "rndc reload" triggers a zone reload.
sleep 1
# Publish the badly signed update on ns2.
cat ns2/verify-ixfr.db.bad.signed > ns2/verify-ixfr.db.signed
reload_zone verify-ixfr ${UPDATED_SERIAL_BAD}
# Make a copy of the bad zone journal for reuse in journal tests below.
cp ns2/verify-ixfr.db.signed.jnl ns3/verify-journal.db.bad.mirror.jnl
# Trigger IXFR.
$RNDCCMD 10.53.0.3 refresh verify-ixfr > /dev/null 2>&1
wait_for_transfer verify-ixfr
# Ensure the transfer was incremental as expected.  Use grep's exit status
# directly instead of the "grep | wc -l" anti-pattern (ShellCheck SC2126):
# the status alone tells us whether a match exists.
if ! nextpartpeek ns3/named.run | grep "verify-ixfr.*got incremental response" > /dev/null; then
	echo_i "failed: did not get an incremental response"
	ret=1
fi
# Ensure the new, bad version of the zone was not accepted.
$DIG $DIGOPTS @10.53.0.3 +norec verify-ixfr SOA > dig.out.ns3.test$n 2>&1 || ret=1
# A positive answer is expected as the original version of the "verify-ixfr"
# zone should have been successfully verified.
grep "ANSWER: 0" dig.out.ns3.test$n > /dev/null && ret=1
# ...but the bad serial must not be served.
grep "${UPDATED_SERIAL_BAD}.*; serial" dig.out.ns3.test$n > /dev/null && ret=1
nextpartpeek ns3/named.run | grep "No correct RSASHA256 signature for verify-ixfr SOA" > /dev/null || ret=1
# Despite the verification failure for this IXFR, this mirror zone should still
# be in use as its previous version should have been verified successfully.
nextpartpeek ns3/named.run | grep "verify-ixfr.*mirror zone is no longer in use" > /dev/null && ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "checking that an IXFR of an updated, correctly signed mirror zone is accepted after AXFR failover ($n)"
ret=0
nextpart ns3/named.run > /dev/null
# Wait 1 second so that the zone file timestamp changes and the subsequent
# invocation of "rndc reload" triggers a zone reload.
sleep 1
# Publish the corrected update on ns2.
cat ns2/verify-ixfr.db.good.signed > ns2/verify-ixfr.db.signed
reload_zone verify-ixfr ${UPDATED_SERIAL_GOOD}
# Make a copy of the good zone journal for reuse in journal tests below.
cp ns2/verify-ixfr.db.signed.jnl ns3/verify-journal.db.good.mirror.jnl
# Trigger IXFR.
$RNDCCMD 10.53.0.3 refresh verify-ixfr > /dev/null 2>&1
wait_for_transfer verify-ixfr
# Ensure the new, good version of the zone was accepted.
$DIG $DIGOPTS @10.53.0.3 +norec verify-ixfr SOA > dig.out.ns3.test$n 2>&1 || ret=1
# Expect a positive answer carrying the updated serial.
grep "ANSWER: 0" dig.out.ns3.test$n > /dev/null && ret=1
grep "${UPDATED_SERIAL_GOOD}.*; serial" dig.out.ns3.test$n > /dev/null || ret=1
# (removed: git commit-log text accidentally captured by the web scrape)
# The log message announcing the mirror zone coming into effect should not have
# been logged this time since the mirror zone in question is expected to
# already be in use before this test case is checked.
# (removed: git commit-log text accidentally captured by the web scrape)
# See the comment above: the "now in use" announcement is only made when a
# zone that previously had no usable database comes into effect.
nextpartpeek ns3/named.run | grep "verify-ixfr.*mirror zone is now in use" > /dev/null && ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "checking that loading an incorrectly signed mirror zone from disk fails ($n)"
ret=0
nextpartreset ns3/named.run
wait_for_load verify-load ${UPDATED_SERIAL_BAD} ns3/named.run
$DIG $DIGOPTS @10.53.0.3 +norec verify-load SOA > dig.out.ns3.test$n 2>&1 || ret=1
# The badly signed zone must not be served, the verification failure must be
# logged, and the zone must not be announced as coming into use.
grep "ANSWER: 0" dig.out.ns3.test$n > /dev/null || ret=1
grep "${UPDATED_SERIAL_BAD}.*; serial" dig.out.ns3.test$n > /dev/null && ret=1
nextpartpeek ns3/named.run | grep "No correct RSASHA256 signature for verify-load SOA" > /dev/null || ret=1
nextpartpeek ns3/named.run | grep "verify-load.*mirror zone is now in use" > /dev/null && ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "ensuring trust anchor telemetry queries are sent upstream for a mirror zone ($n)"
ret=0
# ns3 is started with "-T tat=3", so TAT queries should have already been sent.
# Look for the "_ta-<keytag>" NULL query in the upstream master's (ns1) log.
grep "_ta-[-0-9a-f]*/NULL" ns1/named.run > /dev/null || ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "checking that loading a correctly signed mirror zone from disk succeeds ($n)"
ret=0
# Stop ns3, install the correctly signed zone file as the mirror zone's local
# copy, then restart ns3 so the zone is loaded (and verified) from disk.
$PERL $SYSTEMTESTTOP/stop.pl --use-rndc --port ${CONTROLPORT} mirror ns3
cat ns2/verify-load.db.good.signed > ns3/verify-load.db.mirror
nextpart ns3/named.run > /dev/null
$PERL $SYSTEMTESTTOP/start.pl --noclean --restart --port ${PORT} mirror ns3
wait_for_load verify-load ${UPDATED_SERIAL_GOOD} ns3/named.run
$DIG $DIGOPTS @10.53.0.3 +norec verify-load SOA > dig.out.ns3.test$n 2>&1 || ret=1
# Expect a positive answer with the updated serial and the "now in use" log.
grep "ANSWER: 0" dig.out.ns3.test$n > /dev/null && ret=1
grep "${UPDATED_SERIAL_GOOD}.*; serial" dig.out.ns3.test$n > /dev/null || ret=1
nextpartpeek ns3/named.run | grep "verify-load.*mirror zone is now in use" > /dev/null || ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "checking that loading a journal for an incorrectly signed mirror zone fails ($n)"
ret=0
# Stop ns3 and install a zone file plus a journal containing the badly signed
# update, then restart so the journal is replayed and verified at load time.
$PERL $SYSTEMTESTTOP/stop.pl --use-rndc --port ${CONTROLPORT} mirror ns3
cp ns3/verify-journal.db.mirror ns3/verify-ixfr.db.mirror
cp ns3/verify-journal.db.bad.mirror.jnl ns3/verify-ixfr.db.mirror.jnl
# Temporarily disable transfers of the "verify-ixfr" zone on ns2. This is
# required to reliably test whether the message announcing the mirror zone
# coming into effect is not logged after a failed journal verification since
# otherwise a corrected version of the zone may be transferred after
# verification fails but before we look for the aforementioned log message.
# (NOTE: Keep the embedded newline in the sed function list below.)
sed '/^zone "verify-ixfr" {$/,/^};$/ {
s/10.53.0.3/10.53.0.254/
}' ns2/named.conf > ns2/named.conf.modified
mv ns2/named.conf.modified ns2/named.conf
rndc_reconfig ns2 10.53.0.2
nextpart ns3/named.run > /dev/null
$PERL $SYSTEMTESTTOP/start.pl --noclean --restart --port ${PORT} mirror ns3
wait_for_load verify-ixfr ${UPDATED_SERIAL_BAD} ns3/named.run
$DIG $DIGOPTS @10.53.0.3 +norec verify-ixfr SOA > dig.out.ns3.test$n 2>&1 || ret=1
# The zone must not be served, the verification failure must be logged, and
# no "now in use" announcement may appear.
grep "ANSWER: 0" dig.out.ns3.test$n > /dev/null || ret=1
grep "${UPDATED_SERIAL_BAD}.*; serial" dig.out.ns3.test$n > /dev/null && ret=1
nextpartpeek ns3/named.run | grep "No correct RSASHA256 signature for verify-ixfr SOA" > /dev/null || ret=1
nextpartpeek ns3/named.run | grep "verify-ixfr.*mirror zone is now in use" > /dev/null && ret=1
# Restore transfers for the "verify-ixfr" zone on ns2.
# (NOTE: Keep the embedded newline in the sed function list below.)
sed '/^zone "verify-ixfr" {$/,/^};$/ {
s/10.53.0.254/10.53.0.3/
}' ns2/named.conf > ns2/named.conf.modified
mv ns2/named.conf.modified ns2/named.conf
rndc_reconfig ns2 10.53.0.2
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "checking that loading a journal for a correctly signed mirror zone succeeds ($n)"
ret=0
# Same procedure as the previous test case, but with the journal containing
# the correctly signed update.
$PERL $SYSTEMTESTTOP/stop.pl --use-rndc --port ${CONTROLPORT} mirror ns3
cp ns3/verify-journal.db.mirror ns3/verify-ixfr.db.mirror
cp ns3/verify-journal.db.good.mirror.jnl ns3/verify-ixfr.db.mirror.jnl
nextpart ns3/named.run > /dev/null
$PERL $SYSTEMTESTTOP/start.pl --noclean --restart --port ${PORT} mirror ns3
wait_for_load verify-ixfr ${UPDATED_SERIAL_GOOD} ns3/named.run
$DIG $DIGOPTS @10.53.0.3 +norec verify-ixfr SOA > dig.out.ns3.test$n 2>&1 || ret=1
# Expect a positive answer with the updated serial and the "now in use" log.
grep "ANSWER: 0" dig.out.ns3.test$n > /dev/null && ret=1
grep "${UPDATED_SERIAL_GOOD}.*; serial" dig.out.ns3.test$n > /dev/null || ret=1
nextpartpeek ns3/named.run | grep "verify-ixfr.*mirror zone is now in use" > /dev/null || ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
# (removed: git commit-log text accidentally captured by the web scrape)
n=`expr $n + 1`
echo_i "checking delegations sourced from a mirror zone ($n)"
ret=0
$DIG $DIGOPTS @10.53.0.3 foo.example A +norec > dig.out.ns3.test$n 2>&1 || ret=1
# Check response code and flags in the answer; a referral must not carry the
# AD bit.
grep "NOERROR" dig.out.ns3.test$n > /dev/null || ret=1
grep "flags:.* ad" dig.out.ns3.test$n > /dev/null && ret=1
# Check that a delegation containing a DS RRset and glue is present.
grep "ANSWER: 0" dig.out.ns3.test$n > /dev/null || ret=1
grep "example.*IN.*NS" dig.out.ns3.test$n > /dev/null || ret=1
grep "example.*IN.*DS" dig.out.ns3.test$n > /dev/null || ret=1
grep "ns2.example.*A.*10.53.0.2" dig.out.ns3.test$n > /dev/null || ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "checking that resolution involving a mirror zone works as expected ($n)"
ret=0
$DIG $DIGOPTS @10.53.0.3 foo.example A > dig.out.ns3.test$n 2>&1 || ret=1
# Check response code and flags in the answer; the recursive answer is
# expected to validate (AD bit set).
grep "NOERROR" dig.out.ns3.test$n > /dev/null || ret=1
grep "flags:.* ad" dig.out.ns3.test$n > /dev/null || ret=1
# Ensure ns1 was not queried.
grep "query 'foo.example/A/IN'" ns1/named.run > /dev/null && ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "checking that non-recursive queries for names below mirror zone get responded from cache ($n)"
ret=0
# Issue a non-recursive query for an RRset which is expected to be in cache
# (it was resolved by the previous test case).
$DIG $DIGOPTS @10.53.0.3 +norec foo.example. A > dig.out.ns3.test$n 2>&1 || ret=1
# Check response code and flags in the answer.
grep "NOERROR" dig.out.ns3.test$n > /dev/null || ret=1
grep "flags:.* ad" dig.out.ns3.test$n > /dev/null || ret=1
# Ensure the response is not a delegation.
grep "ANSWER: 0" dig.out.ns3.test$n > /dev/null && ret=1
grep "foo.example.*IN.*A.*127.0.0.1" dig.out.ns3.test$n > /dev/null || ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "checking that delegations from cache which improve mirror zone delegations are properly handled ($n)"
ret=0
# First, issue a recursive query in order to cache an RRset which is not within
# the mirror zone's bailiwick.
$DIG $DIGOPTS @10.53.0.3 sub.example. NS > dig.out.ns3.test$n.1 2>&1 || ret=1
# Ensure the child-side NS RRset is returned.
grep "NOERROR" dig.out.ns3.test$n.1 > /dev/null || ret=1
# (removed: git commit-log text accidentally captured by the web scrape)
grep "ANSWER: 2" dig.out.ns3.test$n.1 > /dev/null || ret=1
grep "sub.example.*IN.*NS" dig.out.ns3.test$n.1 > /dev/null || ret=1
# Issue a non-recursive query for something below the cached zone cut.
$DIG $DIGOPTS @10.53.0.3 +norec foo.sub.example. A > dig.out.ns3.test$n.2 2>&1 || ret=1
Restore zone database and zone node if cache search results are to be ignored When query processing hits a delegation from a locally configured zone, an attempt may be made to look for a better answer in the cache. In such a case, the zone-sourced delegation data is set aside and the lookup is retried using the cache database. When that lookup is completed, a decision is made whether the answer found in the cache is better than the answer found in the zone. Currently, if the zone-sourced answer turns out to be better than the one found in the cache: - qctx->zdb is not restored into qctx->db, - qctx->node, holding the zone database node found, is not even saved. Thus, in such a case both qctx->db and qctx->node will point at cache data. This is not an issue for BIND versions which do not support mirror zones because in these versions non-recursive queries always cause the zone-sourced delegation to be returned and thus the non-recursive part of query_delegation() is never reached if the delegation is coming from a zone. With mirror zones, however, non-recursive queries may cause cache lookups even after a zone delegation is found. Leaving qctx->db assigned to the cache database when query_delegation() determines that the zone-sourced delegation is the best answer to the client's query prevents DS records from being added to delegations coming from mirror zones. Fix this issue by keeping the zone database and zone node in qctx while the cache is searched for an answer and then restoring them into qctx->db and qctx->node, respectively, if the zone-sourced delegation turns out to be the best answer. Since this change means that qctx->zdb cannot be used as the glue database any more as it will be reset to NULL by RESTORE(), ensure that qctx->db is not a cache database before attaching it to qctx->client->query.gluedb. Furthermore, current code contains a conditional statement which prevents a mirror zone from being used as a source of glue records. 
Said statement was added to prevent assertion failures caused by attempting to use a zone database's glue cache for finding glue for an NS RRset coming from a cache database. However, that check is overly strict since it completely prevents glue from being added to delegations coming from mirror zones. With the changes described above in place, the scenario this check was preventing can no longer happen, so remove the aforementioned check. If qctx->zdb is not NULL, qctx->zfname will also not be NULL; qctx->zsigrdataset may be NULL in such a case, but query_putrdataset() handles pointers to NULL pointers gracefully. Remove redundant conditional expressions to make the cleanup code in query_freedata() match the corresponding sequences of SAVE() / RESTORE() macros more closely.
2018-08-08 01:56:29 -04:00
# Ensure the cached NS RRset is returned in a delegation, along with the
# parent-side DS RRset.
grep "NOERROR" dig.out.ns3.test$n.2 > /dev/null || ret=1
grep "ANSWER: 0" dig.out.ns3.test$n.2 > /dev/null || ret=1
grep "sub.example.*IN.*NS" dig.out.ns3.test$n.2 > /dev/null || ret=1
Restore zone database and zone node if cache search results are to be ignored When query processing hits a delegation from a locally configured zone, an attempt may be made to look for a better answer in the cache. In such a case, the zone-sourced delegation data is set aside and the lookup is retried using the cache database. When that lookup is completed, a decision is made whether the answer found in the cache is better than the answer found in the zone. Currently, if the zone-sourced answer turns out to be better than the one found in the cache: - qctx->zdb is not restored into qctx->db, - qctx->node, holding the zone database node found, is not even saved. Thus, in such a case both qctx->db and qctx->node will point at cache data. This is not an issue for BIND versions which do not support mirror zones because in these versions non-recursive queries always cause the zone-sourced delegation to be returned and thus the non-recursive part of query_delegation() is never reached if the delegation is coming from a zone. With mirror zones, however, non-recursive queries may cause cache lookups even after a zone delegation is found. Leaving qctx->db assigned to the cache database when query_delegation() determines that the zone-sourced delegation is the best answer to the client's query prevents DS records from being added to delegations coming from mirror zones. Fix this issue by keeping the zone database and zone node in qctx while the cache is searched for an answer and then restoring them into qctx->db and qctx->node, respectively, if the zone-sourced delegation turns out to be the best answer. Since this change means that qctx->zdb cannot be used as the glue database any more as it will be reset to NULL by RESTORE(), ensure that qctx->db is not a cache database before attaching it to qctx->client->query.gluedb. Furthermore, current code contains a conditional statement which prevents a mirror zone from being used as a source of glue records. 
Said statement was added to prevent assertion failures caused by attempting to use a zone database's glue cache for finding glue for an NS RRset coming from a cache database. However, that check is overly strict since it completely prevents glue from being added to delegations coming from mirror zones. With the changes described above in place, the scenario this check was preventing can no longer happen, so remove the aforementioned check. If qctx->zdb is not NULL, qctx->zfname will also not be NULL; qctx->zsigrdataset may be NULL in such a case, but query_putrdataset() handles pointers to NULL pointers gracefully. Remove redundant conditional expressions to make the cleanup code in query_freedata() match the corresponding sequences of SAVE() / RESTORE() macros more closely.
2018-08-08 01:56:29 -04:00
grep "sub.example.*IN.*DS" dig.out.ns3.test$n.2 > /dev/null || ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=$((n + 1))
echo_i "checking flags set in a DNSKEY response sourced from a mirror zone ($n)"
ret=0
$DIG $DIGOPTS @10.53.0.3 . DNSKEY > dig.out.ns3.test$n 2>&1 || ret=1
# The answer must be NOERROR, non-authoritative (AA clear) and validated
# (AD set): mirror zone data is served as if it came from the cache.
grep "NOERROR" dig.out.ns3.test$n >/dev/null || ret=1
grep "flags:.* aa" dig.out.ns3.test$n >/dev/null && ret=1
grep "flags:.* ad" dig.out.ns3.test$n >/dev/null || ret=1
if [ $ret -ne 0 ]; then echo_i "failed"; fi
status=$((status + ret))
n=$((n + 1))
echo_i "checking flags set in a SOA response sourced from a mirror zone ($n)"
ret=0
$DIG $DIGOPTS @10.53.0.3 . SOA > dig.out.ns3.test$n 2>&1 || ret=1
# Same contract as the DNSKEY check: NOERROR with AA clear and AD set.
grep "NOERROR" dig.out.ns3.test$n >/dev/null || ret=1
grep "flags:.* aa" dig.out.ns3.test$n >/dev/null && ret=1
grep "flags:.* ad" dig.out.ns3.test$n >/dev/null || ret=1
if [ $ret -ne 0 ]; then echo_i "failed"; fi
status=$((status + ret))
n=`expr $n + 1`
echo_i "checking that resolution succeeds with unavailable mirror zone data ($n)"
ret=0
# Wait for the initial (expected-to-fail) transfer attempt to finish before
# querying, so the zone's state is settled.
wait_for_transfer initially-unavailable
# Query for a record in a zone that is set up to be mirrored, but
# untransferrable from the configured master. Resolution should still succeed.
$DIG $DIGOPTS @10.53.0.3 foo.initially-unavailable. A > dig.out.ns3.test$n.1 2>&1 || ret=1
# Check response code and flags in the answer.
grep "NOERROR" dig.out.ns3.test$n.1 > /dev/null || ret=1
grep "flags:.* ad" dig.out.ns3.test$n.1 > /dev/null || ret=1
# Sanity check: the authoritative server should have been queried.
nextpart ns2/named.run | grep "query 'foo.initially-unavailable/A/IN'" > /dev/null || ret=1
# Reconfigure ns2 so that the zone can be mirrored on ns3.
# NOTE(review): this swaps the unroutable placeholder address 10.53.0.254 for
# ns3's address inside the zone statement — presumably an access-control
# address; confirm against ns2/named.conf.
sed '/^zone "initially-unavailable" {$/,/^};$/ {
s/10.53.0.254/10.53.0.3/
}' ns2/named.conf > ns2/named.conf.modified
mv ns2/named.conf.modified ns2/named.conf
rndc_reconfig ns2 10.53.0.2
# Flush the cache on ns3 and retransfer the mirror zone.
$RNDCCMD 10.53.0.3 flush > /dev/null 2>&1
# Mark the current end of ns3's log so wait_for_transfer only sees new lines.
nextpart ns3/named.run > /dev/null
$RNDCCMD 10.53.0.3 retransfer initially-unavailable > /dev/null 2>&1
wait_for_transfer initially-unavailable
# Query for the same record again. Resolution should still succeed.
$DIG $DIGOPTS @10.53.0.3 foo.initially-unavailable. A > dig.out.ns3.test$n.2 2>&1 || ret=1
# Check response code and flags in the answer.
grep "NOERROR" dig.out.ns3.test$n.2 > /dev/null || ret=1
grep "flags:.* ad" dig.out.ns3.test$n.2 > /dev/null || ret=1
# Ensure the authoritative server was not queried.
# (The answer must now come from the transferred mirror zone, not recursion.)
nextpart ns2/named.run | grep "query 'foo.initially-unavailable/A/IN'" > /dev/null && ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "checking that resolution succeeds with expired mirror zone data ($n)"
ret=0
# Reconfigure ns2 so that the zone from the previous test can no longer be
# mirrored on ns3.
# (Reverses the address substitution performed by the previous test.)
sed '/^zone "initially-unavailable" {$/,/^};$/ {
s/10.53.0.3/10.53.0.254/
}' ns2/named.conf > ns2/named.conf.modified
mv ns2/named.conf.modified ns2/named.conf
rndc_reconfig ns2 10.53.0.2
# Stop ns3, update the timestamp of the zone file to one far in the past, then
# restart ns3.
$PERL $SYSTEMTESTTOP/stop.pl --use-rndc --port ${CONTROLPORT} mirror ns3
# Back-date the zone file to 2000-01-01 00:00 so named treats the zone
# contents as expired on startup.
touch -t 200001010000 ns3/initially-unavailable.db.mirror
# Mark the current end of ns3's log so only post-restart messages are checked.
nextpart ns3/named.run > /dev/null
$PERL $SYSTEMTESTTOP/start.pl --noclean --restart --port ${PORT} mirror ns3
# Ensure named attempts to retransfer the zone due to its expiry.
wait_for_transfer initially-unavailable
# Ensure the expected messages were logged.
# (nextpartpeek is used twice so the log position is not consumed between the
# two checks.)
nextpartpeek ns3/named.run | grep "initially-unavailable.*expired" > /dev/null || ret=1
nextpartpeek ns3/named.run | grep "initially-unavailable.*mirror zone is no longer in use" > /dev/null || ret=1
# Query for a record in the expired zone. Resolution should still succeed.
$DIG $DIGOPTS @10.53.0.3 foo.initially-unavailable. A > dig.out.ns3.test$n 2>&1 || ret=1
# Check response code and flags in the answer.
grep "NOERROR" dig.out.ns3.test$n > /dev/null || ret=1
grep "flags:.* ad" dig.out.ns3.test$n > /dev/null || ret=1
# Sanity check: the authoritative server should have been queried.
nextpart ns2/named.run | grep "query 'foo.initially-unavailable/A/IN'" > /dev/null || ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=$((n + 1))
echo_i "checking that clients without cache access cannot retrieve mirror zone data ($n)"
ret=0
# Send the query from 10.53.0.3, a source address that (per the test setup)
# is denied cache access; the query must be refused with AD clear.
$DIG $DIGOPTS @10.53.0.3 -b 10.53.0.3 +norec . SOA > dig.out.ns3.test$n 2>&1 || ret=1
grep "REFUSED" dig.out.ns3.test$n >/dev/null || ret=1
grep "flags:.* ad" dig.out.ns3.test$n >/dev/null && ret=1
if [ $ret -ne 0 ]; then echo_i "failed"; fi
status=$((status + ret))
n=$((n + 1))
echo_i "checking that outgoing transfers of mirror zones are disabled by default ($n)"
ret=0
# An AXFR request for the mirrored root zone is expected to be rejected.
$DIG $DIGOPTS @10.53.0.3 . AXFR > dig.out.ns3.test$n 2>&1 || ret=1
grep "; Transfer failed" dig.out.ns3.test$n >/dev/null || ret=1
if [ $ret -ne 0 ]; then echo_i "failed"; fi
status=$((status + ret))
n=$((n + 1))
echo_i "checking that notifies are disabled by default for mirror zones ($n)"
ret=0
# No "sending notifies" message should ever have been logged for this zone.
grep "initially-unavailable.*sending notifies" ns3/named.run >/dev/null && ret=1
if [ $ret -ne 0 ]; then echo_i "failed"; fi
status=$((status + ret))
n=$((n + 1))
echo_i "checking output of \"rndc zonestatus\" for a mirror zone ($n)"
ret=0
$RNDCCMD 10.53.0.3 zonestatus . > rndc.out.ns3.test$n 2>&1
# "rndc zonestatus" must report the zone type as "mirror".
grep "type: mirror" rndc.out.ns3.test$n >/dev/null || ret=1
if [ $ret -ne 0 ]; then echo_i "failed"; fi
status=$((status + ret))
n=`expr $n + 1`
echo_i "checking that \"rndc reconfig\" properly handles a mirror -> slave zone type change ($n)"
ret=0
# Sanity check before we start: a mirror zone answers with AA clear, AD set.
$DIG $DIGOPTS @10.53.0.3 +norec verify-reconfig SOA > dig.out.ns3.test$n.1 2>&1 || ret=1
grep "NOERROR" dig.out.ns3.test$n.1 > /dev/null || ret=1
grep "flags:.* aa" dig.out.ns3.test$n.1 > /dev/null && ret=1
grep "flags:.* ad" dig.out.ns3.test$n.1 > /dev/null || ret=1
# Reconfigure the zone so that it is no longer a mirror zone.
# (NOTE: Keep the embedded newline in the sed function list below.)
sed '/^zone "verify-reconfig" {$/,/^};$/ {
s/type mirror;/type slave;/

}' ns3/named.conf > ns3/named.conf.modified
mv ns3/named.conf.modified ns3/named.conf
nextpart ns3/named.run > /dev/null
rndc_reconfig ns3 10.53.0.3
# Zones whose type was changed should not be reusable, which means the tested
# zone should have been reloaded from disk.
wait_for_load verify-reconfig ${ORIGINAL_SERIAL} ns3/named.run
# Ensure responses sourced from the reconfigured zone have AA=1 and AD=0.
$DIG $DIGOPTS @10.53.0.3 +norec verify-reconfig SOA > dig.out.ns3.test$n.2 2>&1 || ret=1
grep "NOERROR" dig.out.ns3.test$n.2 > /dev/null || ret=1
grep "flags:.* aa" dig.out.ns3.test$n.2 > /dev/null || ret=1
grep "flags:.* ad" dig.out.ns3.test$n.2 > /dev/null && ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo_i "checking that \"rndc reconfig\" properly handles a slave -> mirror zone type change ($n)"
ret=0
# Put an incorrectly signed version of the zone in the zone file used by ns3.
nextpart ns3/named.run > /dev/null
cat ns2/verify-reconfig.db.bad.signed > ns3/verify-reconfig.db.mirror
# Reconfigure the zone so that it is a mirror zone again.
# (NOTE: Keep the embedded newline in the sed function list below.)
sed '/^zone "verify-reconfig" {$/,/^};$/ {
s/type slave;/type mirror;/

}' ns3/named.conf > ns3/named.conf.modified
mv ns3/named.conf.modified ns3/named.conf
rndc_reconfig ns3 10.53.0.3
# The reconfigured zone should fail verification.
wait_for_load verify-reconfig ${UPDATED_SERIAL_BAD} ns3/named.run
# The badly-signed serial must not be served, and the verification failure
# must be logged.
$DIG $DIGOPTS @10.53.0.3 +norec verify-reconfig SOA > dig.out.ns3.test$n 2>&1 || ret=1
grep "${UPDATED_SERIAL_BAD}.*; serial" dig.out.ns3.test$n > /dev/null && ret=1
nextpart ns3/named.run | grep "No correct RSASHA256 signature for verify-reconfig SOA" > /dev/null || ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=`expr $status + $ret`
n=$((n + 1))
echo_i "checking that a mirror zone can be added using rndc ($n)"
ret=0
# Sanity check: the zone should not exist in the root zone.
$DIG $DIGOPTS @10.53.0.3 +norec verify-addzone SOA > dig.out.ns3.test$n.1 2>&1 || ret=1
grep "NXDOMAIN" dig.out.ns3.test$n.1 >/dev/null || ret=1
grep "flags:.* aa" dig.out.ns3.test$n.1 >/dev/null && ret=1
grep "flags:.* ad" dig.out.ns3.test$n.1 >/dev/null || ret=1
# Add a mirror zone for a name absent from the root zone, then wait for it to
# transfer.
nextpart ns3/named.run > /dev/null
$RNDCCMD 10.53.0.3 addzone verify-addzone '{ type mirror; masters { 10.53.0.2; }; };' > rndc.out.ns3.test$n 2>&1 || ret=1
wait_for_transfer verify-addzone
# The name must now resolve from the added mirror zone: NOERROR with AA clear
# and AD set.
$DIG $DIGOPTS @10.53.0.3 +norec verify-addzone SOA > dig.out.ns3.test$n.2 2>&1 || ret=1
grep "NOERROR" dig.out.ns3.test$n.2 >/dev/null || ret=1
grep "flags:.* aa" dig.out.ns3.test$n.2 >/dev/null && ret=1
grep "flags:.* ad" dig.out.ns3.test$n.2 >/dev/null || ret=1
if [ $ret -ne 0 ]; then echo_i "failed"; fi
status=$((status + ret))
n=$((n + 1))
echo_i "checking that a mirror zone can be deleted using rndc ($n)"
ret=0
# Remove the mirror zone added in the previous test.
$RNDCCMD 10.53.0.3 delzone verify-addzone > rndc.out.ns3.test$n 2>&1 || ret=1
# After deletion the name should again yield NXDOMAIN with AA clear and AD
# set, matching the pre-addzone state.
$DIG $DIGOPTS @10.53.0.3 +norec verify-addzone SOA > dig.out.ns3.test$n 2>&1 || ret=1
grep "NXDOMAIN" dig.out.ns3.test$n >/dev/null || ret=1
grep "flags:.* aa" dig.out.ns3.test$n >/dev/null && ret=1
grep "flags:.* ad" dig.out.ns3.test$n >/dev/null || ret=1
if [ $ret -ne 0 ]; then echo_i "failed"; fi
status=$((status + ret))
echo_i "exit status: $status"
# Propagate an overall failure to the test harness.
if [ "$status" -ne 0 ]; then
	exit 1
fi