Fall back to normal recursion when mirror zone data is unavailable
If transferring or loading a mirror zone fails, resolution should still succeed by falling back to regular recursive queries. Currently, though, if a slave zone is present in the zone table but not loaded, a SERVFAIL response is generated; mirror zones therefore need special handling in this regard. Add a new dns_zt_find() flag, DNS_ZTFIND_MIRROR, and set it whenever a domain name (rather than a zone itself) is looked up. Handle that flag in dns_zt_find() so that a mirror zone which is expired or not yet loaded is ignored during domain-name lookups, yet can still be found when the caller only wants to know whether the zone is configured. This causes a fallback to recursion when mirror zone data is unavailable, without making unloaded mirror zones invisible to code checking a zone's existence.
This commit is contained in:
# Options passed to dig for every query made by this test: use the test's
# port, request DNSSEC records, and keep timeouts short so failures are fast.
DIGOPTS="-p ${PORT} +dnssec +time=1 +tries=1 +multi"
# Base rndc command line; the target server address is appended per call.
RNDCCMD="$RNDC -c $SYSTEMTESTTOP/common/rndc.conf -p ${CONTROLPORT} -s"
# Wait until the transfer of the given zone to ns3 either completes
# successfully or is aborted by a verification failure or a REFUSED response
# from the master.  Polls ns3/named.run once per second for up to 10 seconds
# and logs a diagnostic if no transfer outcome appears in that time.
# Arguments: $1 - name of the zone whose transfer status to wait for
wait_for_transfer() {
	zone=$1
	for i in 1 2 3 4 5 6 7 8 9 10; do
		# "egrep" is deprecated; "grep -E" is the portable equivalent.
		nextpartpeek ns3/named.run | grep -E "'$zone/IN'.*Transfer status: (success|verify failure|REFUSED)" > /dev/null && return
		sleep 1
	done
	echo_i "exceeded time limit waiting for proof of '$zone' being transferred to appear in ns3/named.run"
}
# The AD (authenticated data) flag must be set in the answer.
grep "flags:.* ad" dig.out.ns3.test$n > /dev/null || ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
# Arithmetic expansion replaces the legacy `expr` backtick form.
status=$((status + ret))

n=$((n + 1))
|
||||
echo_i "checking that resolution succeeds with unavailable mirror zone data ($n)"
ret=0
wait_for_transfer initially-unavailable
# Query for a record in a zone that is set up to be mirrored, but
# untransferable from the configured master.  Resolution should still succeed
# (named must fall back to recursion instead of answering SERVFAIL).
$DIG $DIGOPTS @10.53.0.3 foo.initially-unavailable. A > dig.out.ns3.test$n.1 2>&1 || ret=1
# Check response code and flags in the answer.
grep "NOERROR" dig.out.ns3.test$n.1 > /dev/null || ret=1
grep "flags:.* ad" dig.out.ns3.test$n.1 > /dev/null || ret=1
# Sanity check: the authoritative server should have been queried.
nextpart ns2/named.run | grep "query 'foo.initially-unavailable/A/IN'" > /dev/null || ret=1
# Reconfigure ns2 so that the zone can be mirrored on ns3.
sed "s/10.53.0.254/10.53.0.3/;" ns2/named.conf > ns2/named.conf.modified
mv ns2/named.conf.modified ns2/named.conf
$RNDCCMD 10.53.0.2 reconfig > /dev/null 2>&1
# Flush the cache on ns3 and retransfer the mirror zone.
$RNDCCMD 10.53.0.3 flush > /dev/null 2>&1
nextpart ns3/named.run > /dev/null
$RNDCCMD 10.53.0.3 retransfer initially-unavailable > /dev/null 2>&1
wait_for_transfer initially-unavailable
# Query for the same record again.  Resolution should still succeed.
$DIG $DIGOPTS @10.53.0.3 foo.initially-unavailable. A > dig.out.ns3.test$n.2 2>&1 || ret=1
# Check response code and flags in the answer.
grep "NOERROR" dig.out.ns3.test$n.2 > /dev/null || ret=1
grep "flags:.* ad" dig.out.ns3.test$n.2 > /dev/null || ret=1
# Ensure the authoritative server was NOT queried this time: the answer must
# now come from the successfully mirrored zone, not from recursion.
nextpart ns2/named.run | grep "query 'foo.initially-unavailable/A/IN'" > /dev/null && ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=$((status + ret))
|
||||
|
||||
n=$((n + 1))
echo_i "checking that resolution succeeds with expired mirror zone data ($n)"
ret=0
# Reconfigure ns2 so that the zone from the previous test can no longer be
# mirrored on ns3.
sed "s/10.53.0.3/10.53.0.254/;" ns2/named.conf > ns2/named.conf.modified
mv ns2/named.conf.modified ns2/named.conf
$RNDCCMD 10.53.0.2 reconfig > /dev/null 2>&1
# Stop ns3, update the timestamp of the zone file to one far in the past
# (forcing the zone to be considered expired on reload), then restart ns3.
$PERL $SYSTEMTESTTOP/stop.pl --use-rndc --port ${CONTROLPORT} . ns3
touch -t 200001010000 ns3/initially-unavailable.db.mirror
nextpart ns3/named.run > /dev/null
$PERL $SYSTEMTESTTOP/start.pl --noclean --restart --port ${PORT} . ns3
# Ensure named attempts to retransfer the zone due to its expiry.
wait_for_transfer initially-unavailable
nextpart ns3/named.run | grep "initially-unavailable.*expired" > /dev/null || ret=1
# Query for a record in the expired zone.  Resolution should still succeed.
$DIG $DIGOPTS @10.53.0.3 foo.initially-unavailable. A > dig.out.ns3.test$n 2>&1 || ret=1
# Check response code and flags in the answer.
grep "NOERROR" dig.out.ns3.test$n > /dev/null || ret=1
grep "flags:.* ad" dig.out.ns3.test$n > /dev/null || ret=1
# Sanity check: the authoritative server should have been queried, since the
# expired mirror data must be ignored and recursion used instead.
nextpart ns2/named.run | grep "query 'foo.initially-unavailable/A/IN'" > /dev/null || ret=1
if [ $ret != 0 ]; then echo_i "failed"; fi
status=$((status + ret))
|
||||
|
||||
# Report the accumulated failure count and exit non-zero if any test failed.
echo_i "exit status: $status"
[ $status -eq 0 ] || exit 1
|
||||
|
||||
Reference in New Issue
Block a user