svn commit: r276543 - in vendor/unbound/1.5.0: . iterator
Dag-Erling Smørgrav
des at FreeBSD.org
Fri Jan 2 17:33:31 UTC 2015
Author: des
Date: Fri Jan 2 17:33:30 2015
New Revision: 276543
URL: https://svnweb.freebsd.org/changeset/base/276543
Log:
tag unbound 1.5.0
Added:
vendor/unbound/1.5.0/
- copied from r276541, vendor/unbound/dist/
Replaced:
vendor/unbound/1.5.0/iterator/iterator.c
- copied unchanged from r276542, vendor/unbound/dist/iterator/iterator.c
vendor/unbound/1.5.0/iterator/iterator.h
- copied unchanged from r276542, vendor/unbound/dist/iterator/iterator.h
Copied: vendor/unbound/1.5.0/iterator/iterator.c (from r276542, vendor/unbound/dist/iterator/iterator.c)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ vendor/unbound/1.5.0/iterator/iterator.c Fri Jan 2 17:33:30 2015 (r276543, copy of r276542, vendor/unbound/dist/iterator/iterator.c)
@@ -0,0 +1,2996 @@
+/*
+ * iterator/iterator.c - iterative resolver DNS query response module
+ *
+ * Copyright (c) 2007, NLnet Labs. All rights reserved.
+ *
+ * This software is open source.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of the NLNET LABS nor the names of its contributors may
+ * be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * \file
+ *
+ * This file contains a module that performs recursive iterative DNS query
+ * processing.
+ */
+
+#include "config.h"
+#include "iterator/iterator.h"
+#include "iterator/iter_utils.h"
+#include "iterator/iter_hints.h"
+#include "iterator/iter_fwd.h"
+#include "iterator/iter_donotq.h"
+#include "iterator/iter_delegpt.h"
+#include "iterator/iter_resptype.h"
+#include "iterator/iter_scrub.h"
+#include "iterator/iter_priv.h"
+#include "validator/val_neg.h"
+#include "services/cache/dns.h"
+#include "services/cache/infra.h"
+#include "util/module.h"
+#include "util/netevent.h"
+#include "util/net_help.h"
+#include "util/regional.h"
+#include "util/data/dname.h"
+#include "util/data/msgencode.h"
+#include "util/fptr_wlist.h"
+#include "util/config_file.h"
+#include "ldns/rrdef.h"
+#include "ldns/wire2str.h"
+#include "ldns/parseutil.h"
+#include "ldns/sbuffer.h"
+
+/**
+ * Initialise the iterator module for the module environment.
+ * Allocates the per-module iter_env and applies configuration to it.
+ * @param env: module environment; modinfo[id] receives the new iter_env.
+ * @param id: module id.
+ * @return 1 on success, 0 on allocation or configuration failure.
+ */
+int
+iter_init(struct module_env* env, int id)
+{
+	struct iter_env* iter_env = (struct iter_env*)calloc(1,
+		sizeof(struct iter_env));
+	if(!iter_env) {
+		log_err("malloc failure");
+		return 0;
+	}
+	/* stored before config is applied, so iter_deinit can free it
+	 * even when iter_apply_cfg fails below */
+	env->modinfo[id] = (void*)iter_env;
+	if(!iter_apply_cfg(iter_env, env->cfg)) {
+		log_err("iterator: could not apply configuration settings.");
+		return 0;
+	}
+	return 1;
+}
+
+/**
+ * Deinitialise the iterator module: free the iter_env stored in
+ * env->modinfo[id] and the members it owns, then clear the slot.
+ * Safe to call when the module was never (fully) initialised.
+ * @param env: module environment (may be NULL).
+ * @param id: module id.
+ */
+void
+iter_deinit(struct module_env* env, int id)
+{
+	struct iter_env* iter_env;
+	if(!env || !env->modinfo[id])
+		return;
+	iter_env = (struct iter_env*)env->modinfo[id];
+	free(iter_env->target_fetch_policy);
+	priv_delete(iter_env->priv);
+	donotq_delete(iter_env->donotq);
+	free(iter_env);
+	env->modinfo[id] = NULL;
+}
+
+/** allocate and set up a new per-query iterator state in the qstate
+ * region; returns 0 on allocation failure (qstate->minfo[id] is then
+ * NULL, since regional_alloc returned NULL) */
+static int
+iter_new(struct module_qstate* qstate, int id)
+{
+	struct iter_qstate* iq = (struct iter_qstate*)regional_alloc(
+		qstate->region, sizeof(struct iter_qstate));
+	qstate->minfo[id] = iq;
+	if(!iq)
+		return 0;
+	memset(iq, 0, sizeof(*iq));
+	/* the memset above already zeroed the struct; the explicit
+	 * assignments below document the individual fields */
+	iq->state = INIT_REQUEST_STATE;
+	iq->final_state = FINISHED_STATE;
+	iq->an_prepend_list = NULL;
+	iq->an_prepend_last = NULL;
+	iq->ns_prepend_list = NULL;
+	iq->ns_prepend_last = NULL;
+	iq->dp = NULL;
+	iq->depth = 0;
+	iq->num_target_queries = 0;
+	iq->num_current_queries = 0;
+	iq->query_restart_count = 0;
+	iq->referral_count = 0;
+	iq->sent_count = 0;
+	iq->wait_priming_stub = 0;
+	iq->refetch_glue = 0;
+	iq->dnssec_expected = 0;
+	iq->dnssec_lame_query = 0;
+	iq->chase_flags = qstate->query_flags;
+	/* Start with the (current) qname. */
+	iq->qchase = qstate->qinfo;
+	outbound_list_init(&iq->outlist);
+	return 1;
+}
+
+/**
+ * Transition to the next state. This can be used to advance a currently
+ * processing event. It cannot be used to reactivate a forEvent.
+ *
+ * @param iq: iterator query state
+ * @param nextstate: The state to transition to.
+ * @return true. This is so this can be called as the return value for the
+ *         actual process*State() methods. (Transitioning to the next state
+ *         implies further processing).
+ */
+static int
+next_state(struct iter_qstate* iq, enum iter_state nextstate)
+{
+	/* If transitioning to a "response" state, make sure that there is a
+	 * response */
+	if(iter_state_is_responsestate(nextstate)) {
+		/* sanity check only: logs, but still performs the
+		 * transition */
+		if(iq->response == NULL) {
+			log_err("transitioning to response state sans "
+				"response.");
+		}
+	}
+	iq->state = nextstate;
+	return 1;
+}
+
+/**
+ * Transition an event to its final state. Final states always either return
+ * a result up the module chain, or reactivate a dependent event. Which
+ * final state to transition to is set in the module state for the event when
+ * it was created, and depends on the original purpose of the event.
+ *
+ * The response is stored in the qstate->buf buffer.
+ *
+ * @param iq: iterator query state
+ * @return false. This is so this method can be used as the return value for
+ *         the processState methods. (Transitioning to the final state
+ *         implies further processing).
+ */
+static int
+final_state(struct iter_qstate* iq)
+{
+	return next_state(iq, iq->final_state);
+}
+
+/**
+ * Callback routine to handle errors in parent query states: marks the
+ * failed nameserver target (for A/AAAA subqueries) or clears the
+ * delegation point (for failed NS primes), then re-runs target selection.
+ * @param qstate: query state that failed.
+ * @param id: module id.
+ * @param super: super state.
+ */
+static void
+error_supers(struct module_qstate* qstate, int id, struct module_qstate* super)
+{
+	struct iter_qstate* super_iq = (struct iter_qstate*)super->minfo[id];
+
+	if(qstate->qinfo.qtype == LDNS_RR_TYPE_A ||
+		qstate->qinfo.qtype == LDNS_RR_TYPE_AAAA) {
+		/* mark address as failed. */
+		struct delegpt_ns* dpns = NULL;
+		if(super_iq->dp)
+			dpns = delegpt_find_ns(super_iq->dp,
+				qstate->qinfo.qname, qstate->qinfo.qname_len);
+		if(!dpns) {
+			/* not interested */
+			verbose(VERB_ALGO, "subq error, but not interested");
+			log_query_info(VERB_ALGO, "superq", &super->qinfo);
+			if(super_iq->dp)
+				delegpt_log(VERB_ALGO, super_iq->dp);
+			/* NOTE(review): aborts in debug builds; presumably an
+			 * unrequested subquery cannot occur here - confirm */
+			log_assert(0);
+			return;
+		} else {
+			/* see if the failure did get (parent-lame) info */
+			if(!cache_fill_missing(super->env,
+				super_iq->qchase.qclass, super->region,
+				super_iq->dp))
+				log_err("out of memory adding missing");
+		}
+		dpns->resolved = 1; /* mark as failed */
+		super_iq->num_target_queries--;
+	}
+	if(qstate->qinfo.qtype == LDNS_RR_TYPE_NS) {
+		/* prime failed to get delegation */
+		super_iq->dp = NULL;
+	}
+	/* evaluate targets again */
+	super_iq->state = QUERYTARGETS_STATE;
+	/* super becomes runnable, and will process this change */
+}
+
+/**
+ * Return an error to the client
+ * @param qstate: our query state
+ * @param id: module id
+ * @param rcode: error code (DNS errcode).
+ * @return: 0 for use by caller, to make notation easy, like:
+ *	return error_response(..).
+ */
+static int
+error_response(struct module_qstate* qstate, int id, int rcode)
+{
+	/* "??" is printed when the rcode has no name in the lookup table */
+	verbose(VERB_QUERY, "return error response %s",
+		sldns_lookup_by_id(sldns_rcodes, rcode)?
+		sldns_lookup_by_id(sldns_rcodes, rcode)->name:"??");
+	qstate->return_rcode = rcode;
+	qstate->return_msg = NULL;
+	/* module_finished: the query is done, rcode is the answer */
+	qstate->ext_state[id] = module_finished;
+	return 0;
+}
+
+/**
+ * Return an error to the client and cache the error code in the
+ * message cache (so per qname, qtype, qclass).
+ * @param qstate: our query state
+ * @param id: module id
+ * @param rcode: error code (DNS errcode).
+ * @return: 0 for use by caller, to make notation easy, like:
+ *	return error_response(..).
+ */
+static int
+error_response_cache(struct module_qstate* qstate, int id, int rcode)
+{
+	/* store in cache */
+	struct reply_info err;
+	if(qstate->prefetch_leeway > NORR_TTL) {
+		verbose(VERB_ALGO, "error response for prefetch in cache");
+		/* attempt to adjust the cache entry prefetch */
+		if(dns_cache_prefetch_adjust(qstate->env, &qstate->qinfo,
+			NORR_TTL))
+			return error_response(qstate, id, rcode);
+		/* if that fails (not in cache), fall through to store err */
+	}
+	/* build a minimal negative reply that only carries the rcode,
+	 * cached for NORR_TTL seconds */
+	memset(&err, 0, sizeof(err));
+	err.flags = (uint16_t)(BIT_QR | BIT_RA);
+	FLAGS_SET_RCODE(err.flags, rcode);
+	err.qdcount = 1;
+	err.ttl = NORR_TTL;
+	err.prefetch_ttl = PREFETCH_TTL_CALC(err.ttl);
+	/* do not waste time trying to validate this servfail */
+	err.security = sec_status_indeterminate;
+	verbose(VERB_ALGO, "store error response in message cache");
+	iter_dns_store(qstate->env, &qstate->qinfo, &err, 0, 0, 0, NULL);
+	return error_response(qstate, id, rcode);
+}
+
+/** check if prepend item is duplicate item
+ * @param sets: array of rrsets to scan.
+ * @param to: number of entries of sets to scan.
+ * @param dup: candidate rrset.
+ * @return 1 if sets[0..to-1] holds an rrset with the same type, class
+ *	and owner name as dup, else 0. */
+static int
+prepend_is_duplicate(struct ub_packed_rrset_key** sets, size_t to,
+	struct ub_packed_rrset_key* dup)
+{
+	size_t i;
+	for(i=0; i<to; i++) {
+		if(sets[i]->rk.type == dup->rk.type &&
+			sets[i]->rk.rrset_class == dup->rk.rrset_class &&
+			sets[i]->rk.dname_len == dup->rk.dname_len &&
+			query_dname_compare(sets[i]->rk.dname, dup->rk.dname)
+			== 0)
+			return 1;
+	}
+	return 0;
+}
+
+/** prepend the prepend list in the answer and authority section of dns_msg
+ * @param iq: iterator query state with the accumulated prepend lists
+ *	(CNAME/DNAME chain for ANSWER, NSEC/NSEC3 for AUTHORITY).
+ * @param msg: the response to prepend the rrsets to.
+ * @param region: where the enlarged rrset pointer array is allocated.
+ * @return 0 on (allocation) failure, 1 on success. */
+static int
+iter_prepend(struct iter_qstate* iq, struct dns_msg* msg,
+	struct regional* region)
+{
+	struct iter_prep_list* p;
+	struct ub_packed_rrset_key** sets;
+	size_t num_an = 0, num_ns = 0;
+	for(p = iq->an_prepend_list; p; p = p->next)
+		num_an++;
+	for(p = iq->ns_prepend_list; p; p = p->next)
+		num_ns++;
+	if(num_an + num_ns == 0)
+		return 1;
+	verbose(VERB_ALGO, "prepending %d rrsets", (int)num_an + (int)num_ns);
+	sets = regional_alloc(region, (num_an+num_ns+msg->rep->rrset_count) *
+		sizeof(struct ub_packed_rrset_key*));
+	if(!sets)
+		return 0;
+	/* ANSWER section */
+	num_an = 0;
+	for(p = iq->an_prepend_list; p; p = p->next) {
+		sets[num_an++] = p->rrset;
+	}
+	memcpy(sets+num_an, msg->rep->rrsets, msg->rep->an_numrrsets *
+		sizeof(struct ub_packed_rrset_key*));
+	/* AUTH section; skip entries already prepended or already present
+	 * in the message's authority section */
+	num_ns = 0;
+	for(p = iq->ns_prepend_list; p; p = p->next) {
+		if(prepend_is_duplicate(sets+msg->rep->an_numrrsets+num_an,
+			num_ns, p->rrset) || prepend_is_duplicate(
+			msg->rep->rrsets+msg->rep->an_numrrsets,
+			msg->rep->ns_numrrsets, p->rrset))
+			continue;
+		sets[msg->rep->an_numrrsets + num_an + num_ns++] = p->rrset;
+	}
+	memcpy(sets + num_an + msg->rep->an_numrrsets + num_ns,
+		msg->rep->rrsets + msg->rep->an_numrrsets,
+		(msg->rep->ns_numrrsets + msg->rep->ar_numrrsets) *
+		sizeof(struct ub_packed_rrset_key*));
+
+	/* NXDOMAIN rcode can stay if we prepended DNAME/CNAMEs, because
+	 * this is what recursors should give. */
+	msg->rep->rrset_count += num_an + num_ns;
+	msg->rep->an_numrrsets += num_an;
+	msg->rep->ns_numrrsets += num_ns;
+	msg->rep->rrsets = sets;
+	return 1;
+}
+
+/**
+ * Add rrset to ANSWER prepend list (kept in insertion order, appended
+ * to the final answer by iter_prepend).
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param rrset: rrset to add.
+ * @return false on failure (malloc).
+ */
+static int
+iter_add_prepend_answer(struct module_qstate* qstate, struct iter_qstate* iq,
+	struct ub_packed_rrset_key* rrset)
+{
+	struct iter_prep_list* p = (struct iter_prep_list*)regional_alloc(
+		qstate->region, sizeof(struct iter_prep_list));
+	if(!p)
+		return 0;
+	p->rrset = rrset;
+	p->next = NULL;
+	/* add at end */
+	if(iq->an_prepend_last)
+		iq->an_prepend_last->next = p;
+	else iq->an_prepend_list = p;
+	iq->an_prepend_last = p;
+	return 1;
+}
+
+/**
+ * Add rrset to AUTHORITY prepend list (kept in insertion order, appended
+ * to the final answer by iter_prepend).
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param rrset: rrset to add.
+ * @return false on failure (malloc).
+ */
+static int
+iter_add_prepend_auth(struct module_qstate* qstate, struct iter_qstate* iq,
+	struct ub_packed_rrset_key* rrset)
+{
+	struct iter_prep_list* p = (struct iter_prep_list*)regional_alloc(
+		qstate->region, sizeof(struct iter_prep_list));
+	if(!p)
+		return 0;
+	p->rrset = rrset;
+	p->next = NULL;
+	/* add at end */
+	if(iq->ns_prepend_last)
+		iq->ns_prepend_last->next = p;
+	else iq->ns_prepend_list = p;
+	iq->ns_prepend_last = p;
+	return 1;
+}
+
+/**
+ * Given a CNAME response (defined as a response containing a CNAME or DNAME
+ * that does not answer the request), process the response, modifying the
+ * state as necessary. This follows the CNAME/DNAME chain and returns the
+ * final query name.
+ *
+ * sets the new query name, after following the CNAME/DNAME chain.
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param msg: the response.
+ * @param mname: returned target new query name.
+ * @param mname_len: length of mname.
+ * @return false on (malloc) error.
+ */
+static int
+handle_cname_response(struct module_qstate* qstate, struct iter_qstate* iq,
+	struct dns_msg* msg, uint8_t** mname, size_t* mname_len)
+{
+	size_t i;
+	/* Start with the (current) qname. */
+	*mname = iq->qchase.qname;
+	*mname_len = iq->qchase.qname_len;
+
+	/* Iterate over the ANSWER rrsets in order, looking for CNAMEs and
+	 * DNAMES. */
+	for(i=0; i<msg->rep->an_numrrsets; i++) {
+		struct ub_packed_rrset_key* r = msg->rep->rrsets[i];
+		/* If there is a (relevant) DNAME, add it to the list.
+		 * We always expect there to be a CNAME that was generated
+		 * by this DNAME following, so we don't process the DNAME
+		 * directly. */
+		if(ntohs(r->rk.type) == LDNS_RR_TYPE_DNAME &&
+			dname_strict_subdomain_c(*mname, r->rk.dname)) {
+			if(!iter_add_prepend_answer(qstate, iq, r))
+				return 0;
+			continue;
+		}
+
+		if(ntohs(r->rk.type) == LDNS_RR_TYPE_CNAME &&
+			query_dname_compare(*mname, r->rk.dname) == 0) {
+			/* Add this relevant CNAME rrset to the prepend list.*/
+			if(!iter_add_prepend_answer(qstate, iq, r))
+				return 0;
+			/* follow the chain: mname becomes the CNAME target */
+			get_cname_target(r, mname, mname_len);
+		}
+
+		/* Other rrsets in the section are ignored. */
+	}
+	/* add authority rrsets to authority prepend, for wildcarded CNAMEs */
+	for(i=msg->rep->an_numrrsets; i<msg->rep->an_numrrsets +
+		msg->rep->ns_numrrsets; i++) {
+		struct ub_packed_rrset_key* r = msg->rep->rrsets[i];
+		/* only add NSEC/NSEC3, as they may be needed for validation */
+		if(ntohs(r->rk.type) == LDNS_RR_TYPE_NSEC ||
+			ntohs(r->rk.type) == LDNS_RR_TYPE_NSEC3) {
+			if(!iter_add_prepend_auth(qstate, iq, r))
+				return 0;
+		}
+	}
+	return 1;
+}
+
+/**
+ * Generate a subrequest.
+ * Generate a local request event. Local events are tied to this module, and
+ * have a corresponding (first tier) event that is waiting for this event to
+ * resolve to continue.
+ *
+ * @param qname The query name for this request.
+ * @param qnamelen length of qname
+ * @param qtype The query type for this request.
+ * @param qclass The query class for this request.
+ * @param qstate The event that is generating this event.
+ * @param id: module id.
+ * @param iq: The iterator state that is generating this event.
+ * @param initial_state The initial response state (normally this
+ *          is QUERY_RESP_STATE, unless it is known that the request won't
+ *          need iterative processing).
+ * @param finalstate The final state for the response to this request.
+ * @param subq_ret: if newly allocated, the subquerystate, or NULL if it does
+ * 	not need initialisation.
+ * @param v: if true, validation is done on the subquery.
+ * @return false on error (malloc).
+ */
+static int
+generate_sub_request(uint8_t* qname, size_t qnamelen, uint16_t qtype,
+	uint16_t qclass, struct module_qstate* qstate, int id,
+	struct iter_qstate* iq, enum iter_state initial_state,
+	enum iter_state finalstate, struct module_qstate** subq_ret, int v)
+{
+	struct module_qstate* subq = NULL;
+	struct iter_qstate* subiq = NULL;
+	uint16_t qflags = 0; /* OPCODE QUERY, no flags */
+	struct query_info qinf;
+	int prime = (finalstate == PRIME_RESP_STATE)?1:0;
+	qinf.qname = qname;
+	qinf.qname_len = qnamelen;
+	qinf.qtype = qtype;
+	qinf.qclass = qclass;
+
+	/* RD should be set only when sending the query back through the INIT
+	 * state. */
+	if(initial_state == INIT_REQUEST_STATE)
+		qflags |= BIT_RD;
+	/* We set the CD flag so we can send this through the "head" of
+	 * the resolution chain, which might have a validator. We are
+	 * uninterested in validating things not on the direct resolution
+	 * path. */
+	if(!v)
+		qflags |= BIT_CD;
+
+	/* attach subquery, lookup existing or make a new one */
+	fptr_ok(fptr_whitelist_modenv_attach_sub(qstate->env->attach_sub));
+	if(!(*qstate->env->attach_sub)(qstate, &qinf, qflags, prime, &subq)) {
+		return 0;
+	}
+	*subq_ret = subq;
+	if(subq) {
+		/* initialise the new subquery */
+		subq->curmod = id;
+		subq->ext_state[id] = module_state_initial;
+		subq->minfo[id] = regional_alloc(subq->region,
+			sizeof(struct iter_qstate));
+		if(!subq->minfo[id]) {
+			log_err("init subq: out of memory");
+			fptr_ok(fptr_whitelist_modenv_kill_sub(
+				qstate->env->kill_sub));
+			(*qstate->env->kill_sub)(subq);
+			return 0;
+		}
+		subiq = (struct iter_qstate*)subq->minfo[id];
+		memset(subiq, 0, sizeof(*subiq));
+		subiq->num_target_queries = 0;
+		subiq->num_current_queries = 0;
+		subiq->depth = iq->depth+1;
+		outbound_list_init(&subiq->outlist);
+		subiq->state = initial_state;
+		subiq->final_state = finalstate;
+		subiq->qchase = subq->qinfo;
+		subiq->chase_flags = subq->query_flags;
+		subiq->refetch_glue = 0;
+	}
+	return 1;
+}
+
+/**
+ * Generate and send a root priming request.
+ * @param qstate: the qstate that triggered the need to prime.
+ * @param iq: iterator query state.
+ * @param id: module id.
+ * @param qclass: the class to prime.
+ * @return 0 on failure
+ */
+static int
+prime_root(struct module_qstate* qstate, struct iter_qstate* iq, int id,
+	uint16_t qclass)
+{
+	struct delegpt* dp;
+	struct module_qstate* subq;
+	verbose(VERB_DETAIL, "priming . %s NS",
+		sldns_lookup_by_id(sldns_rr_classes, (int)qclass)?
+		sldns_lookup_by_id(sldns_rr_classes, (int)qclass)->name:"??");
+	dp = hints_lookup_root(qstate->env->hints, qclass);
+	if(!dp) {
+		verbose(VERB_ALGO, "Cannot prime due to lack of hints");
+		return 0;
+	}
+	/* Priming requests start at the QUERYTARGETS state, skipping
+	 * the normal INIT state logic (which would cause an infloop). */
+	if(!generate_sub_request((uint8_t*)"\000", 1, LDNS_RR_TYPE_NS,
+		qclass, qstate, id, iq, QUERYTARGETS_STATE, PRIME_RESP_STATE,
+		&subq, 0)) {
+		verbose(VERB_ALGO, "could not prime root");
+		return 0;
+	}
+	if(subq) {
+		struct iter_qstate* subiq =
+			(struct iter_qstate*)subq->minfo[id];
+		/* Set the initial delegation point to the hint.
+		 * copy dp, it is now part of the root prime query.
+		 * dp was part of the fixed hints structure. */
+		subiq->dp = delegpt_copy(dp, subq->region);
+		if(!subiq->dp) {
+			log_err("out of memory priming root, copydp");
+			fptr_ok(fptr_whitelist_modenv_kill_sub(
+				qstate->env->kill_sub));
+			(*qstate->env->kill_sub)(subq);
+			return 0;
+		}
+		/* there should not be any target queries. */
+		subiq->num_target_queries = 0;
+		subiq->dnssec_expected = iter_indicates_dnssec(
+			qstate->env, subiq->dp, NULL, subq->qinfo.qclass);
+	}
+
+	/* this module stops, our submodule starts, and does the query. */
+	qstate->ext_state[id] = module_wait_subquery;
+	return 1;
+}
+
+/**
+ * Generate and process a stub priming request. This method tests for the
+ * need to prime a stub zone, so it is safe to call for every request.
+ *
+ * @param qstate: the qstate that triggered the need to prime.
+ * @param iq: iterator query state.
+ * @param id: module id.
+ * @param qname: request name.
+ * @param qclass: request class.
+ * @return true if a priming subrequest was made, false if not. This will
+ * 	only issue a priming request if it detects an unprimed stub.
+ * 	Uses value of 2 to signal during stub-prime in root-prime situation
+ * 	that a noprime-stub is available and resolution can continue.
+ */
+static int
+prime_stub(struct module_qstate* qstate, struct iter_qstate* iq, int id,
+	uint8_t* qname, uint16_t qclass)
+{
+	/* Lookup the stub hint. This will return null if the stub doesn't
+	 * need to be re-primed. */
+	struct iter_hints_stub* stub;
+	struct delegpt* stub_dp;
+	struct module_qstate* subq;
+
+	if(!qname) return 0;
+	stub = hints_lookup_stub(qstate->env->hints, qname, qclass, iq->dp);
+	/* The stub (if there is one) does not need priming. */
+	if(!stub)
+		return 0;
+	stub_dp = stub->dp;
+
+	/* is it a noprime stub (always use) */
+	if(stub->noprime) {
+		int r = 0;
+		if(iq->dp == NULL) r = 2;
+		/* copy the dp out of the fixed hints structure, so that
+		 * it can be changed when servicing this query */
+		iq->dp = delegpt_copy(stub_dp, qstate->region);
+		if(!iq->dp) {
+			log_err("out of memory priming stub");
+			(void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+			return 1; /* return 1 to make module stop, with error */
+		}
+		log_nametypeclass(VERB_DETAIL, "use stub", stub_dp->name,
+			LDNS_RR_TYPE_NS, qclass);
+		return r;
+	}
+
+	/* Otherwise, we need to (re)prime the stub. */
+	log_nametypeclass(VERB_DETAIL, "priming stub", stub_dp->name,
+		LDNS_RR_TYPE_NS, qclass);
+
+	/* Stub priming events start at the QUERYTARGETS state to avoid the
+	 * redundant INIT state processing. */
+	if(!generate_sub_request(stub_dp->name, stub_dp->namelen,
+		LDNS_RR_TYPE_NS, qclass, qstate, id, iq,
+		QUERYTARGETS_STATE, PRIME_RESP_STATE, &subq, 0)) {
+		verbose(VERB_ALGO, "could not prime stub");
+		(void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+		return 1; /* return 1 to make module stop, with error */
+	}
+	if(subq) {
+		struct iter_qstate* subiq =
+			(struct iter_qstate*)subq->minfo[id];
+
+		/* Set the initial delegation point to the hint. */
+		/* make copy to avoid use of stub dp by different qs/threads */
+		subiq->dp = delegpt_copy(stub_dp, subq->region);
+		if(!subiq->dp) {
+			log_err("out of memory priming stub, copydp");
+			fptr_ok(fptr_whitelist_modenv_kill_sub(
+				qstate->env->kill_sub));
+			(*qstate->env->kill_sub)(subq);
+			(void)error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+			return 1; /* return 1 to make module stop, with error */
+		}
+		/* there should not be any target queries -- although there
+		 * wouldn't be anyway, since stub hints never have
+		 * missing targets. */
+		subiq->num_target_queries = 0;
+		subiq->wait_priming_stub = 1;
+		subiq->dnssec_expected = iter_indicates_dnssec(
+			qstate->env, subiq->dp, NULL, subq->qinfo.qclass);
+	}
+
+	/* this module stops, our submodule starts, and does the query. */
+	qstate->ext_state[id] = module_wait_subquery;
+	return 1;
+}
+
+/**
+ * Generate A and AAAA checks for glue that is in-zone for the referral
+ * we just got to obtain authoritative information on the addresses.
+ *
+ * @param qstate: the qstate that triggered the need to prime.
+ * @param iq: iterator query state.
+ * @param id: module id.
+ */
+static void
+generate_a_aaaa_check(struct module_qstate* qstate, struct iter_qstate* iq,
+	int id)
+{
+	struct iter_env* ie = (struct iter_env*)qstate->env->modinfo[id];
+	struct module_qstate* subq;
+	size_t i;
+	struct reply_info* rep = iq->response->rep;
+	struct ub_packed_rrset_key* s;
+	log_assert(iq->dp);
+
+	/* no more checks if the dependency depth budget is used up */
+	if(iq->depth == ie->max_dependency_depth)
+		return;
+	/* walk through additional, and check if in-zone,
+	 * only relevant A, AAAA are left after scrub anyway */
+	for(i=rep->an_numrrsets+rep->ns_numrrsets; i<rep->rrset_count; i++) {
+		s = rep->rrsets[i];
+		/* check *ALL* addresses that are transmitted in additional*/
+		/* is it an address ? */
+		if( !(ntohs(s->rk.type)==LDNS_RR_TYPE_A ||
+			ntohs(s->rk.type)==LDNS_RR_TYPE_AAAA)) {
+			continue;
+		}
+		/* is this query the same as the A/AAAA check for it */
+		if(qstate->qinfo.qtype == ntohs(s->rk.type) &&
+			qstate->qinfo.qclass == ntohs(s->rk.rrset_class) &&
+			query_dname_compare(qstate->qinfo.qname,
+				s->rk.dname)==0 &&
+			(qstate->query_flags&BIT_RD) &&
+			!(qstate->query_flags&BIT_CD))
+			continue;
+
+		/* generate subrequest for it */
+		log_nametypeclass(VERB_ALGO, "schedule addr fetch",
+			s->rk.dname, ntohs(s->rk.type),
+			ntohs(s->rk.rrset_class));
+		if(!generate_sub_request(s->rk.dname, s->rk.dname_len,
+			ntohs(s->rk.type), ntohs(s->rk.rrset_class),
+			qstate, id, iq,
+			INIT_REQUEST_STATE, FINISHED_STATE, &subq, 1)) {
+			verbose(VERB_ALGO, "could not generate addr check");
+			return;
+		}
+		/* ignore subq - no need for more init */
+	}
+}
+
+/**
+ * Generate a NS check request to obtain authoritative information
+ * on an NS rrset.
+ *
+ * @param qstate: the qstate that triggered the need to prime.
+ * @param iq: iterator query state.
+ * @param id: module id.
+ */
+static void
+generate_ns_check(struct module_qstate* qstate, struct iter_qstate* iq, int id)
+{
+	struct iter_env* ie = (struct iter_env*)qstate->env->modinfo[id];
+	struct module_qstate* subq;
+	log_assert(iq->dp);
+
+	/* no more checks if the dependency depth budget is used up */
+	if(iq->depth == ie->max_dependency_depth)
+		return;
+	/* is this query the same as the nscheck? */
+	if(qstate->qinfo.qtype == LDNS_RR_TYPE_NS &&
+		query_dname_compare(iq->dp->name, qstate->qinfo.qname)==0 &&
+		(qstate->query_flags&BIT_RD) && !(qstate->query_flags&BIT_CD)){
+		/* spawn off A, AAAA queries for in-zone glue to check */
+		generate_a_aaaa_check(qstate, iq, id);
+		return;
+	}
+
+	log_nametypeclass(VERB_ALGO, "schedule ns fetch",
+		iq->dp->name, LDNS_RR_TYPE_NS, iq->qchase.qclass);
+	if(!generate_sub_request(iq->dp->name, iq->dp->namelen,
+		LDNS_RR_TYPE_NS, iq->qchase.qclass, qstate, id, iq,
+		INIT_REQUEST_STATE, FINISHED_STATE, &subq, 1)) {
+		verbose(VERB_ALGO, "could not generate ns check");
+		return;
+	}
+	if(subq) {
+		struct iter_qstate* subiq =
+			(struct iter_qstate*)subq->minfo[id];
+
+		/* make copy to avoid use of stub dp by different qs/threads */
+		/* refetch glue to start higher up the tree */
+		subiq->refetch_glue = 1;
+		subiq->dp = delegpt_copy(iq->dp, subq->region);
+		if(!subiq->dp) {
+			log_err("out of memory generating ns check, copydp");
+			fptr_ok(fptr_whitelist_modenv_kill_sub(
+				qstate->env->kill_sub));
+			(*qstate->env->kill_sub)(subq);
+			return;
+		}
+	}
+}
+
+/**
+ * Generate a DNSKEY prefetch query to get the DNSKEY for the DS record we
+ * just got in a referral (where we have dnssec_expected, thus have trust
+ * anchors above it). Note that right after calling this routine the
+ * iterator detached subqueries (because of following the referral), and thus
+ * the DNSKEY query becomes detached, its return stored in the cache for
+ * later lookup by the validator. This cache lookup by the validator avoids
+ * the roundtrip incurred by the DNSKEY query. The DNSKEY query is now
+ * performed at about the same time the original query is sent to the domain,
+ * thus the two answers are likely to be returned at about the same time,
+ * saving a roundtrip from the validated lookup.
+ *
+ * @param qstate: the qstate that triggered the need to prime.
+ * @param iq: iterator query state.
+ * @param id: module id.
+ */
+static void
+generate_dnskey_prefetch(struct module_qstate* qstate,
+	struct iter_qstate* iq, int id)
+{
+	struct module_qstate* subq;
+	log_assert(iq->dp);
+
+	/* is this query the same as the prefetch? */
+	if(qstate->qinfo.qtype == LDNS_RR_TYPE_DNSKEY &&
+		query_dname_compare(iq->dp->name, qstate->qinfo.qname)==0 &&
+		(qstate->query_flags&BIT_RD) && !(qstate->query_flags&BIT_CD)){
+		return;
+	}
+
+	/* if the DNSKEY is in the cache this lookup will stop quickly */
+	log_nametypeclass(VERB_ALGO, "schedule dnskey prefetch",
+		iq->dp->name, LDNS_RR_TYPE_DNSKEY, iq->qchase.qclass);
+	if(!generate_sub_request(iq->dp->name, iq->dp->namelen,
+		LDNS_RR_TYPE_DNSKEY, iq->qchase.qclass, qstate, id, iq,
+		INIT_REQUEST_STATE, FINISHED_STATE, &subq, 0)) {
+		/* we'll be slower, but it'll work */
+		verbose(VERB_ALGO, "could not generate dnskey prefetch");
+		return;
+	}
+	if(subq) {
+		struct iter_qstate* subiq =
+			(struct iter_qstate*)subq->minfo[id];
+		/* this qstate has the right delegation for the dnskey lookup*/
+		/* make copy to avoid use of stub dp by different qs/threads */
+		subiq->dp = delegpt_copy(iq->dp, subq->region);
+		/* if !subiq->dp, it'll start from the cache, no problem */
+	}
+}
+
+/**
+ * See if the query needs forwarding.
+ *
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @return true if the request is forwarded, false if not.
+ * 	If it returns true, but iq->dp is NULL, a malloc failure occurred.
+ */
+static int
+forward_request(struct module_qstate* qstate, struct iter_qstate* iq)
+{
+	struct delegpt* dp;
+	uint8_t* delname = iq->qchase.qname;
+	size_t delnamelen = iq->qchase.qname_len;
+	if(iq->refetch_glue) {
+		delname = iq->dp->name;
+		delnamelen = iq->dp->namelen;
+	}
+	/* strip one label off of DS query to lookup higher for it */
+	if( (iq->qchase.qtype == LDNS_RR_TYPE_DS || iq->refetch_glue)
+		&& !dname_is_root(iq->qchase.qname))
+		dname_remove_label(&delname, &delnamelen);
+	dp = forwards_lookup(qstate->env->fwds, delname, iq->qchase.qclass);
+	if(!dp)
+		return 0;
+	/* send recursion desired to forward addr */
+	iq->chase_flags |= BIT_RD;
+	iq->dp = delegpt_copy(dp, qstate->region);
+	/* iq->dp checked by caller */
+	verbose(VERB_ALGO, "forwarding request");
+	return 1;
+}
+
+/**
+ * Process the initial part of the request handling. This state roughly
+ * corresponds to resolver algorithms steps 1 (find answer in cache) and 2
+ * (find the best servers to ask).
+ *
+ * Note that all requests start here, and query restarts revisit this state.
+ *
+ * This state either generates: 1) a response, from cache or error, 2) a
+ * priming event, or 3) forwards the request to the next state (init2,
+ * generally).
+ *
+ * @param qstate: query state.
+ * @param iq: iterator query state.
+ * @param ie: iterator shared global environment.
+ * @param id: module id.
+ * @return true if the event needs more request processing immediately,
+ * false if not.
+ */
+static int
+processInitRequest(struct module_qstate* qstate, struct iter_qstate* iq,
+ struct iter_env* ie, int id)
+{
+ uint8_t* delname;
+ size_t delnamelen;
+ struct dns_msg* msg;
+
+ log_query_info(VERB_DETAIL, "resolving", &qstate->qinfo);
+ /* check effort */
+
+ /* We enforce a maximum number of query restarts. This is primarily a
+ * cheap way to prevent CNAME loops. */
+ if(iq->query_restart_count > MAX_RESTART_COUNT) {
+ verbose(VERB_QUERY, "request has exceeded the maximum number"
+ " of query restarts with %d", iq->query_restart_count);
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+
+ /* We enforce a maximum recursion/dependency depth -- in general,
+ * this is unnecessary for dependency loops (although it will
+ * catch those), but it provides a sensible limit to the amount
+ * of work required to answer a given query. */
+ verbose(VERB_ALGO, "request has dependency depth of %d", iq->depth);
+ if(iq->depth > ie->max_dependency_depth) {
+ verbose(VERB_QUERY, "request has exceeded the maximum "
+ "dependency depth with depth of %d", iq->depth);
+ return error_response(qstate, id, LDNS_RCODE_SERVFAIL);
+ }
+
+ /* If the request is qclass=ANY, setup to generate each class */
+ if(qstate->qinfo.qclass == LDNS_RR_CLASS_ANY) {
+ iq->qchase.qclass = 0;
+ return next_state(iq, COLLECT_CLASS_STATE);
+ }
+
+ /* Resolver Algorithm Step 1 -- Look for the answer in local data. */
+
+ /* This either results in a query restart (CNAME cache response), a
+ * terminating response (ANSWER), or a cache miss (null). */
+
+ if(qstate->blacklist) {
+ /* if cache, or anything else, was blacklisted then
+ * getting older results from cache is a bad idea, no cache */
+ verbose(VERB_ALGO, "cache blacklisted, going to the network");
+ msg = NULL;
+ } else {
+ msg = dns_cache_lookup(qstate->env, iq->qchase.qname,
+ iq->qchase.qname_len, iq->qchase.qtype,
+ iq->qchase.qclass, qstate->region, qstate->env->scratch);
+ if(!msg && qstate->env->neg_cache) {
+ /* lookup in negative cache; may result in
+ * NOERROR/NODATA or NXDOMAIN answers that need validation */
+ msg = val_neg_getmsg(qstate->env->neg_cache, &iq->qchase,
+ qstate->region, qstate->env->rrset_cache,
+ qstate->env->scratch_buffer,
+ *qstate->env->now, 1/*add SOA*/, NULL);
+ }
+ /* item taken from cache does not match our query name, thus
+ * security needs to be re-examined later */
+ if(msg && query_dname_compare(qstate->qinfo.qname,
+ iq->qchase.qname) != 0)
+ msg->rep->security = sec_status_unchecked;
+ }
+ if(msg) {
+ /* handle positive cache response */
+ enum response_type type = response_type_from_cache(msg,
+ &iq->qchase);
+ if(verbosity >= VERB_ALGO) {
+ log_dns_msg("msg from cache lookup", &msg->qinfo,
+ msg->rep);
+ verbose(VERB_ALGO, "msg ttl is %d, prefetch ttl %d",
+ (int)msg->rep->ttl,
+ (int)msg->rep->prefetch_ttl);
+ }
+
+ if(type == RESPONSE_TYPE_CNAME) {
+ uint8_t* sname = 0;
+ size_t slen = 0;
+ verbose(VERB_ALGO, "returning CNAME response from "
+ "cache");
+ if(!handle_cname_response(qstate, iq, msg,
+ &sname, &slen))
+ return error_response(qstate, id,
+ LDNS_RCODE_SERVFAIL);
+ iq->qchase.qname = sname;
+ iq->qchase.qname_len = slen;
+ /* This *is* a query restart, even if it is a cheap
+ * one. */
+ iq->dp = NULL;
+ iq->refetch_glue = 0;
+ iq->query_restart_count++;
+ iq->sent_count = 0;
+ sock_list_insert(&qstate->reply_origin, NULL, 0, qstate->region);
+ return next_state(iq, INIT_REQUEST_STATE);
+ }
+
+ /* if from cache, NULL, else insert 'cache IP' len=0 */
+ if(qstate->reply_origin)
+ sock_list_insert(&qstate->reply_origin, NULL, 0, qstate->region);
+ /* it is an answer, response, to final state */
+ verbose(VERB_ALGO, "returning answer from cache.");
+ iq->response = msg;
+ return final_state(iq);
+ }
+
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
More information about the svn-src-vendor
mailing list