Attachment 'iterate.c'
Download 1 /* Copyright (C) 2014-2017 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
2
3 This program is free software: you can redistribute it and/or modify
4 it under the terms of the GNU General Public License as published by
5 the Free Software Foundation, either version 3 of the License, or
6 (at your option) any later version.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12
13 You should have received a copy of the GNU General Public License
14 along with this program. If not, see <https://www.gnu.org/licenses/>.
15 */
16
17 #include <sys/time.h>
18 #include <assert.h>
19 #include <arpa/inet.h>
20
21 #include <libknot/descriptor.h>
22 #include <libknot/rrtype/rdname.h>
23 #include <libknot/rrtype/rrsig.h>
24
25 #include "lib/layer/iterate.h"
26 #include "lib/resolve.h"
27 #include "lib/rplan.h"
28 #include "lib/defines.h"
29 #include "lib/nsrep.h"
30 #include "lib/module.h"
31 #include "lib/dnssec/ta.h"
32
33 #define VERBOSE_MSG(fmt...) QRVERBOSE(req->current_query, "iter", fmt)
34 #define QVERBOSE_MSG(qry, fmt...) QRVERBOSE(qry, "iter", fmt)
35
36 /* Iterator often walks through packet section, this is an abstraction. */
37 typedef int (*rr_callback_t)(const knot_rrset_t *, unsigned, struct kr_request *);
38
39 /** Return minimized QNAME/QTYPE for current zone cut. */
40 static const knot_dname_t *minimized_qname(struct kr_query *query, uint16_t *qtype)
41 {
42 /* Minimization disabled. */
43 const knot_dname_t *qname = query->sname;
44 if (qname[0] == '\0' || query->flags & (QUERY_NO_MINIMIZE|QUERY_STUB)) {
45 return qname;
46 }
47
48 /* Minimize name to contain current zone cut + 1 label. */
49 int cut_labels = knot_dname_labels(query->zone_cut.name, NULL);
50 int qname_labels = knot_dname_labels(qname, NULL);
51 while(qname[0] && qname_labels > cut_labels + 1) {
52 qname = knot_wire_next_label(qname, NULL);
53 qname_labels -= 1;
54 }
55
56 /* Hide QTYPE if minimized. */
57 if (qname != query->sname) {
58 *qtype = KNOT_RRTYPE_NS;
59 }
60
61 return qname;
62 }
63
64 /** Answer is paired to query. */
65 static bool is_paired_to_query(const knot_pkt_t *answer, struct kr_query *query)
66 {
67 uint16_t qtype = query->stype;
68 const knot_dname_t *qname = minimized_qname(query, &qtype);
69
70 return query->id == knot_wire_get_id(answer->wire) &&
71 knot_wire_get_qdcount(answer->wire) > 0 &&
72 query->sclass == knot_pkt_qclass(answer) &&
73 qtype == knot_pkt_qtype(answer) &&
74 knot_dname_is_equal(qname, knot_pkt_qname(answer));
75 }
76
77 /** Relaxed rule for AA, either AA=1 or SOA matching zone cut is required. */
78 static bool is_authoritative(const knot_pkt_t *answer, struct kr_query *query)
79 {
80 if (knot_wire_get_aa(answer->wire)) {
81 return true;
82 }
83
84 const knot_pktsection_t *ns = knot_pkt_section(answer, KNOT_AUTHORITY);
85 for (unsigned i = 0; i < ns->count; ++i) {
86 const knot_rrset_t *rr = knot_pkt_rr(ns, i);
87 if (rr->type == KNOT_RRTYPE_SOA && knot_dname_in(query->zone_cut.name, rr->owner)) {
88 return true;
89 }
90 }
91
92 #ifndef STRICT_MODE
93 /* Last resort to work around broken auths, if the zone cut is at/parent of the QNAME. */
94 if (knot_dname_is_equal(query->zone_cut.name, knot_pkt_qname(answer))) {
95 return true;
96 }
97 #endif
98 return false;
99 }
100
101 int kr_response_classify(knot_pkt_t *pkt)
102 {
103 const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
104 switch (knot_wire_get_rcode(pkt->wire)) {
105 case KNOT_RCODE_NOERROR:
106 return (an->count == 0) ? PKT_NODATA : PKT_NOERROR;
107 case KNOT_RCODE_NXDOMAIN:
108 return PKT_NXDOMAIN;
109 case KNOT_RCODE_REFUSED:
110 return PKT_REFUSED;
111 default:
112 return PKT_ERROR;
113 }
114 }
115
/** @internal Filter unusable glue addresses: IPv4 ANY and 127.0.0.0/8,
 * IPv6 :: and ::1.
 * @param addr raw address bytes as found in the packet (may be unaligned)
 * @param len  address length; anything other than 4 or 16 passes unchecked
 * @return false for addresses that must not be used as nameserver glue. */
static bool is_valid_addr(const uint8_t *addr, size_t len)
{
	if (len == sizeof(struct in_addr)) {
		/* Copy via memcpy: addr points into a packet buffer and may be
		 * unaligned; dereferencing it as uint32_t would be undefined
		 * behaviour (alignment + strict aliasing). */
		uint32_t ip_net;
		memcpy(&ip_net, addr, sizeof(ip_net));
		const uint32_t ip_host = ntohl(ip_net);
		/* Filter ANY and 127.0.0.0/8. */
		if (ip_host == 0 || (ip_host & 0xff000000) == 0x7f000000) {
			return false;
		}
	} else if (len == sizeof(struct in6_addr)) {
		static const uint8_t zeros[sizeof(struct in6_addr) - 1] = { 0 };
		/* If all but the last byte are zero, the last byte decides:
		 * 0 is ANY (::), 1 is loopback (::1), anything higher is valid. */
		if (memcmp(addr, zeros, sizeof(zeros)) == 0) {
			return (addr[len - 1] > 1);
		}
	}
	return true;
}
135
/** @internal Update NS address from record \a rr. Return _FAIL on error.
 * Only A/AAAA records are considered; any other type is accepted as a no-op.
 * Invalid glue (ANY/loopback, see is_valid_addr()) is skipped unless the
 * query carries QUERY_ALLOW_LOCAL. */
static int update_nsaddr(const knot_rrset_t *rr, struct kr_query *query)
{
	if (rr->type == KNOT_RRTYPE_A || rr->type == KNOT_RRTYPE_AAAA) {
		/* Only the first rdata of the rrset is inspected here. */
		const knot_rdata_t *rdata = rr->rrs.data;
		const void *addr = knot_rdata_data(rdata);
		const int addr_len = knot_rdata_rdlen(rdata);
		char name_str[KNOT_DNAME_MAXLEN];
		char addr_str[INET6_ADDRSTRLEN];
		WITH_VERBOSE {
			/* Stringify owner + address for the log messages below.
			 * NOTE(review): the buffers stay uninitialized when verbose
			 * logging is compiled/switched off; the messages are not
			 * emitted in that case. */
			const int af = (addr_len == sizeof(struct in_addr)) ?
				       AF_INET : AF_INET6;
			knot_dname_to_str(name_str, rr->owner, sizeof(name_str));
			inet_ntop(af, addr, addr_str, sizeof(addr_str));
		}
		if (!(query->flags & QUERY_ALLOW_LOCAL) &&
		    !is_valid_addr(addr, addr_len)) {
			QVERBOSE_MSG(query, "<= ignoring invalid glue for "
				     "'%s': '%s'\n", name_str, addr_str);
			return KR_STATE_CONSUME; /* Ignore invalid addresses */
		}
		int ret = kr_zonecut_add(&query->zone_cut, rr->owner, rdata);
		if (ret != 0) {
			return KR_STATE_FAIL;
		}
		QVERBOSE_MSG(query, "<= using glue for "
			     "'%s': '%s'\n", name_str, addr_str);
	}
	return KR_STATE_CONSUME;
}
166
167 /** @internal From \a pkt, fetch glue records for name \a ns, and update the cut etc. */
168 static void fetch_glue(knot_pkt_t *pkt, const knot_dname_t *ns, struct kr_request *req)
169 {
170 for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
171 const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
172 for (unsigned k = 0; k < sec->count; ++k) {
173 const knot_rrset_t *rr = knot_pkt_rr(sec, k);
174 if (!knot_dname_is_equal(ns, rr->owner)) {
175 continue;
176 }
177 if ((rr->type != KNOT_RRTYPE_A) &&
178 (rr->type != KNOT_RRTYPE_AAAA)) {
179 continue;
180 }
181 if ((rr->type == KNOT_RRTYPE_A) &&
182 (req->ctx->options & QUERY_NO_IPV4)) {
183 continue;
184 }
185 if ((rr->type == KNOT_RRTYPE_AAAA) &&
186 (req->ctx->options & QUERY_NO_IPV6)) {
187 continue;
188 }
189 (void) update_nsaddr(rr, req->current_query);
190 }
191 }
192 }
193
194 /** Attempt to find glue for given nameserver name (best effort). */
195 static int has_glue(knot_pkt_t *pkt, const knot_dname_t *ns)
196 {
197 for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
198 const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
199 for (unsigned k = 0; k < sec->count; ++k) {
200 const knot_rrset_t *rr = knot_pkt_rr(sec, k);
201 if (knot_dname_is_equal(ns, rr->owner) &&
202 (rr->type == KNOT_RRTYPE_A || rr->type == KNOT_RRTYPE_AAAA)) {
203 return 1;
204 }
205 }
206 }
207 return 0;
208 }
209
/** @internal Update the cut with another NS(+glue) record.
 * @param current_cut is cut name before this packet.
 * @return _DONE if cut->name changes, _FAIL on error, and _CONSUME otherwise. */
static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr,
		      struct kr_request *req, const knot_dname_t *current_cut)
{
	struct kr_query *qry = req->current_query;
	struct kr_zonecut *cut = &qry->zone_cut;
	int state = KR_STATE_CONSUME;

	/* New authority MUST be at/below the authority of the current cut;
	 * also qname must be below new authority;
	 * otherwise it's a possible cache injection attempt. */
	if (!knot_dname_in(current_cut, rr->owner) ||
	    !knot_dname_in(rr->owner, qry->sname)) {
		VERBOSE_MSG("<= authority: ns outside bailiwick\n");
#ifdef STRICT_MODE
		return KR_STATE_FAIL;
#else
		/* Workaround: ignore out-of-bailiwick NSs for authoritative answers,
		 * but fail for referrals. This is important to detect lame answers. */
		if (knot_pkt_section(pkt, KNOT_ANSWER)->count == 0) {
			state = KR_STATE_FAIL;
		}
		return state;
#endif
	}

	/* Update zone cut name */
	if (!knot_dname_is_equal(rr->owner, cut->name)) {
		/* Remember parent cut and descend to new (keep keys and TA). */
		struct kr_zonecut *parent = mm_alloc(&req->pool, sizeof(*parent));
		if (parent) {
			memcpy(parent, cut, sizeof(*parent));
			kr_zonecut_init(cut, rr->owner, &req->pool);
			cut->key = parent->key;
			cut->trust_anchor = parent->trust_anchor;
			cut->parent = parent;
		} else {
			/* Allocation failed: overwrite the cut in place
			 * (the parent link and inherited keys are lost). */
			kr_zonecut_set(cut, rr->owner);
		}
		state = KR_STATE_DONE;
	}

	/* Fetch glue for each NS */
	for (unsigned i = 0; i < rr->rrs.rr_count; ++i) {
		const knot_dname_t *ns_name = knot_ns_name(&rr->rrs, i);
		int glue_records = has_glue(pkt, ns_name);
		/* Glue is mandatory for NS below zone */
		if (!glue_records && knot_dname_in(rr->owner, ns_name)) {
			VERBOSE_MSG("<= authority: missing mandatory glue, rejecting\n");
			continue;
		}
		kr_zonecut_add(cut, ns_name, NULL);
		/* Choose when to use glue records. */
		if (qry->flags & QUERY_PERMISSIVE) {
			/* Permissive mode accepts any glue present in the packet. */
			fetch_glue(pkt, ns_name, req);
		} else if (qry->flags & QUERY_STRICT) {
			/* Strict mode uses only mandatory glue. */
			if (knot_dname_in(cut->name, ns_name))
				fetch_glue(pkt, ns_name, req);
		} else {
			/* Normal mode uses in-bailiwick glue. */
			if (knot_dname_in(current_cut, ns_name))
				fetch_glue(pkt, ns_name, req);
		}
	}

	return state;
}
280
/** @internal Copy in-bailiwick AUTHORITY records into req->auth_selected.
 * @param to_wire whether the records should also go into the final answer;
 *                forced off for referrals (AA=0).
 * @return kr_ok() or the error from kr_ranked_rrarray_add(). */
static int pick_authority(knot_pkt_t *pkt, struct kr_request *req, bool to_wire)
{
	struct kr_query *qry = req->current_query;
	const knot_pktsection_t *ns = knot_pkt_section(pkt, KNOT_AUTHORITY);
	/* Pre-rank SECURE only when validation is off or the data came from cache. */
	uint8_t rank = !(qry->flags & QUERY_DNSSEC_WANT) || (qry->flags & QUERY_CACHED) ?
		KR_VLDRANK_SECURE : KR_VLDRANK_INITIAL;
	const knot_dname_t *zonecut_name = qry->zone_cut.name;
	bool referral = !knot_wire_get_aa(pkt->wire);
	if (referral) {
		/* zone cut already updated by process_authority()
		 * use parent zonecut name */
		zonecut_name = qry->zone_cut.parent ? qry->zone_cut.parent->name : qry->zone_cut.name;
		to_wire = false;
	}

	for (unsigned i = 0; i < ns->count; ++i) {
		const knot_rrset_t *rr = knot_pkt_rr(ns, i);
		/* Drop authority records outside the relevant zone cut. */
		if (!knot_dname_in(zonecut_name, rr->owner)) {
			continue;
		}
		int ret = kr_ranked_rrarray_add(&req->auth_selected, rr,
						rank, to_wire, qry->uid, &req->pool);
		if (ret != kr_ok()) {
			return ret;
		}
	}

	return kr_ok();
}
310
/** @internal Process the AUTHORITY section: update the zone cut from NS
 * records and detect referrals.
 * @return KR_STATE_DONE for a referral (cut changed, or unhelpful referral
 *         in DNSSEC mode), KR_STATE_FAIL on a bogus delegation,
 *         KR_STATE_CONSUME otherwise (caller processes the answer). */
static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
{
	struct kr_query *qry = req->current_query;
	assert(!(qry->flags & QUERY_STUB));

	int result = KR_STATE_CONSUME;
	const knot_pktsection_t *ns = knot_pkt_section(pkt, KNOT_AUTHORITY);

#ifdef STRICT_MODE
	/* AA, terminate resolution chain. */
	if (knot_wire_get_aa(pkt->wire)) {
		return KR_STATE_CONSUME;
	}
#else

	/* Work around servers sending back CNAME with different delegation and no AA. */
	const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
	if (an->count > 0 && ns->count > 0) {
		const knot_rrset_t *rr = knot_pkt_rr(an, 0);
		if (rr->type == KNOT_RRTYPE_CNAME) {
			return KR_STATE_CONSUME;
		}
		/* Work around for these NSs which are authoritative both for
		 * parent and child and mixes data from both zones in single answer */
		if (knot_wire_get_aa(pkt->wire) &&
		    (rr->type == qry->stype) &&
		    (knot_dname_is_equal(rr->owner, qry->sname))) {
			return KR_STATE_CONSUME;
		}
	}
#endif
	/* Remember current bailiwick for NS processing. */
	const knot_dname_t *current_zone_cut = qry->zone_cut.name;
	bool ns_record_exists = false;
	/* Update zone cut information. */
	for (unsigned i = 0; i < ns->count; ++i) {
		const knot_rrset_t *rr = knot_pkt_rr(ns, i);
		if (rr->type == KNOT_RRTYPE_NS) {
			ns_record_exists = true;
			int state = update_cut(pkt, rr, req, current_zone_cut);
			switch(state) {
			case KR_STATE_DONE: result = state; break;
			case KR_STATE_FAIL: return state; break;
			default:      /* continue */ break;
			}
		} else if (rr->type == KNOT_RRTYPE_SOA && knot_dname_is_sub(rr->owner, qry->zone_cut.name)) {
			/* SOA below cut in authority indicates different authority, but same NS set. */
			qry->zone_cut.name = knot_dname_copy(rr->owner, &req->pool);
		}
	}


	if ((qry->flags & QUERY_DNSSEC_WANT) && (result == KR_STATE_CONSUME)) {
		if (knot_wire_get_aa(pkt->wire) == 0 &&
		    knot_wire_get_ancount(pkt->wire) == 0 &&
		    ns_record_exists) {
			/* Unhelpful referral (AA=0, empty answer, but NS present):
			 * prevent it from validating as an authoritative answer. */
			result = KR_STATE_DONE;
		}
	}

	/* CONSUME => Not a referral, process answer.
	 * DONE    => Zone cut updated (referral). */
	return result;
}
377
378 static void finalize_answer(knot_pkt_t *pkt, struct kr_query *qry, struct kr_request *req)
379 {
380 /* Finalize header */
381 knot_pkt_t *answer = req->answer;
382 knot_wire_set_rcode(answer->wire, knot_wire_get_rcode(pkt->wire));
383 }
384
/** @internal Walk the CNAME chain in the ANSWER section, copying matched
 * records into req->answ_selected.
 * @param referral  true when the packet is processed as a referral; records
 *                  are then never marked for the final wire answer.
 * @param cname_ret out: the last CNAME target reached (equals query->sname
 *                  when there was no CNAME at all).
 * @return kr_ok(), or KR_STATE_FAIL on a too long/looping chain or a failed
 *         record add. */
static int unroll_cname(knot_pkt_t *pkt, struct kr_request *req, bool referral, const knot_dname_t **cname_ret)
{
	struct kr_query *query = req->current_query;
	assert(!(query->flags & QUERY_STUB));
	/* Process answer type */
	const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
	const knot_dname_t *cname = NULL;
	const knot_dname_t *pending_cname = query->sname;
	unsigned cname_chain_len = 0;
	/* Pre-rank SECURE only when validation is off or the data is cached. */
	uint8_t rank = !(query->flags & QUERY_DNSSEC_WANT) || (query->flags & QUERY_CACHED) ?
		KR_VLDRANK_SECURE : KR_VLDRANK_INITIAL;
	bool is_final = (query->parent == NULL);
	uint32_t iter_count = 0;
	bool strict_mode = (query->flags & QUERY_STRICT);
	do {
		/* CNAME was found at previous iteration, but records may not follow the correct order.
		 * Try to find records for pending_cname owner from section start. */
		cname = pending_cname;
		pending_cname = NULL;
		for (unsigned i = 0; i < an->count; ++i) {
			const knot_rrset_t *rr = knot_pkt_rr(an, i);

			/* Skip the RR if its owner+type doesn't interest us. */
			const bool type_OK = rr->type == query->stype
				|| rr->type == KNOT_RRTYPE_CNAME
				|| rr->type == KNOT_RRTYPE_DNAME /* TODO: actually handle it */
				|| (rr->type == KNOT_RRTYPE_RRSIG
					&& knot_rrsig_type_covered(&rr->rrs, 0))
				;
			if (!type_OK || !knot_dname_is_equal(rr->owner, cname)) {
				continue;
			}

			/* Process records matching current SNAME */
			int state = KR_STATE_FAIL;
			bool to_wire = false;
			if (is_final) {
				/* if not referral, mark record to be written to final answer */
				to_wire = !referral;
			} else {
				/* Sub-query: feed A/AAAA glue into the parent's zone cut. */
				state = update_nsaddr(rr, query->parent);
				if (state == KR_STATE_FAIL) {
					return state;
				}
			}
			state = kr_ranked_rrarray_add(&req->answ_selected, rr,
						      rank, to_wire, query->uid, &req->pool);
			if (state != kr_ok()) {
				return KR_STATE_FAIL;
			}
			/* Jump to next CNAME target */
			if ((query->stype == KNOT_RRTYPE_CNAME) || (rr->type != KNOT_RRTYPE_CNAME)) {
				continue;
			}
			cname_chain_len += 1;
			pending_cname = knot_cname_name(&rr->rrs);
			if (!pending_cname) {
				break;
			}
			if (cname_chain_len > an->count || cname_chain_len > KR_CNAME_CHAIN_LIMIT) {
				VERBOSE_MSG("<= too long cname chain\n");
				return KR_STATE_FAIL;
			}
			/* Don't use pending_cname immediately: there can still be
			 * records for the "old" cname later in the section. */
		}
		if (!pending_cname) {
			break;
		}
		if (knot_dname_is_equal(cname, pending_cname)) {
			VERBOSE_MSG("<= cname chain loop\n");
			return KR_STATE_FAIL;
		}
		/* In strict mode, explicitly fetch each CNAME target. */
		if (strict_mode) {
			cname = pending_cname;
			break;
		}
		/* try to unroll cname only within current zone */
		const int pending_labels = knot_dname_labels(pending_cname, NULL);
		const int cname_labels = knot_dname_labels(cname, NULL);
		if (pending_labels != cname_labels) {
			cname = pending_cname;
			break;
		}
		if (knot_dname_matched_labels(pending_cname, cname) !=
		    (cname_labels - 1)) {
			cname = pending_cname;
			break;
		}
	} while (++iter_count < KR_CNAME_CHAIN_LIMIT);
	if (iter_count >= KR_CNAME_CHAIN_LIMIT) {
		VERBOSE_MSG("<= too long cname chain\n");
		return KR_STATE_FAIL;
	}
	*cname_ret = cname;
	return kr_ok();
}
483
484 static int process_referral_answer(knot_pkt_t *pkt, struct kr_request *req)
485 {
486 const knot_dname_t *cname = NULL;
487 int state = unroll_cname(pkt, req, true, &cname);
488 if (state != kr_ok()) {
489 return KR_STATE_FAIL;
490 }
491 struct kr_query *query = req->current_query;
492 if (!(query->flags & QUERY_CACHED)) {
493 /* If not cached (i.e. got from upstream)
494 * make sure that this is not an authoritative answer
495 * (even with AA=1) for other layers.
496 * There can be answers with AA=1,
497 * empty answer section and NS in authority.
498 * Clearing of AA prevents them from
499 * caching in the packet cache.
500 * If packet already cached, don't touch him. */
501 knot_wire_clear_aa(pkt->wire);
502 }
503 state = pick_authority(pkt, req, false);
504 return state == kr_ok() ? KR_STATE_DONE : KR_STATE_FAIL;
505 }
506
/** @internal Finish the final (parent-less) query once a CNAME chain ended:
 * find the target record in answ_selected, relocate it behind the chain and
 * mark it for the wire, then finalize the answer header.
 * @return KR_STATE_DONE when an answer was finalized, KR_STATE_FAIL on
 *         error, kr_ok() when no matching record was found. */
static int process_final(knot_pkt_t *pkt, struct kr_request *req,
			 const knot_dname_t *cname)
{
	const int pkt_class = kr_response_classify(pkt);
	struct kr_query *query = req->current_query;
	ranked_rr_array_t *array = &req->answ_selected;
	for (size_t i = 0; i < array->len; ++i) {
		const knot_rrset_t *rr = array->at[i]->rr;
		/* Only the record matching the final owner + class + type counts. */
		if (!knot_dname_is_equal(rr->owner, cname)) {
			continue;
		}
		if ((rr->rclass != query->sclass) ||
		    (rr->type != query->stype)) {
			continue;
		}
		/* Authority goes to the wire only for negative answers. */
		const bool to_wire = ((pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) != 0);
		const int state = pick_authority(pkt, req, to_wire);
		if (state != kr_ok()) {
			return KR_STATE_FAIL;
		}
		if (!array->at[i]->to_wire) {
			const size_t last_idx = array->len - 1;
			size_t j = i;
			ranked_rr_array_entry_t *entry = array->at[i];
			/* Relocate record to the end, after current cname */
			while (j < last_idx) {
				array->at[j] = array->at[j + 1];
				++j;
			}
			array->at[last_idx] = entry;
			entry->to_wire = true;
		}
		finalize_answer(pkt, query, req);
		return KR_STATE_DONE;
	}
	return kr_ok();
}
544
/** @internal Process an authoritative answer: handle minimization retries,
 * unroll CNAMEs, plan a follow-up query when the chain leaves this name,
 * and pick authority records for validation / the final answer.
 * @return KR_STATE_DONE, KR_STATE_FAIL, or KR_STATE_CONSUME (retry). */
static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
{
	struct kr_query *query = req->current_query;

	/* Response for minimized QNAME.
	 * NODATA   => may be empty non-terminal, retry (found zone cut)
	 * NOERROR  => found zone cut, retry
	 * NXDOMAIN => parent is zone cut, retry as a workaround for bad authoritatives
	 */
	bool is_final = (query->parent == NULL);
	int pkt_class = kr_response_classify(pkt);
	if (!knot_dname_is_equal(knot_pkt_qname(pkt), query->sname) &&
	    (pkt_class & (PKT_NOERROR|PKT_NXDOMAIN|PKT_REFUSED|PKT_NODATA))) {
		VERBOSE_MSG("<= found cut, retrying with non-minimized name\n");
		query->flags |= QUERY_NO_MINIMIZE;
		return KR_STATE_CONSUME;
	}

	/* This answer didn't improve resolution chain, therefore must be authoritative (relaxed to negative). */
	if (!is_authoritative(pkt, query)) {
		if (pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) {
			VERBOSE_MSG("<= lame response: non-auth sent negative response\n");
			return KR_STATE_FAIL;
		}
	}

	const knot_dname_t *cname = NULL;
	/* Process answer type */
	int state = unroll_cname(pkt, req, false, &cname);
	if (state != kr_ok()) {
		return state;
	}
	/* Make sure that this is an authoritative answer (even with AA=0) for other layers */
	knot_wire_set_aa(pkt->wire);
	/* Either way it resolves current query. */
	query->flags |= QUERY_RESOLVED;
	/* Follow canonical name as next SNAME. */
	if (!knot_dname_is_equal(cname, query->sname)) {
		/* Check if target record has been already copied */
		query->flags |= QUERY_CNAME;
		if (is_final) {
			state = process_final(pkt, req, cname);
			if (state != kr_ok()) {
				return state;
			}
		}
		VERBOSE_MSG("<= cname chain, following\n");
		/* Check if the same query was already resolved */
		for (int i = 0; i < req->rplan.resolved.len; ++i) {
			struct kr_query * q = req->rplan.resolved.at[i];
			if (q->parent == query->parent &&
			    q->sclass == query->sclass &&
			    q->stype == query->stype &&
			    knot_dname_is_equal(q->sname, cname)) {
				VERBOSE_MSG("<= cname chain loop\n");
				return KR_STATE_FAIL;
			}
		}
		struct kr_query *next = kr_rplan_push(&req->rplan, query->parent, cname, query->sclass, query->stype);
		if (!next) {
			return KR_STATE_FAIL;
		}
		next->flags |= QUERY_AWAIT_CUT;
		/* Inherit (in)security from the current query, or derive it
		 * from trust-anchor coverage of the CNAME target. */
		if (query->flags & QUERY_DNSSEC_INSECURE) {
			next->flags &= ~QUERY_DNSSEC_WANT;
			next->flags |= QUERY_DNSSEC_INSECURE;
		} else if (kr_ta_covers(&req->ctx->trust_anchors, cname) &&
			   !kr_ta_covers(&req->ctx->negative_anchors, cname)) {
			/* Want DNSSEC if it's possible to secure
			 * this name (e.g. is covered by any TA) */
			next->flags |= QUERY_DNSSEC_WANT;
		}
		state = pick_authority(pkt, req, false);
		if (state != kr_ok()) {
			return KR_STATE_FAIL;
		}
	} else if (!query->parent) {
		/* Answer for initial query */
		const bool to_wire = ((pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) != 0);
		state = pick_authority(pkt, req, to_wire);
		if (state != kr_ok()) {
			return KR_STATE_FAIL;
		}
		finalize_answer(pkt, query, req);
	} else {
		/* Answer for sub-query; DS, IP for NS etc.
		 * It may contain NSEC / NSEC3 records for
		 * data non-existence or wildcard expansion proving.
		 * If yes, they must be validated by the validator.
		 * If no, the authority section is not useful.
		 * dnssec/nsec.c & dnssec/nsec3.c use
		 * rrsets from the incoming packet, while the
		 * validator uses answ_selected & auth_selected.
		 * So, if nsec/nsec3 records are present in authority,
		 * pick_authority() must be called.
		 * TODO refactor nsec/nsec3 modules to work with
		 * answ_selected & auth_selected instead of incoming pkt. */
		bool auth_is_unuseful = true;
		const knot_pktsection_t *ns = knot_pkt_section(pkt, KNOT_AUTHORITY);
		for (unsigned i = 0; i < ns->count; ++i) {
			const knot_rrset_t *rr = knot_pkt_rr(ns, i);
			if (rr->type == KNOT_RRTYPE_NSEC ||
			    rr->type == KNOT_RRTYPE_NSEC3) {
				auth_is_unuseful = false;
				break;
			}
		}
		if (!auth_is_unuseful) {
			state = pick_authority(pkt, req, false);
			if (state != kr_ok()) {
				return KR_STATE_FAIL;
			}
		}
	}
	return KR_STATE_DONE;
}
661
662 /** @internal like process_answer() but for the forwarding mode. */
663 static int process_stub(knot_pkt_t *pkt, struct kr_request *req)
664 {
665 struct kr_query *query = req->current_query;
666 assert(query->flags & QUERY_STUB);
667 /* Pick all answer RRs. */
668 const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
669 for (unsigned i = 0; i < an->count; ++i) {
670 const knot_rrset_t *rr = knot_pkt_rr(an, i);
671 int err = kr_ranked_rrarray_add(&req->answ_selected, rr,
672 KR_VLDRANK_INITIAL, true, query->uid, &req->pool);
673 if (err != kr_ok()) {
674 return KR_STATE_FAIL;
675 }
676 }
677
678 knot_wire_set_aa(pkt->wire);
679 query->flags |= QUERY_RESOLVED;
680 /* Pick authority RRs. */
681 int pkt_class = kr_response_classify(pkt);
682 const bool to_wire = ((pkt_class & (PKT_NXDOMAIN|PKT_NODATA)) != 0);
683 int err = pick_authority(pkt, req, to_wire);
684 if (err != kr_ok()) {
685 return KR_STATE_FAIL;
686 }
687
688 finalize_answer(pkt, query, req);
689 return KR_STATE_DONE;
690 }
691
692
693 /** Error handling, RFC1034 5.3.3, 4d. */
694 static int resolve_error(knot_pkt_t *pkt, struct kr_request *req)
695 {
696 return KR_STATE_FAIL;
697 }
698
699 /* State-less single resolution iteration step, not needed. */
700 static int reset(kr_layer_t *ctx) { return KR_STATE_PRODUCE; }
701
702 /* Set resolution context and parameters. */
703 static int begin(kr_layer_t *ctx)
704 {
705 if (ctx->state & (KR_STATE_DONE|KR_STATE_FAIL)) {
706 return ctx->state;
707 }
708 /*
709 * RFC7873 5.4 extends the QUERY operation code behaviour in order to
710 * be able to generate requests for server cookies. Such requests have
711 * QDCOUNT equal to zero and must contain a cookie option.
712 * Server cookie queries must be handled by the cookie module/layer
713 * before this layer.
714 */
715 const knot_pkt_t *pkt = ctx->req->qsource.packet;
716 if (!pkt || knot_wire_get_qdcount(pkt->wire) == 0) {
717 return KR_STATE_FAIL;
718 }
719
720 struct kr_query *qry = ctx->req->current_query;
721 /* Avoid any other classes, and avoid any meta-types ~~except for ANY~~. */
722 if (qry->sclass != KNOT_CLASS_IN
723 || (knot_rrtype_is_metatype(qry->stype)
724 /* && qry->stype != KNOT_RRTYPE_ANY hmm ANY seems broken ATM */)) {
725 knot_wire_set_rcode(ctx->req->answer->wire, KNOT_RCODE_NOTIMPL);
726 return KR_STATE_FAIL;
727 }
728
729 return reset(ctx);
730 }
731
/** Build the outgoing query packet for the current (possibly minimized)
 * QNAME/QTYPE and assign it a fresh random message ID.
 * @return kr_ok(), or the libknot error from knot_pkt_put_question(). */
int kr_make_query(struct kr_query *query, knot_pkt_t *pkt)
{
	/* Minimize QNAME (if possible). */
	uint16_t qtype = query->stype;
	const knot_dname_t *qname = minimized_qname(query, &qtype);

	/* Form a query for the authoritative. */
	knot_pkt_clear(pkt);
	int ret = knot_pkt_put_question(pkt, qname, query->sclass, qtype);
	if (ret != KNOT_EOK) {
		return ret;
	}

	/* Query built, expect answer. */
	query->id = kr_rand_uint(UINT16_MAX);
	knot_wire_set_id(pkt->wire, query->id);
	pkt->parsed = pkt->size;
	WITH_VERBOSE {
		/* Log the original (non-minimized) name and type for tracing. */
		char name_str[KNOT_DNAME_MAXLEN], type_str[16];
		knot_dname_to_str(name_str, query->sname, sizeof(name_str));
		knot_rrtype_to_string(query->stype, type_str, sizeof(type_str));
		QVERBOSE_MSG(query, "'%s' type '%s' id was assigned, parent id %hu\n",
			name_str, type_str, query->parent ? query->parent->id : 0);
	}
	return kr_ok();
}
758
759 static int prepare_query(kr_layer_t *ctx, knot_pkt_t *pkt)
760 {
761 assert(pkt && ctx);
762 struct kr_request *req = ctx->req;
763 struct kr_query *query = req->current_query;
764 if (!query || ctx->state & (KR_STATE_DONE|KR_STATE_FAIL)) {
765 return ctx->state;
766 }
767
768 /* Make query */
769 int ret = kr_make_query(query, pkt);
770 if (ret != 0) {
771 return KR_STATE_FAIL;
772 }
773
774 query->uid = req->rplan.next_uid;
775 req->rplan.next_uid += 1;
776
777 return KR_STATE_CONSUME;
778 }
779
780 static int resolve_badmsg(knot_pkt_t *pkt, struct kr_request *req, struct kr_query *query)
781 {
782
783 #ifndef STRICT_MODE
784 /* Work around broken auths/load balancers */
785 if (query->flags & QUERY_SAFEMODE) {
786 return resolve_error(pkt, req);
787 } else if (query->flags & QUERY_NO_MINIMIZE) {
788 query->flags |= QUERY_SAFEMODE;
789 return KR_STATE_DONE;
790 } else {
791 query->flags |= QUERY_NO_MINIMIZE;
792 return KR_STATE_DONE;
793 }
794 #else
795 return resolve_error(pkt, req);
796 #endif
797 }
798
/** Resolve input query or continue resolution with followups.
 *
 * This roughly corresponds to RFC1034, 5.3.3 4a-d.
 * Consume hook: sanity-check the packet, dispatch on RCODE, then process
 * the authority/answer sections (or forward wholesale in stub mode).
 */
static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
{
	assert(pkt && ctx);
	struct kr_request *req = ctx->req;
	struct kr_query *query = req->current_query;
	if (!query) {
		return ctx->state;
	}

	WITH_VERBOSE {
		if (query->flags & QUERY_TRACE) {
			VERBOSE_MSG("<= answer received:\n");
			kr_pkt_print(pkt);
		}
	}

	/* Already resolved, or waiting for a cookie retry: nothing to do. */
	if (query->flags & (QUERY_RESOLVED|QUERY_BADCOOKIE_AGAIN)) {
		return ctx->state;
	}

	/* Check for packet processing errors first.
	 * Note - we *MUST* check if it has at least a QUESTION,
	 * otherwise it would crash on accessing QNAME. */
#ifdef STRICT_MODE
	if (pkt->parsed < pkt->size) {
		VERBOSE_MSG("<= pkt contains excessive data\n");
		return resolve_badmsg(pkt, req, query);
	} else
#endif
	if (pkt->parsed <= KNOT_WIRE_HEADER_SIZE) {
		VERBOSE_MSG("<= malformed response\n");
		return resolve_badmsg(pkt, req, query);
	} else if (!is_paired_to_query(pkt, query)) {
		VERBOSE_MSG("<= ignoring mismatching response\n");
		/* Force TCP, to work around authoritatives messing up question
		 * without yielding to spoofed responses. */
		query->flags |= QUERY_TCP;
		return resolve_badmsg(pkt, req, query);
	} else if (knot_wire_get_tc(pkt->wire)) {
		VERBOSE_MSG("<= truncated response, failover to TCP\n");
		/* NOTE(review): query was already null-checked above;
		 * this inner guard is redundant but harmless. */
		if (query) {
			/* Fail if already on TCP. */
			if (query->flags & QUERY_TCP) {
				VERBOSE_MSG("<= TC=1 with TCP, bailing out\n");
				return resolve_error(pkt, req);
			}
			query->flags |= QUERY_TCP;
		}
		return KR_STATE_CONSUME;
	}

#ifndef NOVERBOSELOG
	const knot_lookup_t *rcode = knot_lookup_by_id(knot_rcode_names, knot_wire_get_rcode(pkt->wire));
#endif

	/* Check response code. */
	switch(knot_wire_get_rcode(pkt->wire)) {
	case KNOT_RCODE_NOERROR:
	case KNOT_RCODE_NXDOMAIN:
		break; /* OK */
	case KNOT_RCODE_SERVFAIL: {
		if (query->flags & QUERY_STUB) { break; } /* Pass through in stub mode */
		VERBOSE_MSG("<= rcode: %s\n", rcode ? rcode->name : "??");
		query->fails += 1;
		if (query->fails >= KR_QUERY_NSRETRY_LIMIT) {
			query->fails = 0; /* Reset per-query counter. */
			return resolve_error(pkt, req);
		} else {
			query->flags |= QUERY_NO_MINIMIZE; /* Drop minimisation as a safe-guard. */
			return KR_STATE_CONSUME;
		}
	}
	case KNOT_RCODE_REFUSED:
	case KNOT_RCODE_FORMERR:
	case KNOT_RCODE_NOTIMPL:
		VERBOSE_MSG("<= rcode: %s\n", rcode ? rcode->name : "??");
		return resolve_badmsg(pkt, req, query);
	default:
		VERBOSE_MSG("<= rcode: %s\n", rcode ? rcode->name : "??");
		return resolve_error(pkt, req);
	}

	/* Forwarding/stub mode is special. */
	if (query->flags & QUERY_STUB) {
		return process_stub(pkt, req);
	}

	/* Resolve authority to see if it's referral or authoritative. */
	int state = process_authority(pkt, req);
	switch(state) {
	case KR_STATE_CONSUME: /* Not referral, process answer. */
		VERBOSE_MSG("<= rcode: %s\n", rcode ? rcode->name : "??");
		state = process_answer(pkt, req);
		break;
	case KR_STATE_DONE: /* Referral */
		state = process_referral_answer(pkt,req);
		VERBOSE_MSG("<= referral response, follow\n");
		break;
	default:
		break;
	}

	return state;
}
907
/** Module implementation: expose the iterator's layer hooks.
 * @param module unused; the layer table is static and shared. */
const kr_layer_api_t *iterate_layer(struct kr_module *module)
{
	static const kr_layer_api_t _layer = {
		.begin = &begin,
		.reset = &reset,
		.consume = &resolve,
		.produce = &prepare_query
	};
	return &_layer;
}
919
920 KR_MODULE_EXPORT(iterate)
921
922 #undef VERBOSE_MSG
923
Attached Files
To refer to attachments on a page, use attachment:filename, as shown below in the list of files. Do NOT use the URL of the [get] link, since this is subject to change and can break easily. You are not allowed to attach a file to this page.