From b49d075cce77cccc50450248689c513cae36d783 Mon Sep 17 00:00:00 2001 From: eabdullin Date: Mon, 15 Sep 2025 11:35:28 +0000 Subject: [PATCH] import CS bind9.18-9.18.29-4.el9_6 --- .../bind-9.18-CVE-2024-11187-pre-test.patch | 68 + SOURCES/bind-9.18-CVE-2024-11187.patch | 226 +++ SOURCES/bind-9.18-CVE-2024-12705.patch | 1418 +++++++++++++++++ SOURCES/bind-9.18-query-fname-relative.patch | 90 ++ SOURCES/bind-9.21-resume-qmin-cname.patch | 44 + SPECS/bind9.18.spec | 21 +- 6 files changed, 1866 insertions(+), 1 deletion(-) create mode 100644 SOURCES/bind-9.18-CVE-2024-11187-pre-test.patch create mode 100644 SOURCES/bind-9.18-CVE-2024-11187.patch create mode 100644 SOURCES/bind-9.18-CVE-2024-12705.patch create mode 100644 SOURCES/bind-9.18-query-fname-relative.patch create mode 100644 SOURCES/bind-9.21-resume-qmin-cname.patch diff --git a/SOURCES/bind-9.18-CVE-2024-11187-pre-test.patch b/SOURCES/bind-9.18-CVE-2024-11187-pre-test.patch new file mode 100644 index 0000000..9b0cca8 --- /dev/null +++ b/SOURCES/bind-9.18-CVE-2024-11187-pre-test.patch @@ -0,0 +1,68 @@ +From cd48dcb0f87f8bed8138cbc4635a6a46f3148620 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Ond=C5=99ej=20Sur=C3=BD?= +Date: Tue, 7 Jan 2025 15:22:40 +0100 +Subject: [PATCH] Isolate using the -T noaa flag only for part of the resolver + test + +Instead of running the whole resolver/ns4 server with -T noaa flag, +use it only for the part where it is actually needed. The -T noaa +could interfere with other parts of the test because the answers don't +have the authoritative-answer bit set, and we could have false +positives (or false negatives) in the test because the authoritative +server doesn't follow the DNS protocol for all the tests in the resolver +system test. + +(cherry picked from commit e51d4d3b88af00d6667f2055087ebfc47fb3107c) +--- + bin/tests/system/resolver/ns4/named.noaa | 12 ------------ + bin/tests/system/resolver/tests.sh | 8 ++++++++ + 2 files changed, 8 insertions(+), 12 deletions(-) + delete mode 100644 bin/tests/system/resolver/ns4/named.noaa + +diff --git a/bin/tests/system/resolver/ns4/named.noaa b/bin/tests/system/resolver/ns4/named.noaa +deleted file mode 100644 +index be78cc2c949..00000000000 +--- a/bin/tests/system/resolver/ns4/named.noaa ++++ /dev/null +@@ -1,12 +0,0 @@ +-Copyright (C) Internet Systems Consortium, Inc. ("ISC") +- +-SPDX-License-Identifier: MPL-2.0 +- +-This Source Code Form is subject to the terms of the Mozilla Public +-License, v. 2.0. If a copy of the MPL was not distributed with this +-file, you can obtain one at https://mozilla.org/MPL/2.0/. +- +-See the COPYRIGHT file distributed with this work for additional +-information regarding copyright ownership. +- +-Add -T noaa. 
+diff --git a/bin/tests/system/resolver/tests.sh b/bin/tests/system/resolver/tests.sh +index 982ff9761be..23b42f728cd 100755 +--- a/bin/tests/system/resolver/tests.sh ++++ b/bin/tests/system/resolver/tests.sh +@@ -322,6 +322,10 @@ done + if [ $ret != 0 ]; then echo_i "failed"; fi + status=$((status + ret)) + ++stop_server ns4 ++touch ns4/named.noaa ++start_server --noclean --restart --port ${PORT} ns4 || ret=1 ++ + n=$((n + 1)) + echo_i "RT21594 regression test check setup ($n)" + ret=0 +@@ -358,6 +362,10 @@ grep "status: NXDOMAIN" dig.ns5.out.${n} >/dev/null || ret=1 + if [ $ret != 0 ]; then echo_i "failed"; fi + status=$((status + ret)) + ++stop_server ns4 ++rm ns4/named.noaa ++start_server --noclean --restart --port ${PORT} ns4 || ret=1 ++ + n=$((n + 1)) + echo_i "check that replacement of additional data by a negative cache no data entry clears the additional RRSIGs ($n)" + ret=0 +-- +2.48.1 + diff --git a/SOURCES/bind-9.18-CVE-2024-11187.patch b/SOURCES/bind-9.18-CVE-2024-11187.patch new file mode 100644 index 0000000..68682e6 --- /dev/null +++ b/SOURCES/bind-9.18-CVE-2024-11187.patch @@ -0,0 +1,226 @@ +From 7ded6b358ced23bb6214c7309cff0850b7d1b77d Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Ond=C5=99ej=20Sur=C3=BD?= +Date: Thu, 14 Nov 2024 10:37:29 +0100 +Subject: [PATCH] Limit the additional processing for large RDATA sets + +When answering queries, don't add data to the additional section if +the answer has more than 13 names in the RDATA. This limits the +number of lookups into the database(s) during a single client query, +reducing query processing load. + +Also, don't append any additional data to type=ANY queries. The +answer to ANY is already big enough. + +(cherry picked from commit a1982cf1bb95c818aa7b58988b5611dec80f2408) +--- + bin/tests/system/additional/tests.sh | 2 +- + lib/dns/include/dns/rdataset.h | 10 +++++++++- + lib/dns/rbtdb.c | 2 +- + lib/dns/rdataset.c | 7 ++++++- + lib/dns/resolver.c | 19 ++++++++++++------- + lib/ns/query.c | 12 ++++++++---- + 6 files changed, 37 insertions(+), 15 deletions(-) + +diff --git a/bin/tests/system/additional/tests.sh b/bin/tests/system/additional/tests.sh +index 193c9f9..e1b0cfb 100644 +--- a/bin/tests/system/additional/tests.sh ++++ b/bin/tests/system/additional/tests.sh +@@ -279,7 +279,7 @@ n=$((n + 1)) + echo_i "testing with 'minimal-any no;' ($n)" + ret=0 + $DIG $DIGOPTS -t ANY www.rt.example @10.53.0.1 >dig.out.$n || ret=1 +-grep "ANSWER: 3, AUTHORITY: 2, ADDITIONAL: 2" dig.out.$n >/dev/null || ret=1 ++grep "ANSWER: 3, AUTHORITY: 2, ADDITIONAL: 1" dig.out.$n >/dev/null || ret=1 + if [ $ret -eq 1 ]; then + echo_i "failed" + status=$((status + 1)) +diff --git a/lib/dns/include/dns/rdataset.h b/lib/dns/include/dns/rdataset.h +index f63591c..b28686a 100644 +--- a/lib/dns/include/dns/rdataset.h ++++ b/lib/dns/include/dns/rdataset.h +@@ -54,6 +54,8 @@ + #include + #include + ++#define DNS_RDATASET_MAXADDITIONAL 13 ++ + ISC_LANG_BEGINDECLS + + typedef enum { +@@ -453,7 +455,8 @@ dns_rdataset_towirepartial(dns_rdataset_t *rdataset, + isc_result_t + dns_rdataset_additionaldata(dns_rdataset_t *rdataset, + const dns_name_t *owner_name, +- dns_additionaldatafunc_t add, void *arg); ++ dns_additionaldatafunc_t add, void *arg, ++ size_t limit); + /*%< + * For each rdata in rdataset, call 'add' for each name and type in the + * rdata which is subject to additional section processing. 
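To make the new 'limit' contract concrete, here is a minimal caller sketch
(assuming BIND 9.18's internal headers and the three-argument form of
dns_additionaldatafunc_t; "append_additional" and "add_additional" are
hypothetical names, not part of this patch):

	#include <isc/util.h>
	#include <dns/rdataset.h>
	#include <dns/result.h>

	static isc_result_t
	append_additional(void *arg, const dns_name_t *name, dns_rdatatype_t type) {
		/* Hypothetical: look up 'name'/'type' and append it to the
		 * additional section; invoked once per name in the RDATA. */
		UNUSED(arg);
		UNUSED(name);
		UNUSED(type);
		return (ISC_R_SUCCESS);
	}

	static void
	add_additional(dns_rdataset_t *rdataset, const dns_name_t *owner, void *arg) {
		/* Refuse additional-section processing when the rdataset
		 * holds more than DNS_RDATASET_MAXADDITIONAL (13) records;
		 * passing 0 keeps the old unbounded behaviour, as the
		 * resolver-internal callers in this patch do. */
		isc_result_t result = dns_rdataset_additionaldata(
			rdataset, owner, append_additional, arg,
			DNS_RDATASET_MAXADDITIONAL);
		if (result == DNS_R_TOOMANYRECORDS) {
			/* Too many records: additional data was skipped. */
		}
	}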
+@@ -472,10 +475,15 @@ dns_rdataset_additionaldata(dns_rdataset_t *rdataset, + *\li If a call to dns_rdata_additionaldata() is not successful, the + * result returned will be the result of dns_rdataset_additionaldata(). + * ++ *\li If 'limit' is non-zero and the number of the rdatasets is larger ++ * than 'limit', no additional data will be processed. ++ * + * Returns: + * + *\li #ISC_R_SUCCESS + * ++ *\li #DNS_R_TOOMANYRECORDS in case rdataset count is larger than 'limit' ++ * + *\li Any error that dns_rdata_additionaldata() can return. + */ + +diff --git a/lib/dns/rbtdb.c b/lib/dns/rbtdb.c +index 5c2f0b2..c4db047 100644 +--- a/lib/dns/rbtdb.c ++++ b/lib/dns/rbtdb.c +@@ -10317,7 +10317,7 @@ no_glue: + idx = hash_32(hash, rbtversion->glue_table_bits); + + (void)dns_rdataset_additionaldata(rdataset, dns_rootname, +- glue_nsdname_cb, &ctx); ++ glue_nsdname_cb, &ctx, 0); + + cur = isc_mem_get(rbtdb->common.mctx, sizeof(*cur)); + +diff --git a/lib/dns/rdataset.c b/lib/dns/rdataset.c +index 4d48203..0b450a9 100644 +--- a/lib/dns/rdataset.c ++++ b/lib/dns/rdataset.c +@@ -577,7 +577,8 @@ dns_rdataset_towire(dns_rdataset_t *rdataset, const dns_name_t *owner_name, + isc_result_t + dns_rdataset_additionaldata(dns_rdataset_t *rdataset, + const dns_name_t *owner_name, +- dns_additionaldatafunc_t add, void *arg) { ++ dns_additionaldatafunc_t add, void *arg, ++ size_t limit) { + dns_rdata_t rdata = DNS_RDATA_INIT; + isc_result_t result; + +@@ -589,6 +590,10 @@ dns_rdataset_additionaldata(dns_rdataset_t *rdataset, + REQUIRE(DNS_RDATASET_VALID(rdataset)); + REQUIRE((rdataset->attributes & DNS_RDATASETATTR_QUESTION) == 0); + ++ if (limit != 0 && dns_rdataset_count(rdataset) > limit) { ++ return DNS_R_TOOMANYRECORDS; ++ } ++ + result = dns_rdataset_first(rdataset); + if (result != ISC_R_SUCCESS) { + return (result); +diff --git a/lib/dns/resolver.c b/lib/dns/resolver.c +index f8f53d2..bb0bfa1 100644 +--- a/lib/dns/resolver.c ++++ b/lib/dns/resolver.c +@@ -8904,7 +8904,7 @@ rctx_answer_any(respctx_t *rctx) { + rdataset->trust = rctx->trust; + + (void)dns_rdataset_additionaldata(rdataset, rctx->aname, +- check_related, rctx); ++ check_related, rctx, 0); + } + + return (ISC_R_SUCCESS); +@@ -8952,7 +8952,7 @@ rctx_answer_match(respctx_t *rctx) { + rctx->ardataset->attributes |= DNS_RDATASETATTR_CACHE; + rctx->ardataset->trust = rctx->trust; + (void)dns_rdataset_additionaldata(rctx->ardataset, rctx->aname, +- check_related, rctx); ++ check_related, rctx, 0); + + for (sigrdataset = ISC_LIST_HEAD(rctx->aname->list); + sigrdataset != NULL; +@@ -9159,7 +9159,7 @@ rctx_authority_positive(respctx_t *rctx) { + */ + (void)dns_rdataset_additionaldata( + rdataset, name, check_related, +- rctx); ++ rctx, 0); + done = true; + } + } +@@ -9666,8 +9666,12 @@ rctx_referral(respctx_t *rctx) { + */ + INSIST(rctx->ns_rdataset != NULL); + FCTX_ATTR_SET(fctx, FCTX_ATTR_GLUING); ++ ++ /* ++ * Mark the glue records in the additional section to be cached. 
++ */ + (void)dns_rdataset_additionaldata(rctx->ns_rdataset, rctx->ns_name, +- check_related, rctx); ++ check_related, rctx, 0); + #if CHECK_FOR_GLUE_IN_ANSWER + /* + * Look in the answer section for "glue" that is incorrectly +@@ -9679,8 +9683,9 @@ rctx_referral(respctx_t *rctx) { + if (rctx->glue_in_answer && + (fctx->type == dns_rdatatype_aaaa || fctx->type == dns_rdatatype_a)) + { +- (void)dns_rdataset_additionaldata( +- rctx->ns_rdataset, rctx->ns_name, check_answer, fctx); ++ (void)dns_rdataset_additionaldata(rctx->ns_rdataset, ++ rctx->ns_name, check_answer, ++ fctx, 0); + } + #endif /* if CHECK_FOR_GLUE_IN_ANSWER */ + FCTX_ATTR_CLR(fctx, FCTX_ATTR_GLUING); +@@ -9782,7 +9787,7 @@ again: + if (CHASE(rdataset)) { + rdataset->attributes &= ~DNS_RDATASETATTR_CHASE; + (void)dns_rdataset_additionaldata( +- rdataset, name, check_related, rctx); ++ rdataset, name, check_related, rctx, 0); + rescan = true; + } + } +diff --git a/lib/ns/query.c b/lib/ns/query.c +index 5549e20..ded1eae 100644 +--- a/lib/ns/query.c ++++ b/lib/ns/query.c +@@ -2094,7 +2094,8 @@ addname: + if (trdataset != NULL && dns_rdatatype_followadditional(type)) { + if (client->additionaldepth++ < client->view->max_restarts) { + eresult = dns_rdataset_additionaldata( +- trdataset, fname, query_additional_cb, qctx); ++ trdataset, fname, query_additional_cb, qctx, ++ DNS_RDATASET_MAXADDITIONAL); + } + client->additionaldepth--; + } +@@ -2194,7 +2195,7 @@ regular: + * We don't care if dns_rdataset_additionaldata() fails. + */ + (void)dns_rdataset_additionaldata(rdataset, name, query_additional_cb, +- qctx); ++ qctx, DNS_RDATASET_MAXADDITIONAL); + CTRACE(ISC_LOG_DEBUG(3), "query_additional: done"); + } + +@@ -2220,7 +2221,8 @@ query_addrrset(query_ctx_t *qctx, dns_name_t **namep, + * To the current response for 'client', add the answer RRset + * '*rdatasetp' and an optional signature set '*sigrdatasetp', with + * owner name '*namep', to section 'section', unless they are +- * already there. Also add any pertinent additional data. ++ * already there. Also add any pertinent additional data, unless ++ * the query was for type ANY. + * + * If 'dbuf' is not NULL, then '*namep' is the name whose data is + * stored in 'dbuf'. In this case, query_addrrset() guarantees that +@@ -2275,7 +2277,9 @@ query_addrrset(query_ctx_t *qctx, dns_name_t **namep, + */ + query_addtoname(mname, rdataset); + query_setorder(qctx, mname, rdataset); +- query_additional(qctx, mname, rdataset); ++ if (qctx->qtype != dns_rdatatype_any) { ++ query_additional(qctx, mname, rdataset); ++ } + + /* + * Note: we only add SIGs if we've added the type they cover, so +-- +2.48.1 + diff --git a/SOURCES/bind-9.18-CVE-2024-12705.patch b/SOURCES/bind-9.18-CVE-2024-12705.patch new file mode 100644 index 0000000..39db165 --- /dev/null +++ b/SOURCES/bind-9.18-CVE-2024-12705.patch @@ -0,0 +1,1418 @@ +From 05f63912f7a9246ccf1c59ee0b93782b5a576dbf Mon Sep 17 00:00:00 2001 +From: Artem Boldariev +Date: Wed, 4 Sep 2024 18:53:35 +0300 +Subject: [PATCH] Implement TCP manual read timer control functionality + +This commit adds a manual TCP read timer control mode which is +supposed to override automatic resetting of the timer when any data is +received. That can be accomplished by +`isc__nmhandle_set_manual_timer()`. + +This functionality is supposed to be used by multilevel networking +transports which require finer grained control over the read +timer (TLS Stream, DoH). + +The commit is essentially an implementation of the functionality from +newer versions of BIND. 
+
+(cherry picked from commit a67b3255426160c55faa9d1d3aa6e7579439b2b4)
+
+Implement TLS manual read timer control functionality
+
+This commit adds a manual TLS read timer control mode which is
+supposed to override automatic resetting of the timer when any data is
+received.
+
+It both depends on and complements similar functionality in TCP.
+
+(cherry picked from commit 13d521fa5fe7102733db15079fd49c7ccf5b0648)
+
+Add isc__nm_async_run()
+
+This commit adds isc__nm_async_run() which is very similar to
+isc_async_run() in newer versions of BIND: it allows calling a
+callback asynchronously.
+
+Potentially, it can be used to replace some other async operations in
+other networking code, in particular the delayed I/O calls in the TLS
+and TCP DNS transports, and remove quite a lot of code, but we are
+unlikely to do that for this strictly maintenance-only branch, so it
+is protected with DoH-related #ifdefs.
+
+It is implemented in a "universal" way mainly because doing it in the
+transport-specific code requires the same amount of code and is not simpler.
+
+(cherry picked from commit 125bfd71d3c8d0ad23477867f1e41a2392e03138)
+
+DoH: process data chunk by chunk instead of all at once
+
+Initially, our DNS-over-HTTP(S) implementation would try to process as
+much incoming data from the network as possible. However, that might
+be undesirable as we might create too many streams (each effectively
+backed by a ns_client_t object). That is too forgiving as it might
+overwhelm the server and trash its memory allocator, causing high CPU
+and memory usage.
+
+Instead of doing that, we resort to processing incoming data using a
+chunk-by-chunk processing strategy. That is, we split data into small
+chunks (currently 256 bytes) and process each of them
+asynchronously. However, we can process more than one chunk at
+once (up to 4 currently), given that the number of HTTP/2 streams has
+not increased while processing a chunk.
+
+That alone is not enough, though. In addition to the above, we should
+limit the number of active streams: that is, streams for which we have
+received a request and started processing it (the ones for which a
+read callback was called), as it is perfectly fine to have more opened
+streams than active ones. In case we have reached or surpassed the
+limit of active streams, we stop reading AND processing the data from
+the remote peer. The number of active streams is effectively decreased
+only when responses associated with the active streams are sent to the
+remote peer.
+
+Overall, this strategy is very similar to the one used for other
+stream-based DNS transports like TCP and TLS.
+
+(cherry picked from commit 9846f395ad79bb50a5fa5ca6ab97ef904b3be35a)
+(cherry picked from commit 11a2956dce6f983d2bfcb532f5719791845b06ab)
+
+DoH: flooding clients detection
+
+This commit adds logic to make the code better protected against
+clients that send valid HTTP/2 data that is useless from a DNS server
+perspective.
+
+Firstly, it adds logic that protects against clients who send too
+little useful (=DNS) data. We achieve that by adding a check that
+eventually detects such clients with an unfavorable useful-to-
+processed data ratio after the initial grace period. The grace period
+is limited to processing 128 KiB of data, which should be enough for
+sending the largest possible DNS message in a GET request and then
+some. This is the main safety belt that would detect even flooding
+clients that initially behave well in order to fool the server's checks.
+
+Secondly, in addition to the above, we introduce additional checks to
+detect outright misbehaving clients earlier:
+
+The code will treat clients that open too many streams (50) without
+sending any data for processing as flooding ones; the clients that
+manage to send 1.5 KiB of data without opening a single stream or
+submitting at least some DNS data will be treated as flooding ones.
+Of course, the behaviour described above is nothing but a set of
+heuristic checks, so they can never be perfect. At the same time,
+they should be reasonable enough not to drop any valid clients,
+relatively easy to implement, and have negligible computational
+overhead.
+
+(cherry picked from commit 3425e4b1d04746520931e93ac7ef5979fd6b54fd)
+(cherry picked from commit ee42514be2cc56f5d31fdb9d0c6a838e0d372654)
+
+DoH: introduce manual read timer control
+
+This commit introduces manual read timer control as used by StreamDNS
+and its underlying transports. Before that, DoH code would rely on the
+timer control provided by TCP, which would reset the timer any time
+some data arrived. Now, the timer is restarted only when a full DNS
+message is processed, in line with other DNS transports.
+
+That change is required because we should not stop the timer when
+reading from the network is paused due to throttling. We need a way to
+drop timed-out clients, particularly those who refuse to read the data
+we send.
+
+(cherry picked from commit 609a41517b1631c320876a41c43c68c9a0ee0f9f)
+(cherry picked from commit 796708775d178adc5256ce03956d134c9fd38a33)
+
+DoH: reduce excessive bad request logging
+
+We started using isc_nm_bad_request() more actively throughout the
+codebase. In the case of HTTP/2 it can lead to a large number of
+useless "Bad Request" messages in the BIND log, as we often attempt to
+send such requests over effectively finished HTTP/2 sessions.
+
+This commit fixes that.
+
+(cherry picked from commit 937b5f8349a6a5e15af254a53a659e39c7c1d179)
+(cherry picked from commit 550b692343e398e0debe18ddb5908b4ccb183b91)
+---
+ lib/isc/netmgr/http.c       | 448 +++++++++++++++++++++++++++++++++---
+ lib/isc/netmgr/netmgr-int.h |  81 ++++++-
+ lib/isc/netmgr/netmgr.c     |  80 ++++++-
+ lib/isc/netmgr/tcp.c        |  26 ++-
+ lib/isc/netmgr/tlsstream.c  | 137 +++++++++--
+ 5 files changed, 721 insertions(+), 51 deletions(-)
+
+diff --git a/lib/isc/netmgr/http.c b/lib/isc/netmgr/http.c
+index 1d4b82fd0ec..2002848c519 100644
+--- a/lib/isc/netmgr/http.c
++++ b/lib/isc/netmgr/http.c
+@@ -85,6 +85,37 @@
+ 
+ #define INITIAL_DNS_MESSAGE_BUFFER_SIZE (512)
+ 
++/*
++ * The value should be small enough to not allow a server to open too
++ * many streams at once. It should not be too small either because
++ * the incoming data will be split into too many chunks with each of
++ * them processed asynchronously.
++ */
++#define INCOMING_DATA_CHUNK_SIZE (256)
++
++/*
++ * Often processing a chunk does not change the number of streams. In
++ * that case we can process more than one chunk, but we still should
++ * have a hard limit on that.
++ */
++#define INCOMING_DATA_MAX_CHUNKS_AT_ONCE (4)
++
++/*
++ * These constants define the grace period to help detect flooding clients.
++ *
++ * The first one defines how much data can be processed before opening
++ * a first stream and receiving at least some useful (=DNS) data.
++ *
++ * The second one defines how much data from a client we read before
++ * trying to drop a client who sends not enough useful data.
++ * ++ * The third constant defines how many streams we agree to process ++ * before checking if there was at least one DNS request received. ++ */ ++#define INCOMING_DATA_INITIAL_STREAM_SIZE (1536) ++#define INCOMING_DATA_GRACE_SIZE (MAX_ALLOWED_DATA_IN_HEADERS) ++#define MAX_STREAMS_BEFORE_FIRST_REQUEST (50) ++ + typedef struct isc_nm_http_response_status { + size_t code; + size_t content_length; +@@ -143,6 +174,7 @@ struct isc_nm_http_session { + ISC_LIST(http_cstream_t) cstreams; + ISC_LIST(isc_nmsocket_h2_t) sstreams; + size_t nsstreams; ++ uint64_t total_opened_sstreams; + + isc_nmhandle_t *handle; + isc_nmhandle_t *client_httphandle; +@@ -155,6 +187,18 @@ struct isc_nm_http_session { + + isc__nm_http_pending_callbacks_t pending_write_callbacks; + isc_buffer_t *pending_write_data; ++ ++ /* ++ * The statistical values below are for usage on server-side ++ * only. They are meant to detect clients that are taking too many ++ * resources from the server. ++ */ ++ uint64_t received; /* How many requests have been received. */ ++ uint64_t submitted; /* How many responses were submitted to send */ ++ uint64_t processed; /* How many responses were processed. */ ++ ++ uint64_t processed_incoming_data; ++ uint64_t processed_useful_data; /* DNS data */ + }; + + typedef enum isc_http_error_responses { +@@ -177,6 +221,7 @@ typedef struct isc_http_send_req { + void *cbarg; + isc_buffer_t *pending_write_data; + isc__nm_http_pending_callbacks_t pending_write_callbacks; ++ uint64_t submitted; + } isc_http_send_req_t; + + #define HTTP_ENDPOINTS_MAGIC ISC_MAGIC('H', 'T', 'E', 'P') +@@ -189,10 +234,26 @@ static bool + http_send_outgoing(isc_nm_http_session_t *session, isc_nmhandle_t *httphandle, + isc_nm_cb_t cb, void *cbarg); + ++static void ++http_log_flooding_peer(isc_nm_http_session_t *session); ++ ++static bool ++http_is_flooding_peer(isc_nm_http_session_t *session); ++ ++static ssize_t ++http_process_input_data(isc_nm_http_session_t *session, ++ isc_buffer_t *input_data); ++ ++static inline bool ++http_too_many_active_streams(isc_nm_http_session_t *session); ++ + static void + http_do_bio(isc_nm_http_session_t *session, isc_nmhandle_t *send_httphandle, + isc_nm_cb_t send_cb, void *send_cbarg); + ++static void ++http_do_bio_async(isc_nm_http_session_t *session); ++ + static void + failed_httpstream_read_cb(isc_nmsocket_t *sock, isc_result_t result, + isc_nm_http_session_t *session); +@@ -494,6 +555,16 @@ finish_http_session(isc_nm_http_session_t *session) { + if (!session->closed) { + session->closed = true; + isc_nm_cancelread(session->handle); ++ isc__nmsocket_timer_stop(session->handle->sock); ++ } ++ ++ /* ++ * Free any unprocessed incoming data in order to not process ++ * it during indirect calls to http_do_bio() that might happen ++ * when calling the failed callbacks. 
++ */ ++ if (session->buf != NULL) { ++ isc_buffer_free(&session->buf); + } + + if (session->client) { +@@ -567,6 +638,7 @@ on_server_data_chunk_recv_callback(int32_t stream_id, const uint8_t *data, + if (new_bufsize <= MAX_DNS_MESSAGE_SIZE && + new_bufsize <= h2->content_length) + { ++ session->processed_useful_data += len; + isc_buffer_putmem(&h2->rbuf, data, len); + break; + } +@@ -615,6 +687,9 @@ call_unlink_cstream_readcb(http_cstream_t *cstream, + isc_buffer_usedregion(cstream->rbuf, &read_data); + cstream->read_cb(session->client_httphandle, result, &read_data, + cstream->read_cbarg); ++ if (result == ISC_R_SUCCESS) { ++ isc__nmsocket_timer_restart(session->handle->sock); ++ } + put_http_cstream(session->mctx, cstream); + } + +@@ -656,6 +731,9 @@ on_server_stream_close_callback(int32_t stream_id, + + ISC_LIST_UNLINK(session->sstreams, &sock->h2, link); + session->nsstreams--; ++ if (sock->h2.request_received) { ++ session->submitted++; ++ } + + /* + * By making a call to isc__nmsocket_prep_destroy(), we ensure that +@@ -967,6 +1045,182 @@ client_submit_request(isc_nm_http_session_t *session, http_cstream_t *stream) { + return (ISC_R_SUCCESS); + } + ++static ssize_t ++http_process_input_data(isc_nm_http_session_t *session, ++ isc_buffer_t *input_data) { ++ ssize_t readlen = 0; ++ ssize_t processed = 0; ++ isc_region_t chunk = { 0 }; ++ size_t before, after; ++ size_t i; ++ ++ REQUIRE(VALID_HTTP2_SESSION(session)); ++ REQUIRE(input_data != NULL); ++ ++ if (!http_session_active(session)) { ++ return 0; ++ } ++ ++ /* ++ * For clients that initiate request themselves just process ++ * everything. ++ */ ++ if (session->client) { ++ isc_buffer_remainingregion(input_data, &chunk); ++ if (chunk.length == 0) { ++ return 0; ++ } ++ ++ readlen = nghttp2_session_mem_recv(session->ngsession, ++ chunk.base, chunk.length); ++ ++ if (readlen >= 0) { ++ isc_buffer_forward(input_data, readlen); ++ session->processed_incoming_data += readlen; ++ } ++ ++ return readlen; ++ } ++ ++ /* ++ * If no streams are created during processing, we might process ++ * more than one chunk at a time. Still we should not overdo that ++ * to avoid processing too much data at once as such behaviour is ++ * known for trashing the memory allocator at times. ++ */ ++ for (before = after = session->nsstreams, i = 0; ++ after <= before && i < INCOMING_DATA_MAX_CHUNKS_AT_ONCE; ++ after = session->nsstreams, i++) ++ { ++ const uint64_t active_streams = ++ (session->received - session->processed); ++ ++ /* ++ * If there are non completed send requests in flight -let's ++ * not process any incoming data, as it could lead to piling ++ * up too much send data in send buffers. With many clients ++ * connected it can lead to excessive memory consumption on ++ * the server instance. ++ */ ++ if (session->sending > 0) { ++ break; ++ } ++ ++ /* ++ * If we have reached the maximum number of streams used, we ++ * might stop processing for now, as nghttp2 will happily ++ * consume as much data as possible. 
++ */ ++ if (session->nsstreams >= session->max_concurrent_streams && ++ active_streams > 0) ++ { ++ break; ++ } ++ ++ if (http_too_many_active_streams(session)) { ++ break; ++ } ++ ++ isc_buffer_remainingregion(input_data, &chunk); ++ if (chunk.length == 0) { ++ break; ++ } ++ ++ chunk.length = ISC_MIN(chunk.length, INCOMING_DATA_CHUNK_SIZE); ++ ++ readlen = nghttp2_session_mem_recv(session->ngsession, ++ chunk.base, chunk.length); ++ ++ if (readlen >= 0) { ++ isc_buffer_forward(input_data, readlen); ++ session->processed_incoming_data += readlen; ++ processed += readlen; ++ } else { ++ isc_buffer_clear(input_data); ++ return readlen; ++ } ++ } ++ ++ return processed; ++} ++ ++static void ++http_log_flooding_peer(isc_nm_http_session_t *session) { ++ const int log_level = ISC_LOG_DEBUG(1); ++ if (session->handle != NULL && isc_log_wouldlog(isc_lctx, log_level)) { ++ char client_sabuf[ISC_SOCKADDR_FORMATSIZE]; ++ char local_sabuf[ISC_SOCKADDR_FORMATSIZE]; ++ ++ isc_sockaddr_format(&session->handle->sock->peer, client_sabuf, ++ sizeof(client_sabuf)); ++ isc_sockaddr_format(&session->handle->sock->iface, local_sabuf, ++ sizeof(local_sabuf)); ++ isc_log_write(isc_lctx, ISC_LOGCATEGORY_GENERAL, ++ ISC_LOGMODULE_NETMGR, log_level, ++ "Dropping a flooding HTTP/2 peer " ++ "%s (on %s) - processed: %" PRIu64 ++ " bytes, of them useful: %" PRIu64 "", ++ client_sabuf, local_sabuf, ++ session->processed_incoming_data, ++ session->processed_useful_data); ++ } ++} ++ ++static bool ++http_is_flooding_peer(isc_nm_http_session_t *session) { ++ if (session->client) { ++ return false; ++ } ++ ++ /* ++ * A flooding client can try to open a lot of streams before ++ * submitting a request. Let's drop such clients. ++ */ ++ if (session->received == 0 && ++ session->total_opened_sstreams > MAX_STREAMS_BEFORE_FIRST_REQUEST) ++ { ++ return true; ++ } ++ ++ /* ++ * We have processed enough data to open at least one stream and ++ * get some useful data. ++ */ ++ if (session->processed_incoming_data > ++ INCOMING_DATA_INITIAL_STREAM_SIZE && ++ (session->total_opened_sstreams == 0 || ++ session->processed_useful_data == 0)) ++ { ++ return true; ++ } ++ ++ if (session->processed_incoming_data < INCOMING_DATA_GRACE_SIZE) { ++ return false; ++ } ++ ++ /* ++ * The overhead of DoH per DNS message can be minimum 160-180 ++ * bytes. We should allow more for extra information that can be ++ * included in headers, so let's use 256 bytes. Minimum DNS ++ * message size is 12 bytes. So, (256+12)/12=22. Even that can be ++ * too restricting for some edge cases, but should be good enough ++ * for any practical purposes. Not to mention that HTTP/2 may ++ * include legitimate data that is completely useless for DNS ++ * purposes... ++ * ++ * Anyway, at that point we should have processed enough requests ++ * for such clients (if any). ++ */ ++ if (session->processed_useful_data == 0 || ++ (session->processed_incoming_data / ++ session->processed_useful_data) > 22) ++ { ++ return true; ++ } ++ ++ return false; ++} ++ + /* + * Read callback from TLS socket. 
+ */ +@@ -976,6 +1230,7 @@ http_readcb(isc_nmhandle_t *handle, isc_result_t result, isc_region_t *region, + isc_nm_http_session_t *session = (isc_nm_http_session_t *)data; + isc_nm_http_session_t *tmpsess = NULL; + ssize_t readlen; ++ isc_buffer_t input; + + REQUIRE(VALID_HTTP2_SESSION(session)); + +@@ -994,11 +1249,17 @@ http_readcb(isc_nmhandle_t *handle, isc_result_t result, isc_region_t *region, + goto done; + } + +- readlen = nghttp2_session_mem_recv(session->ngsession, region->base, +- region->length); ++ isc_buffer_init(&input, region->base, region->length); ++ isc_buffer_add(&input, region->length); ++ ++ readlen = http_process_input_data(session, &input); + if (readlen < 0) { + failed_read_cb(ISC_R_UNEXPECTED, session); + goto done; ++ } else if (http_is_flooding_peer(session)) { ++ http_log_flooding_peer(session); ++ failed_read_cb(ISC_R_RANGE, session); ++ goto done; + } + + if ((size_t)readlen < region->length) { +@@ -1011,11 +1272,12 @@ http_readcb(isc_nmhandle_t *handle, isc_result_t result, isc_region_t *region, + isc_buffer_putmem(session->buf, region->base + readlen, + unread_size); + isc_nm_pauseread(session->handle); ++ http_do_bio_async(session); ++ } else { ++ /* We might have something to receive or send, do IO */ ++ http_do_bio(session, NULL, NULL, NULL); + } + +- /* We might have something to receive or send, do IO */ +- http_do_bio(session, NULL, NULL, NULL); +- + done: + isc__nm_httpsession_detach(&tmpsess); + } +@@ -1053,14 +1315,18 @@ http_writecb(isc_nmhandle_t *handle, isc_result_t result, void *arg) { + } + + isc_buffer_free(&req->pending_write_data); ++ session->processed += req->submitted; + isc_mem_put(session->mctx, req, sizeof(*req)); + + session->sending--; +- http_do_bio(session, NULL, NULL, NULL); +- isc_nmhandle_detach(&transphandle); +- if (result != ISC_R_SUCCESS && session->sending == 0) { ++ ++ if (result == ISC_R_SUCCESS) { ++ http_do_bio(session, NULL, NULL, NULL); ++ } else { + finish_http_session(session); + } ++ isc_nmhandle_detach(&transphandle); ++ + isc__nm_httpsession_detach(&session); + } + +@@ -1206,7 +1472,9 @@ http_send_outgoing(isc_nm_http_session_t *session, isc_nmhandle_t *httphandle, + *send = (isc_http_send_req_t){ .pending_write_data = + session->pending_write_data, + .cb = cb, +- .cbarg = cbarg }; ++ .cbarg = cbarg, ++ .submitted = session->submitted }; ++ session->submitted = 0; + session->pending_write_data = NULL; + move_pending_send_callbacks(session, send); + +@@ -1227,6 +1495,27 @@ nothing_to_send: + return (false); + } + ++static inline bool ++http_too_many_active_streams(isc_nm_http_session_t *session) { ++ const uint64_t active_streams = session->received - session->processed; ++ const uint64_t max_active_streams = ISC_MIN( ++ STREAM_CLIENTS_PER_CONN, session->max_concurrent_streams); ++ ++ if (session->client) { ++ return false; ++ } ++ ++ /* ++ * Do not process incoming data if there are too many active DNS ++ * clients (streams) per connection. 
++ */ ++ if (active_streams >= max_active_streams) { ++ return true; ++ } ++ ++ return false; ++} ++ + static void + http_do_bio(isc_nm_http_session_t *session, isc_nmhandle_t *send_httphandle, + isc_nm_cb_t send_cb, void *send_cbarg) { +@@ -1242,59 +1531,140 @@ http_do_bio(isc_nm_http_session_t *session, isc_nmhandle_t *send_httphandle, + finish_http_session(session); + } + return; +- } else if (nghttp2_session_want_read(session->ngsession) == 0 && +- nghttp2_session_want_write(session->ngsession) == 0 && +- session->pending_write_data == NULL) +- { +- session->closing = true; ++ } ++ ++ if (send_cb != NULL) { ++ INSIST(VALID_NMHANDLE(send_httphandle)); ++ (void)http_send_outgoing(session, send_httphandle, send_cb, ++ send_cbarg); ++ return; ++ } ++ ++ INSIST(send_httphandle == NULL); ++ INSIST(send_cb == NULL); ++ INSIST(send_cbarg == NULL); ++ ++ if (session->pending_write_data != NULL && session->sending == 0) { ++ (void)http_send_outgoing(session, NULL, NULL, NULL); + return; + } + + if (nghttp2_session_want_read(session->ngsession) != 0) { + if (!session->reading) { + /* We have not yet started reading from this handle */ ++ isc__nmsocket_timer_start(session->handle->sock); + isc_nm_read(session->handle, http_readcb, session); + session->reading = true; + } else if (session->buf != NULL) { + size_t remaining = + isc_buffer_remaininglength(session->buf); + /* Leftover data in the buffer, use it */ +- size_t readlen = nghttp2_session_mem_recv( +- session->ngsession, +- isc_buffer_current(session->buf), remaining); ++ size_t remaining_after = 0; ++ ssize_t readlen = 0; ++ isc_nm_http_session_t *tmpsess = NULL; + +- if (readlen == remaining) { ++ /* ++ * Let's ensure that HTTP/2 session and its associated ++ * data will not go "out of scope" too early. ++ */ ++ isc__nm_httpsession_attach(session, &tmpsess); ++ ++ readlen = http_process_input_data(session, ++ session->buf); ++ ++ remaining_after = ++ isc_buffer_remaininglength(session->buf); ++ ++ if (readlen < 0) { ++ failed_read_cb(ISC_R_UNEXPECTED, session); ++ } else if (http_is_flooding_peer(session)) { ++ http_log_flooding_peer(session); ++ failed_read_cb(ISC_R_RANGE, session); ++ } else if ((size_t)readlen == remaining) { + isc_buffer_free(&session->buf); ++ http_do_bio(session, NULL, NULL, NULL); ++ } else if (remaining_after > 0 && ++ remaining_after < remaining) ++ { ++ /* ++ * We have processed a part of the data, now ++ * let's delay processing of whatever is left ++ * here. We want it to be an async operation so ++ * that we will: ++ * ++ * a) let other things run; ++ * b) have finer grained control over how much ++ * data is processed at once, because nghttp2 ++ * would happily consume as much data we pass to ++ * it and that could overwhelm the server. 
++ */ ++ http_do_bio_async(session); + } else { +- isc_buffer_forward(session->buf, readlen); ++ (void)http_send_outgoing(session, NULL, NULL, ++ NULL); + } + +- http_do_bio(session, send_httphandle, send_cb, +- send_cbarg); ++ isc__nm_httpsession_detach(&tmpsess); + return; + } else { + /* Resume reading, it's idempotent, wait for more */ + isc_nm_resumeread(session->handle); ++ isc__nmsocket_timer_start(session->handle->sock); + } + } else { + /* We don't want more data, stop reading for now */ + isc_nm_pauseread(session->handle); + } + +- if (send_cb != NULL) { +- INSIST(VALID_NMHANDLE(send_httphandle)); +- (void)http_send_outgoing(session, send_httphandle, send_cb, +- send_cbarg); +- } else { +- INSIST(send_httphandle == NULL); +- INSIST(send_cb == NULL); +- INSIST(send_cbarg == NULL); +- (void)http_send_outgoing(session, NULL, NULL, NULL); ++ /* we might have some data to send after processing */ ++ (void)http_send_outgoing(session, NULL, NULL, NULL); ++ ++ if (nghttp2_session_want_read(session->ngsession) == 0 && ++ nghttp2_session_want_write(session->ngsession) == 0 && ++ session->pending_write_data == NULL) ++ { ++ session->closing = true; ++ isc_nm_pauseread(session->handle); ++ if (session->sending == 0) { ++ finish_http_session(session); ++ } + } + + return; + } + ++static void ++http_do_bio_async_cb(void *arg) { ++ isc_nm_http_session_t *session = arg; ++ ++ REQUIRE(VALID_HTTP2_SESSION(session)); ++ ++ if (session->handle != NULL && ++ !isc__nmsocket_closing(session->handle->sock)) ++ { ++ http_do_bio(session, NULL, NULL, NULL); ++ } ++ ++ isc__nm_httpsession_detach(&session); ++} ++ ++static void ++http_do_bio_async(isc_nm_http_session_t *session) { ++ isc_nm_http_session_t *tmpsess = NULL; ++ ++ REQUIRE(VALID_HTTP2_SESSION(session)); ++ ++ if (session->handle == NULL || ++ isc__nmsocket_closing(session->handle->sock)) ++ { ++ return; ++ } ++ isc__nm_httpsession_attach(session, &tmpsess); ++ isc__nm_async_run( ++ &session->handle->sock->mgr->workers[session->handle->sock->tid], ++ http_do_bio_async_cb, tmpsess); ++} ++ + static isc_result_t + get_http_cstream(isc_nmsocket_t *sock, http_cstream_t **streamp) { + http_cstream_t *cstream = sock->h2.connect.cstream; +@@ -1424,6 +1794,7 @@ transport_connect_cb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) { + } + + http_transpost_tcp_nodelay(handle); ++ isc__nmhandle_set_manual_timer(session->handle, true); + + http_call_connect_cb(http_sock, session, result); + +@@ -1670,6 +2041,7 @@ server_on_begin_headers_callback(nghttp2_session *ngsession, + socket->tid = session->handle->sock->tid; + ISC_LINK_INIT(&socket->h2, link); + ISC_LIST_APPEND(session->sstreams, &socket->h2, link); ++ session->total_opened_sstreams++; + + nghttp2_session_set_stream_user_data(ngsession, frame->hd.stream_id, + socket); +@@ -1730,6 +2102,8 @@ server_handle_path_header(isc_nmsocket_t *socket, const uint8_t *value, + socket->mgr->mctx, dns_value, + dns_value_len, + &socket->h2.query_data_len); ++ socket->h2.session->processed_useful_data += ++ dns_value_len; + } else { + socket->h2.query_too_large = true; + return (ISC_HTTP_ERROR_PAYLOAD_TOO_LARGE); +@@ -2038,6 +2412,12 @@ server_call_cb(isc_nmsocket_t *socket, const isc_result_t result, + handle = isc__nmhandle_get(socket, NULL, NULL); + if (result != ISC_R_SUCCESS) { + data = NULL; ++ } else if (socket->h2.session->handle != NULL) { ++ isc__nmsocket_timer_restart(socket->h2.session->handle->sock); ++ } ++ if (result == ISC_R_SUCCESS) { ++ socket->h2.request_received = true; ++ 
socket->h2.session->received++; + } + socket->h2.cb(handle, result, data, socket->h2.cbarg); + isc_nmhandle_detach(&handle); +@@ -2054,6 +2434,12 @@ isc__nm_http_bad_request(isc_nmhandle_t *handle) { + REQUIRE(!atomic_load(&sock->client)); + REQUIRE(VALID_HTTP2_SESSION(sock->h2.session)); + ++ if (sock->h2.response_submitted || ++ !http_session_active(sock->h2.session)) ++ { ++ return; ++ } ++ + (void)server_send_error_response(ISC_HTTP_ERROR_BAD_REQUEST, + sock->h2.session->ngsession, sock); + } +@@ -2475,6 +2861,8 @@ httplisten_acceptcb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) { + isc__nmsocket_attach(httplistensock, &session->serversocket); + server_send_connection_header(session); + ++ isc__nmhandle_set_manual_timer(session->handle, true); ++ + /* TODO H2 */ + http_do_bio(session, NULL, NULL, NULL); + return (ISC_R_SUCCESS); +diff --git a/lib/isc/netmgr/netmgr-int.h b/lib/isc/netmgr/netmgr-int.h +index cc635e3f5f9..79e970a2f6d 100644 +--- a/lib/isc/netmgr/netmgr-int.h ++++ b/lib/isc/netmgr/netmgr-int.h +@@ -337,6 +337,7 @@ typedef enum isc__netievent_type { + netievent_privilegedtask, + + netievent_settlsctx, ++ netievent_asyncrun, + + /* + * event type values higher than this will be treated +@@ -708,6 +709,42 @@ typedef struct isc__netievent__tlsctx { + } + + #ifdef HAVE_LIBNGHTTP2 ++typedef void (*isc__nm_asyncrun_cb_t)(void *); ++ ++typedef struct isc__netievent__asyncrun { ++ isc__netievent_type type; ++ ISC_LINK(isc__netievent_t) link; ++ isc__nm_asyncrun_cb_t cb; ++ void *cbarg; ++} isc__netievent__asyncrun_t; ++ ++#define NETIEVENT_ASYNCRUN_TYPE(type) \ ++ typedef isc__netievent__asyncrun_t isc__netievent_##type##_t; ++ ++#define NETIEVENT_ASYNCRUN_DECL(type) \ ++ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ ++ isc_nm_t *nm, isc__nm_asyncrun_cb_t cb, void *cbarg); \ ++ void isc__nm_put_netievent_##type(isc_nm_t *nm, \ ++ isc__netievent_##type##_t *ievent); ++ ++#define NETIEVENT_ASYNCRUN_DEF(type) \ ++ isc__netievent_##type##_t *isc__nm_get_netievent_##type( \ ++ isc_nm_t *nm, isc__nm_asyncrun_cb_t cb, void *cbarg) { \ ++ isc__netievent_##type##_t *ievent = \ ++ isc__nm_get_netievent(nm, netievent_##type); \ ++ ievent->cb = cb; \ ++ ievent->cbarg = cbarg; \ ++ \ ++ return (ievent); \ ++ } \ ++ \ ++ void isc__nm_put_netievent_##type(isc_nm_t *nm, \ ++ isc__netievent_##type##_t *ievent) { \ ++ ievent->cb = NULL; \ ++ ievent->cbarg = NULL; \ ++ isc__nm_put_netievent(nm, ievent); \ ++ } ++ + typedef struct isc__netievent__http_eps { + NETIEVENT__SOCKET; + isc_nm_http_endpoints_t *endpoints; +@@ -752,6 +789,7 @@ typedef union { + isc__netievent_tlsconnect_t nitc; + isc__netievent__tlsctx_t nitls; + #ifdef HAVE_LIBNGHTTP2 ++ isc__netievent__asyncrun_t niasync; + isc__netievent__http_eps_t nihttpeps; + #endif /* HAVE_LIBNGHTTP2 */ + } isc__netievent_storage_t; +@@ -944,6 +982,7 @@ typedef struct isc_nmsocket_h2 { + + isc_nm_http_endpoints_t *peer_endpoints; + ++ bool request_received; + bool response_submitted; + struct { + char *uri; +@@ -1228,6 +1267,7 @@ struct isc_nmsocket { + + isc_barrier_t barrier; + bool barrier_initialised; ++ atomic_bool manual_read_timer; + #ifdef NETMGR_TRACE + void *backtrace[TRACE_SIZE]; + int backtrace_size; +@@ -1546,6 +1586,9 @@ isc__nm_tcp_settimeout(isc_nmhandle_t *handle, uint32_t timeout); + * Set the read timeout for the TCP socket associated with 'handle'. 
+ */ + ++void ++isc__nmhandle_tcp_set_manual_timer(isc_nmhandle_t *handle, const bool manual); ++ + void + isc__nm_async_tcpconnect(isc__networker_t *worker, isc__netievent_t *ev0); + void +@@ -1788,6 +1831,9 @@ isc__nm_tls_cleartimeout(isc_nmhandle_t *handle); + * around. + */ + ++void ++isc__nmhandle_tls_set_manual_timer(isc_nmhandle_t *handle, const bool manual); ++ + const char * + isc__nm_tls_verify_tls_peer_result_string(const isc_nmhandle_t *handle); + +@@ -1805,6 +1851,15 @@ void + isc__nmhandle_tls_setwritetimeout(isc_nmhandle_t *handle, + uint64_t write_timeout); + ++bool ++isc__nmsocket_tls_timer_running(isc_nmsocket_t *sock); ++ ++void ++isc__nmsocket_tls_timer_restart(isc_nmsocket_t *sock); ++ ++void ++isc__nmsocket_tls_timer_stop(isc_nmsocket_t *sock); ++ + void + isc__nm_http_stoplistening(isc_nmsocket_t *sock); + +@@ -1897,7 +1952,10 @@ void + isc__nm_http_set_max_streams(isc_nmsocket_t *listener, + const uint32_t max_concurrent_streams); + +-#endif ++void ++isc__nm_async_asyncrun(isc__networker_t *worker, isc__netievent_t *ev0); ++ ++#endif /* HAVE_LIBNGHTTP2 */ + + void + isc__nm_async_settlsctx(isc__networker_t *worker, isc__netievent_t *ev0); +@@ -2093,6 +2151,8 @@ NETIEVENT_SOCKET_TYPE(tlsdnscycle); + NETIEVENT_SOCKET_REQ_TYPE(httpsend); + NETIEVENT_SOCKET_TYPE(httpclose); + NETIEVENT_SOCKET_HTTP_EPS_TYPE(httpendpoints); ++ ++NETIEVENT_ASYNCRUN_TYPE(asyncrun); + #endif /* HAVE_LIBNGHTTP2 */ + + NETIEVENT_SOCKET_REQ_TYPE(tcpconnect); +@@ -2167,6 +2227,8 @@ NETIEVENT_SOCKET_DECL(tlsdnscycle); + NETIEVENT_SOCKET_REQ_DECL(httpsend); + NETIEVENT_SOCKET_DECL(httpclose); + NETIEVENT_SOCKET_HTTP_EPS_DECL(httpendpoints); ++ ++NETIEVENT_ASYNCRUN_DECL(asyncrun); + #endif /* HAVE_LIBNGHTTP2 */ + + NETIEVENT_SOCKET_REQ_DECL(tcpconnect); +@@ -2283,3 +2345,20 @@ isc__nmsocket_writetimeout_cb(void *data, isc_result_t eresult); + + void + isc__nmsocket_log_tls_session_reuse(isc_nmsocket_t *sock, isc_tls_t *tls); ++ ++void ++isc__nmhandle_set_manual_timer(isc_nmhandle_t *handle, const bool manual); ++/* ++ * Set manual read timer control mode - so that it will not get reset ++ * automatically on read nor get started when read is initiated. ++ */ ++ ++#if HAVE_LIBNGHTTP2 ++void ++isc__nm_async_run(isc__networker_t *worker, isc__nm_asyncrun_cb_t cb, ++ void *cbarg); ++/* ++ * Call the given callback asynchronously by the give network manager ++ * worker, pass the given argument to it. 
++ */ ++#endif /* HAVE_LIBNGHTTP2 */ +diff --git a/lib/isc/netmgr/netmgr.c b/lib/isc/netmgr/netmgr.c +index a42ca90e8d2..8df6dc07408 100644 +--- a/lib/isc/netmgr/netmgr.c ++++ b/lib/isc/netmgr/netmgr.c +@@ -998,6 +998,8 @@ process_netievent(isc__networker_t *worker, isc__netievent_t *ievent) { + NETIEVENT_CASE(httpsend); + NETIEVENT_CASE(httpclose); + NETIEVENT_CASE(httpendpoints); ++ ++ NETIEVENT_CASE(asyncrun); + #endif + NETIEVENT_CASE(settlsctx); + NETIEVENT_CASE(sockstop); +@@ -1116,6 +1118,8 @@ NETIEVENT_SOCKET_DEF(tlsdnsshutdown); + NETIEVENT_SOCKET_REQ_DEF(httpsend); + NETIEVENT_SOCKET_DEF(httpclose); + NETIEVENT_SOCKET_HTTP_EPS_DEF(httpendpoints); ++ ++NETIEVENT_ASYNCRUN_DEF(asyncrun); + #endif /* HAVE_LIBNGHTTP2 */ + + NETIEVENT_SOCKET_REQ_DEF(tcpconnect); +@@ -1627,6 +1631,7 @@ isc___nmsocket_init(isc_nmsocket_t *sock, isc_nm_t *mgr, isc_nmsocket_type type, + atomic_init(&sock->keepalive, false); + atomic_init(&sock->connected, false); + atomic_init(&sock->timedout, false); ++ atomic_init(&sock->manual_read_timer, false); + + atomic_init(&sock->active_child_connections, 0); + +@@ -2136,6 +2141,15 @@ void + isc__nmsocket_timer_restart(isc_nmsocket_t *sock) { + REQUIRE(VALID_NMSOCK(sock)); + ++ switch (sock->type) { ++#if HAVE_LIBNGHTTP2 ++ case isc_nm_tlssocket: ++ return isc__nmsocket_tls_timer_restart(sock); ++#endif /* HAVE_LIBNGHTTP2 */ ++ default: ++ break; ++ } ++ + if (uv_is_closing((uv_handle_t *)&sock->read_timer)) { + return; + } +@@ -2170,7 +2184,16 @@ bool + isc__nmsocket_timer_running(isc_nmsocket_t *sock) { + REQUIRE(VALID_NMSOCK(sock)); + +- return (uv_is_active((uv_handle_t *)&sock->read_timer)); ++ switch (sock->type) { ++#if HAVE_LIBNGHTTP2 ++ case isc_nm_tlssocket: ++ return isc__nmsocket_tls_timer_running(sock); ++#endif /* HAVE_LIBNGHTTP2 */ ++ default: ++ break; ++ } ++ ++ return uv_is_active((uv_handle_t *)&sock->read_timer); + } + + void +@@ -2190,6 +2213,15 @@ isc__nmsocket_timer_stop(isc_nmsocket_t *sock) { + + REQUIRE(VALID_NMSOCK(sock)); + ++ switch (sock->type) { ++#if HAVE_LIBNGHTTP2 ++ case isc_nm_tlssocket: ++ return isc__nmsocket_tls_timer_stop(sock); ++#endif /* HAVE_LIBNGHTTP2 */ ++ default: ++ break; ++ } ++ + /* uv_timer_stop() is idempotent, no need to check if running */ + + r = uv_timer_stop(&sock->read_timer); +@@ -3946,6 +3978,52 @@ isc__nmsocket_log_tls_session_reuse(isc_nmsocket_t *sock, isc_tls_t *tls) { + client_sabuf, local_sabuf); + } + ++void ++isc__nmhandle_set_manual_timer(isc_nmhandle_t *handle, const bool manual) { ++ REQUIRE(VALID_NMHANDLE(handle)); ++ REQUIRE(VALID_NMSOCK(handle->sock)); ++ ++ isc_nmsocket_t *sock = handle->sock; ++ ++ switch (sock->type) { ++ case isc_nm_tcpsocket: ++ isc__nmhandle_tcp_set_manual_timer(handle, manual); ++ return; ++#if HAVE_LIBNGHTTP2 ++ case isc_nm_tlssocket: ++ isc__nmhandle_tls_set_manual_timer(handle, manual); ++ return; ++#endif /* HAVE_LIBNGHTTP2 */ ++ default: ++ break; ++ }; ++ ++ UNREACHABLE(); ++} ++ ++#if HAVE_LIBNGHTTP2 ++void ++isc__nm_async_run(isc__networker_t *worker, isc__nm_asyncrun_cb_t cb, ++ void *cbarg) { ++ isc__netievent__asyncrun_t *ievent = NULL; ++ REQUIRE(worker != NULL); ++ REQUIRE(cb != NULL); ++ ++ ievent = isc__nm_get_netievent_asyncrun(worker->mgr, cb, cbarg); ++ isc__nm_enqueue_ievent(worker, (isc__netievent_t *)ievent); ++} ++ ++void ++isc__nm_async_asyncrun(isc__networker_t *worker, isc__netievent_t *ev0) { ++ isc__netievent_asyncrun_t *ievent = (isc__netievent_asyncrun_t *)ev0; ++ ++ UNUSED(worker); ++ ++ ievent->cb(ievent->cbarg); ++} ++ ++#endif /* 
HAVE_LIBNGHTTP2 */ ++ + #ifdef NETMGR_TRACE + /* + * Dump all active sockets in netmgr. We output to stderr +diff --git a/lib/isc/netmgr/tcp.c b/lib/isc/netmgr/tcp.c +index 37d44bd9c84..925bc85e028 100644 +--- a/lib/isc/netmgr/tcp.c ++++ b/lib/isc/netmgr/tcp.c +@@ -784,7 +784,9 @@ isc__nm_async_tcpstartread(isc__networker_t *worker, isc__netievent_t *ev0) { + return; + } + +- isc__nmsocket_timer_start(sock); ++ if (!atomic_load(&sock->manual_read_timer)) { ++ isc__nmsocket_timer_start(sock); ++ } + } + + void +@@ -822,7 +824,9 @@ isc__nm_async_tcppauseread(isc__networker_t *worker, isc__netievent_t *ev0) { + REQUIRE(sock->tid == isc_nm_tid()); + UNUSED(worker); + +- isc__nmsocket_timer_stop(sock); ++ if (!atomic_load(&sock->manual_read_timer)) { ++ isc__nmsocket_timer_stop(sock); ++ } + isc__nm_stop_reading(sock); + } + +@@ -931,8 +935,10 @@ isc__nm_tcp_read_cb(uv_stream_t *stream, ssize_t nread, const uv_buf_t *buf) { + } + } + +- /* The timer will be updated */ +- isc__nmsocket_timer_restart(sock); ++ if (!atomic_load(&sock->manual_read_timer)) { ++ /* The timer will be updated */ ++ isc__nmsocket_timer_restart(sock); ++ } + } + + free: +@@ -1521,3 +1527,15 @@ isc__nm_tcp_listener_nactive(isc_nmsocket_t *listener) { + INSIST(nactive >= 0); + return (nactive); + } ++ ++void ++isc__nmhandle_tcp_set_manual_timer(isc_nmhandle_t *handle, const bool manual) { ++ isc_nmsocket_t *sock; ++ ++ REQUIRE(VALID_NMHANDLE(handle)); ++ sock = handle->sock; ++ REQUIRE(VALID_NMSOCK(sock)); ++ REQUIRE(sock->type == isc_nm_tcpsocket); ++ ++ atomic_store(&sock->manual_read_timer, manual); ++} +diff --git a/lib/isc/netmgr/tlsstream.c b/lib/isc/netmgr/tlsstream.c +index 4fef5985b25..3d78df6a4d1 100644 +--- a/lib/isc/netmgr/tlsstream.c ++++ b/lib/isc/netmgr/tlsstream.c +@@ -60,6 +60,12 @@ tls_error_to_result(const int tls_err, const int tls_state, isc_tls_t *tls) { + } + } + ++static void ++tls_read_start(isc_nmsocket_t *sock); ++ ++static void ++tls_read_stop(isc_nmsocket_t *sock); ++ + static void + tls_failed_read_cb(isc_nmsocket_t *sock, const isc_result_t result); + +@@ -203,8 +209,13 @@ tls_failed_read_cb(isc_nmsocket_t *sock, const isc_result_t result) { + tls_call_connect_cb(sock, handle, result); + isc__nmsocket_clearcb(sock); + isc_nmhandle_detach(&handle); +- } else if (sock->recv_cb != NULL && sock->statichandle != NULL && +- (sock->recv_read || result == ISC_R_TIMEDOUT)) ++ goto do_destroy; ++ } ++ ++ isc__nmsocket_timer_stop(sock); ++ ++ if (sock->recv_cb != NULL && sock->statichandle != NULL && ++ (sock->recv_read || result == ISC_R_TIMEDOUT)) + { + isc__nm_uvreq_t *req = NULL; + INSIST(VALID_NMHANDLE(sock->statichandle)); +@@ -218,13 +229,13 @@ tls_failed_read_cb(isc_nmsocket_t *sock, const isc_result_t result) { + } + isc__nm_readcb(sock, req, result); + if (result == ISC_R_TIMEDOUT && +- (sock->outerhandle == NULL || +- isc__nmsocket_timer_running(sock->outerhandle->sock))) ++ isc__nmsocket_timer_running(sock)) + { + destroy = false; + } + } + ++do_destroy: + if (destroy) { + isc__nmsocket_prep_destroy(sock); + } +@@ -344,6 +355,8 @@ tls_try_handshake(isc_nmsocket_t *sock, isc_result_t *presult) { + INSIST(sock->statichandle == NULL); + isc__nmsocket_log_tls_session_reuse(sock, sock->tlsstream.tls); + tlshandle = isc__nmhandle_get(sock, &sock->peer, &sock->iface); ++ isc__nmsocket_timer_stop(sock); ++ tls_read_stop(sock); + + if (isc__nm_closing(sock)) { + result = ISC_R_SHUTTINGDOWN; +@@ -437,6 +450,7 @@ tls_do_bio(isc_nmsocket_t *sock, isc_region_t *received_data, + sock->tlsstream.state = 
TLS_HANDSHAKE; + rv = tls_try_handshake(sock, NULL); + INSIST(SSL_is_init_finished(sock->tlsstream.tls) == 0); ++ isc__nmsocket_timer_restart(sock); + } else if (sock->tlsstream.state == TLS_CLOSED) { + return; + } else { /* initialised and doing I/O */ +@@ -502,6 +516,7 @@ tls_do_bio(isc_nmsocket_t *sock, isc_region_t *received_data, + !atomic_load(&sock->readpaused) && + sock->statichandle != NULL && !finish) + { ++ bool was_new_data = false; + uint8_t recv_buf[TLS_BUF_SIZE]; + INSIST(sock->tlsstream.state > TLS_HANDSHAKE); + while ((rv = SSL_read_ex(sock->tlsstream.tls, recv_buf, +@@ -510,7 +525,7 @@ tls_do_bio(isc_nmsocket_t *sock, isc_region_t *received_data, + isc_region_t region; + region = (isc_region_t){ .base = &recv_buf[0], + .length = len }; +- ++ was_new_data = true; + INSIST(VALID_NMHANDLE(sock->statichandle)); + sock->recv_cb(sock->statichandle, ISC_R_SUCCESS, + ®ion, sock->recv_cbarg); +@@ -547,8 +562,29 @@ tls_do_bio(isc_nmsocket_t *sock, isc_region_t *received_data, + break; + } + } ++ ++ if (was_new_data && !sock->manual_read_timer) { ++ /* ++ * Some data has been decrypted, it is the right ++ * time to stop the read timer as it will be ++ * restarted on the next read attempt. ++ */ ++ isc__nmsocket_timer_stop(sock); ++ } + } + } ++ ++ /* ++ * Setting 'finish' to 'true' means that we are about to close the ++ * TLS stream (we intend to send TLS shutdown message to the ++ * remote side). After that no new data can be received, so we ++ * should stop the timer regardless of the ++ * 'sock->manual_read_timer' value. ++ */ ++ if (finish) { ++ isc__nmsocket_timer_stop(sock); ++ } ++ + errno = 0; + tls_status = SSL_get_error(sock->tlsstream.tls, rv); + saved_errno = errno; +@@ -601,14 +637,7 @@ tls_do_bio(isc_nmsocket_t *sock, isc_region_t *received_data, + return; + } + +- INSIST(VALID_NMHANDLE(sock->outerhandle)); +- +- if (sock->tlsstream.reading) { +- isc_nm_resumeread(sock->outerhandle); +- } else if (sock->tlsstream.state == TLS_HANDSHAKE) { +- sock->tlsstream.reading = true; +- isc_nm_read(sock->outerhandle, tls_readcb, sock); +- } ++ tls_read_start(sock); + return; + default: + result = tls_error_to_result(tls_status, sock->tlsstream.state, +@@ -743,6 +772,7 @@ tlslisten_acceptcb(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) { + RUNTIME_CHECK(result == ISC_R_SUCCESS); + /* TODO: catch failure code, detach tlssock, and log the error */ + ++ isc__nmhandle_set_manual_timer(tlssock->outerhandle, true); + tls_do_bio(tlssock, NULL, NULL, false); + return (result); + } +@@ -898,6 +928,29 @@ isc__nm_tls_read(isc_nmhandle_t *handle, isc_nm_recv_cb_t cb, void *cbarg) { + (isc__netievent_t *)ievent); + } + ++static void ++tls_read_start(isc_nmsocket_t *sock) { ++ INSIST(VALID_NMHANDLE(sock->outerhandle)); ++ ++ if (sock->tlsstream.reading) { ++ isc_nm_resumeread(sock->outerhandle); ++ } else if (sock->tlsstream.state == TLS_HANDSHAKE) { ++ sock->tlsstream.reading = true; ++ isc_nm_read(sock->outerhandle, tls_readcb, sock); ++ } ++ ++ if (!sock->manual_read_timer) { ++ isc__nmsocket_timer_start(sock); ++ } ++} ++ ++static void ++tls_read_stop(isc_nmsocket_t *sock) { ++ if (sock->outerhandle != NULL) { ++ isc_nm_pauseread(sock->outerhandle); ++ } ++} ++ + void + isc__nm_tls_pauseread(isc_nmhandle_t *handle) { + REQUIRE(VALID_NMHANDLE(handle)); +@@ -906,9 +959,11 @@ isc__nm_tls_pauseread(isc_nmhandle_t *handle) { + if (atomic_compare_exchange_strong(&handle->sock->readpaused, + &(bool){ false }, true)) + { +- if (handle->sock->outerhandle != NULL) { +- 
isc_nm_pauseread(handle->sock->outerhandle);
++		if (!atomic_load(&handle->sock->manual_read_timer)) {
++			isc__nmsocket_timer_stop(handle->sock);
+ 		}
++
++		tls_read_stop(handle->sock);
+ 	}
+ }
+ 
+@@ -937,6 +992,7 @@ tls_close_direct(isc_nmsocket_t *sock) {
+ 	 * external references, we can close everything.
+ 	 */
+ 	if (sock->outerhandle != NULL) {
++		isc__nmsocket_timer_stop(sock);
+ 		isc_nm_pauseread(sock->outerhandle);
+ 		isc__nmsocket_clearcb(sock->outerhandle->sock);
+ 		isc_nmhandle_detach(&sock->outerhandle);
+@@ -1085,6 +1141,7 @@ tcp_connected(isc_nmhandle_t *handle, isc_result_t result, void *cbarg) {
+ 	 */
+ 	handle->sock->tlsstream.tlssocket = tlssock;
+ 
++	isc__nmhandle_set_manual_timer(tlssock->outerhandle, true);
+ 	tls_do_bio(tlssock, NULL, NULL, false);
+ 	return;
+ error:
+@@ -1251,6 +1308,44 @@ isc__nmhandle_tls_setwritetimeout(isc_nmhandle_t *handle,
+ 	}
+ }
+ 
++bool
++isc__nmsocket_tls_timer_running(isc_nmsocket_t *sock) {
++	REQUIRE(VALID_NMSOCK(sock));
++	REQUIRE(sock->type == isc_nm_tlssocket);
++
++	if (sock->outerhandle != NULL) {
++		INSIST(VALID_NMHANDLE(sock->outerhandle));
++		REQUIRE(VALID_NMSOCK(sock->outerhandle->sock));
++		return isc__nmsocket_timer_running(sock->outerhandle->sock);
++	}
++
++	return false;
++}
++
++void
++isc__nmsocket_tls_timer_restart(isc_nmsocket_t *sock) {
++	REQUIRE(VALID_NMSOCK(sock));
++	REQUIRE(sock->type == isc_nm_tlssocket);
++
++	if (sock->outerhandle != NULL) {
++		INSIST(VALID_NMHANDLE(sock->outerhandle));
++		REQUIRE(VALID_NMSOCK(sock->outerhandle->sock));
++		isc__nmsocket_timer_restart(sock->outerhandle->sock);
++	}
++}
++
++void
++isc__nmsocket_tls_timer_stop(isc_nmsocket_t *sock) {
++	REQUIRE(VALID_NMSOCK(sock));
++	REQUIRE(sock->type == isc_nm_tlssocket);
++
++	if (sock->outerhandle != NULL) {
++		INSIST(VALID_NMHANDLE(sock->outerhandle));
++		REQUIRE(VALID_NMSOCK(sock->outerhandle->sock));
++		isc__nmsocket_timer_stop(sock->outerhandle->sock);
++	}
++}
++
+ const char *
+ isc__nm_tls_verify_tls_peer_result_string(const isc_nmhandle_t *handle) {
+ 	isc_nmsocket_t *sock = NULL;
+@@ -1351,3 +1446,15 @@ tls_try_shutdown(isc_tls_t *tls, const bool force) {
+ 		(void)SSL_shutdown(tls);
+ 	}
+ }
++
++void
++isc__nmhandle_tls_set_manual_timer(isc_nmhandle_t *handle, const bool manual) {
++	isc_nmsocket_t *sock;
++
++	REQUIRE(VALID_NMHANDLE(handle));
++	sock = handle->sock;
++	REQUIRE(VALID_NMSOCK(sock));
++	REQUIRE(sock->type == isc_nm_tlssocket);
++
++	atomic_store(&sock->manual_read_timer, manual);
++}
+-- 
+2.48.1
+
diff --git a/SOURCES/bind-9.18-query-fname-relative.patch b/SOURCES/bind-9.18-query-fname-relative.patch
new file mode 100644
index 0000000..219721a
--- /dev/null
+++ b/SOURCES/bind-9.18-query-fname-relative.patch
@@ -0,0 +1,90 @@
+From 5bc7cd7a7b9c37e5c70ccf74c5485a02411aaef5 Mon Sep 17 00:00:00 2001
+From: Petr Mensik
+Date: Fri, 25 Apr 2025 02:00:00 +0200
+Subject: [PATCH] Insert additional checks ensuring name is not relative
+
+Mitigations for crashes were put in various places where an obviously
+relative, uninitialized name must not appear. This seems unnecessary once
+the true cause was identified, but may prevent similar problems elsewhere.
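In isolation, the guard these hunks add reduces to the following pattern
(a minimal sketch against BIND's <dns/name.h>; the helper name is
illustrative, not part of the patch):

	#include <stdbool.h>
	#include <dns/name.h>

	static bool
	fname_is_usable(const dns_name_t *fname) {
		/* A name reaching the response-building code must be
		 * absolute; a relative name here indicates uninitialized
		 * state, so the query is aborted with SERVFAIL instead of
		 * tripping the assertions deeper in lib/dns/name.c. */
		return (fname != NULL && dns_name_isabsolute(fname));
	}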
+---
+ lib/ns/query.c | 35 +++++++++++++++++++++++++++++++++++
+ 1 file changed, 35 insertions(+)
+
+diff --git a/lib/ns/query.c b/lib/ns/query.c
+index 11d2520..7e8a4d2 100644
+--- a/lib/ns/query.c
++++ b/lib/ns/query.c
+@@ -2203,6 +2203,20 @@ regular:
+ 	CTRACE(ISC_LOG_DEBUG(3), "query_additional: done");
+ }
+ 
++static void
++log_query_relative(query_ctx_t *qctx, const char *func, const dns_name_t *name) {
++	if (isc_log_wouldlog(ns_lctx, ISC_LOG_DEBUG(1))) {
++		char namebuf[DNS_NAME_FORMATSIZE] = "!";
++		dns_name_format(name, namebuf, sizeof(namebuf));
++		ns_client_log(
++			qctx->client, NS_LOGCATEGORY_CLIENT, NS_LOGMODULE_QUERY,
++			ISC_LOG_DEBUG(1),
++			"%s: fname=%s leading to relative name, aborting query.",
++			func, namebuf
++		);
++	}
++}
++
+ static void
+ query_addrrset(query_ctx_t *qctx, dns_name_t **namep,
+ 	       dns_rdataset_t **rdatasetp, dns_rdataset_t **sigrdatasetp,
+@@ -2275,6 +2289,11 @@ query_addrrset(query_ctx_t *qctx, dns_name_t **namep,
+ 		client->query.attributes &= ~NS_QUERYATTR_SECURE;
+ 	}
+ 
++	if (!qctx->is_zone && mname && !dns_name_isabsolute(mname)) {
++		log_query_relative(qctx, "query_addrrset", mname);
++		QUERY_ERROR(qctx, DNS_R_SERVFAIL);
++		return;
++	}
+ 	/*
+ 	 * Update message name, set rdataset order, and do additional
+ 	 * section processing if needed.
+@@ -8074,6 +8093,11 @@ query_respond_any(query_ctx_t *qctx) {
+ 					: qctx->tname;
+ 				query_prefetch(qctx->client, name,
+ 					       qctx->rdataset);
++				if (name && !dns_name_isabsolute(name)) {
++					log_query_relative(qctx, "query_respond_any", name);
++					result = DNS_R_DROP;
++					break;
++				}
+ 			}
+ 
+ 			/*
+@@ -10696,6 +10720,11 @@ query_cname(query_ctx_t *qctx) {
+ 
+ 	if (!qctx->is_zone && RECURSIONOK(qctx->client)) {
+ 		query_prefetch(qctx->client, qctx->fname, qctx->rdataset);
++		if (qctx->fname && !dns_name_isabsolute(qctx->fname)) {
++			log_query_relative(qctx, "query_cname", qctx->fname);
++			QUERY_ERROR(qctx, DNS_R_SERVFAIL);
++			return (ns_query_done(qctx));
++		}
+ 	}
+ 
+ 	query_addrrset(qctx, &qctx->fname, &qctx->rdataset, sigrdatasetp,
+@@ -10801,7 +10830,13 @@ query_dname(query_ctx_t *qctx) {
+ 
+ 	if (!qctx->is_zone && RECURSIONOK(qctx->client)) {
+ 		query_prefetch(qctx->client, qctx->fname, qctx->rdataset);
++		if (qctx->fname && !dns_name_isabsolute(qctx->fname)) {
++			log_query_relative(qctx, "query_dname", qctx->fname);
++			QUERY_ERROR(qctx, DNS_R_SERVFAIL);
++			return (ns_query_done(qctx));
++		}
+ 	}
++
+ 	query_addrrset(qctx, &qctx->fname, &qctx->rdataset, sigrdatasetp,
+ 		       qctx->dbuf, DNS_SECTION_ANSWER);
+ 
+-- 
+2.49.0
+
diff --git a/SOURCES/bind-9.21-resume-qmin-cname.patch b/SOURCES/bind-9.21-resume-qmin-cname.patch
new file mode 100644
index 0000000..05fd4b4
--- /dev/null
+++ b/SOURCES/bind-9.21-resume-qmin-cname.patch
@@ -0,0 +1,44 @@
+From ac0c3b0477d97fe5c968910f603bb8d04c740da7 Mon Sep 17 00:00:00 2001
+From: Petr Mensik
+Date: Tue, 3 Jun 2025 21:00:58 +0200
+Subject: [PATCH] Handle CNAME and DNAME in resume_qmin in a special way
+
+When an authoritative zone is loaded while a query minimization query for
+the same zone is already pending, it might receive unexpected result codes.
+
+Normally DNS_R_CNAME would lead to query_cname after processing sent
+events, but dns_view_findzonecut does not fill the CNAME target into
+event->foundevent. A usual lookup via query_lookup would always have that
+filled.
+
+Ideally we would restart the query with the unmodified search name, if
+an unexpected change from recursing to a local zone cut were detected. 
Until
+dns_view_findzonecut is modified to export the zone/cache source of the
+cut, at least fail queries that went into an unexpected state.
+---
+ lib/dns/resolver.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/lib/dns/resolver.c b/lib/dns/resolver.c
+index 795791246b..39a294437e 100644
+--- a/lib/dns/resolver.c
++++ b/lib/dns/resolver.c
+@@ -4497,6 +4497,15 @@ resume_qmin(isc_task_t *task, isc_event_t *event) {
+ 	if (result == DNS_R_NXDOMAIN) {
+ 		result = DNS_R_SERVFAIL;
+ 	}
++	/*
++	 * CNAME or DNAME means the zone was added with that record
++	 * after the start of query minimization queries. It means
++	 * we do not have a correctly initialized hevent->foundname
++	 * and have to fail.
++	 */
++	if (result == DNS_R_CNAME || result == DNS_R_DNAME) {
++		result = DNS_R_SERVFAIL;
++	}
+ 
+ 	if (result != ISC_R_SUCCESS) {
+ 		goto cleanup;
+-- 
+2.49.0
+
diff --git a/SPECS/bind9.18.spec b/SPECS/bind9.18.spec
index 4ab7dbe..e1a46be 100644
--- a/SPECS/bind9.18.spec
+++ b/SPECS/bind9.18.spec
@@ -77,7 +77,7 @@ License: MPL-2.0 AND ISC AND MIT AND BSD-3-Clause AND BSD-2-Clause
 # ./lib/isc/tm.c BSD-2-clause and/or MPL-2.0
 # ./lib/isccfg/parser.c BSD-2-clause and/or MPL-2.0
 Version: 9.18.29
-Release: 2%{?dist}
+Release: 4%{?dist}
 Epoch: 32
 Url: https://www.isc.org/downloads/bind/
 #
@@ -119,6 +119,17 @@ Patch26: bind-9.18-unittest-netmgr-unstable.patch
 Patch27: bind-9.18-nsupdate-TLS.patch
 Patch28: bind-9.18-nsupdate-TLS-doc.patch
 Patch29: bind-9.18-nsupdate-TLS-tests.patch
+# https://gitlab.isc.org/isc-projects/bind9/-/commit/c6e6a7af8ac6b575dd3657b0f5cf4248d734c2b0
+Patch30: bind-9.18-CVE-2024-11187-pre-test.patch
+Patch31: bind-9.18-CVE-2024-11187.patch
+# https://gitlab.isc.org/isc-projects/bind9/-/commit/e733e624147155d6cbee7f0f150c79c7ac6b54bb
+Patch32: bind-9.18-CVE-2024-12705.patch
+# https://gitlab.isc.org/isc-projects/bind9/-/merge_requests/10562
+# https://gitlab.isc.org/isc-projects/bind9/-/issues/5357
+# downstream patch fixing an issue caused by bind-dyndb-ldap
+Patch33: bind-9.21-resume-qmin-cname.patch
+# downstream only, extra check for the above change, RHEL-30407
+Patch34: bind-9.18-query-fname-relative.patch
 %{?systemd_ordering}
 Requires: coreutils
@@ -966,6 +977,14 @@ fi;
 %endif
 
 %changelog
+* Tue Jun 10 2025 Petr Mensik - 32:9.18.29-4
+- Prevent the name.c:670 attributes assertion failure (RHEL-30407)
+- Add extra checks for relative names
+
+* Mon Feb 03 2025 Petr Menšík - 32:9.18.29-3
+- Limit additional section records CPU processing (CVE-2024-11187)
+- Read HTTPS requests in limited chunks and prevent overload (CVE-2024-12705)
+
 * Mon Jan 27 2025 Petr Menšík - 32:9.18.29-2
 - Backport nsupdate TLS support into 9.18 (RHEL-76331)
 - Update nsupdate manual about new TLS options
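For reference, the manual read-timer mode added by the CVE-2024-12705 patch
above is used by a layered transport roughly as follows (a minimal sketch
against the internal lib/isc/netmgr/netmgr-int.h API; the callback names are
illustrative, not part of the patch):

	#include "netmgr-int.h"

	static void
	transport_accept(isc_nmhandle_t *handle) {
		/* Take over timer control from the underlying TCP/TLS layer:
		 * the read timer no longer auto-restarts whenever raw bytes
		 * arrive... */
		isc__nmhandle_set_manual_timer(handle, true);
		/* ...and is instead started explicitly when reading begins. */
		isc__nmsocket_timer_start(handle->sock);
	}

	static void
	transport_message_done(isc_nmhandle_t *handle) {
		/* Restart the timer only once a full DNS message has been
		 * processed, in line with the other DNS transports; clients
		 * that never complete a message (or refuse to read responses
		 * while reading is paused for throttling) still time out. */
		isc__nmsocket_timer_restart(handle->sock);
	}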