diff --git a/SOURCES/bind-9.16-CVE-2023-2828.patch b/SOURCES/bind-9.16-CVE-2023-2828.patch new file mode 100644 index 0000000..466dc6e --- /dev/null +++ b/SOURCES/bind-9.16-CVE-2023-2828.patch @@ -0,0 +1,192 @@ +From ed920ea2ae1cc1214b42b82a5149758dbec941a5 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Ond=C5=99ej=20Sur=C3=BD?= +Date: Tue, 30 May 2023 08:46:17 +0200 +Subject: [PATCH] Improve RBT overmem cache cleaning + +When cache memory usage is over the configured cache size (overmem) and +we are cleaning unused entries, it might not be enough to clean just two +entries if the entries to be expired are smaller than the newly added +rdata. This could be abused by an attacker to cause a remote Denial of +Service by possibly running out of the operating system memory. + +Currently, the addrdataset() tries to do a single TTL-based cleaning +considering the serve-stale TTL and then optionally moves to overmem +cleaning if we are in that condition. Then the overmem_purge() tries to +do another single TTL based cleaning from the TTL heap and then continue +with LRU-based cleaning up to 2 entries cleaned. + +Squash the TTL-cleaning mechanism into single call from addrdataset(), +but ignore the serve-stale TTL if we are currently overmem. + +Then instead of having a fixed number of entries to clean, pass the size +of newly added rdatasetheader to the overmem_purge() function and +cleanup at least the size of the newly added data. This prevents the +cache going over the configured memory limit (`max-cache-size`). + +Additionally, refactor the overmem_purge() function to reduce for-loop +nesting for readability. + +(cherry picked from commit f1d9e9ee3859976f403914d20ad2a10855343702) +--- + lib/dns/rbtdb.c | 105 ++++++++++++++++++++++++++++++------------------ + 1 file changed, 65 insertions(+), 40 deletions(-) + +diff --git a/lib/dns/rbtdb.c b/lib/dns/rbtdb.c +index 51178cc877..75f97f5550 100644 +--- a/lib/dns/rbtdb.c ++++ b/lib/dns/rbtdb.c +@@ -599,7 +599,7 @@ static void + expire_header(dns_rbtdb_t *rbtdb, rdatasetheader_t *header, bool tree_locked, + expire_t reason); + static void +-overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start, isc_stdtime_t now, ++overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start, size_t purgesize, + bool tree_locked); + static isc_result_t + resign_insert(dns_rbtdb_t *rbtdb, int idx, rdatasetheader_t *newheader); +@@ -6802,6 +6802,16 @@ cleanup: + + static dns_dbmethods_t zone_methods; + ++static size_t ++rdataset_size(rdatasetheader_t *header) { ++ if (!NONEXISTENT(header)) { ++ return (dns_rdataslab_size((unsigned char *)header, ++ sizeof(*header))); ++ } ++ ++ return (sizeof(*header)); ++} ++ + static isc_result_t + addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version, + isc_stdtime_t now, dns_rdataset_t *rdataset, unsigned int options, +@@ -6965,7 +6975,8 @@ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version, + } + + if (cache_is_overmem) { +- overmem_purge(rbtdb, rbtnode->locknum, now, tree_locked); ++ overmem_purge(rbtdb, rbtnode->locknum, rdataset_size(newheader), ++ tree_locked); + } + + NODE_LOCK(&rbtdb->node_locks[rbtnode->locknum].lock, +@@ -6984,10 +6995,18 @@ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version, + } + + header = isc_heap_element(rbtdb->heaps[rbtnode->locknum], 1); +- if (header != NULL && header->rdh_ttl + rbtdb->serve_stale_ttl < +- now - RBTDB_VIRTUAL) +- { +- expire_header(rbtdb, header, tree_locked, expire_ttl); ++ if (header != NULL) { ++ dns_ttl_t rdh_ttl = 
header->rdh_ttl; ++ ++ /* Only account for stale TTL if cache is not overmem */ ++ if (!cache_is_overmem) { ++ rdh_ttl += rbtdb->serve_stale_ttl; ++ } ++ ++ if (rdh_ttl < now - RBTDB_VIRTUAL) { ++ expire_header(rbtdb, header, tree_locked, ++ expire_ttl); ++ } + } + + /* +@@ -10531,52 +10550,58 @@ update_header(dns_rbtdb_t *rbtdb, rdatasetheader_t *header, isc_stdtime_t now) { + ISC_LIST_PREPEND(rbtdb->rdatasets[header->node->locknum], header, link); + } + ++static size_t ++expire_lru_headers(dns_rbtdb_t *rbtdb, unsigned int locknum, size_t purgesize, ++ bool tree_locked) { ++ rdatasetheader_t *header, *header_prev; ++ size_t purged = 0; ++ ++ for (header = ISC_LIST_TAIL(rbtdb->rdatasets[locknum]); ++ header != NULL && purged <= purgesize; header = header_prev) ++ { ++ header_prev = ISC_LIST_PREV(header, link); ++ /* ++ * Unlink the entry at this point to avoid checking it ++ * again even if it's currently used someone else and ++ * cannot be purged at this moment. This entry won't be ++ * referenced any more (so unlinking is safe) since the ++ * TTL was reset to 0. ++ */ ++ ISC_LIST_UNLINK(rbtdb->rdatasets[locknum], header, link); ++ size_t header_size = rdataset_size(header); ++ expire_header(rbtdb, header, tree_locked, expire_lru); ++ purged += header_size; ++ } ++ ++ return (purged); ++} ++ + /*% +- * Purge some expired and/or stale (i.e. unused for some period) cache entries +- * under an overmem condition. To recover from this condition quickly, up to +- * 2 entries will be purged. This process is triggered while adding a new +- * entry, and we specifically avoid purging entries in the same LRU bucket as +- * the one to which the new entry will belong. Otherwise, we might purge +- * entries of the same name of different RR types while adding RRsets from a +- * single response (consider the case where we're adding A and AAAA glue records +- * of the same NS name). ++ * Purge some stale (i.e. unused for some period - LRU based cleaning) cache ++ * entries under the overmem condition. To recover from this condition quickly, ++ * we cleanup entries up to the size of newly added rdata (passed as purgesize). ++ * ++ * This process is triggered while adding a new entry, and we specifically avoid ++ * purging entries in the same LRU bucket as the one to which the new entry will ++ * belong. Otherwise, we might purge entries of the same name of different RR ++ * types while adding RRsets from a single response (consider the case where ++ * we're adding A and AAAA glue records of the same NS name). 
+ */ + static void +-overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start, isc_stdtime_t now, ++overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start, size_t purgesize, + bool tree_locked) { +- rdatasetheader_t *header, *header_prev; + unsigned int locknum; +- int purgecount = 2; ++ size_t purged = 0; + + for (locknum = (locknum_start + 1) % rbtdb->node_lock_count; +- locknum != locknum_start && purgecount > 0; ++ locknum != locknum_start && purged <= purgesize; + locknum = (locknum + 1) % rbtdb->node_lock_count) + { + NODE_LOCK(&rbtdb->node_locks[locknum].lock, + isc_rwlocktype_write); + +- header = isc_heap_element(rbtdb->heaps[locknum], 1); +- if (header && header->rdh_ttl < now - RBTDB_VIRTUAL) { +- expire_header(rbtdb, header, tree_locked, expire_ttl); +- purgecount--; +- } +- +- for (header = ISC_LIST_TAIL(rbtdb->rdatasets[locknum]); +- header != NULL && purgecount > 0; header = header_prev) +- { +- header_prev = ISC_LIST_PREV(header, link); +- /* +- * Unlink the entry at this point to avoid checking it +- * again even if it's currently used someone else and +- * cannot be purged at this moment. This entry won't be +- * referenced any more (so unlinking is safe) since the +- * TTL was reset to 0. +- */ +- ISC_LIST_UNLINK(rbtdb->rdatasets[locknum], header, +- link); +- expire_header(rbtdb, header, tree_locked, expire_lru); +- purgecount--; +- } ++ purged += expire_lru_headers(rbtdb, locknum, purgesize - purged, ++ tree_locked); + + NODE_UNLOCK(&rbtdb->node_locks[locknum].lock, + isc_rwlocktype_write); +-- +2.40.1 + diff --git a/SOURCES/bind-9.16-CVE-2023-3341.patch b/SOURCES/bind-9.16-CVE-2023-3341.patch new file mode 100644 index 0000000..97a3bf1 --- /dev/null +++ b/SOURCES/bind-9.16-CVE-2023-3341.patch @@ -0,0 +1,168 @@ +From b137e12dc8118cddee20e372e480a495585e72b6 Mon Sep 17 00:00:00 2001 +From: Petr Mensik +Date: Tue, 19 Sep 2023 12:44:31 +0200 +Subject: [PATCH] Fix CVE-2023-3341 + +6245. [security] Limit the amount of recursion that can be performed + by isccc_cc_fromwire. (CVE-2023-3341) [GL #4152] +--- + lib/isccc/cc.c | 39 ++++++++++++++++++++++++-------- + lib/isccc/include/isccc/result.h | 4 +++- + lib/isccc/result.c | 4 +++- + 3 files changed, 35 insertions(+), 12 deletions(-) + +diff --git a/lib/isccc/cc.c b/lib/isccc/cc.c +index 0be28b9057..3744d0f037 100644 +--- a/lib/isccc/cc.c ++++ b/lib/isccc/cc.c +@@ -50,6 +50,10 @@ + + #define MAX_TAGS 256 + #define DUP_LIFETIME 900 ++#ifndef ISCCC_MAXDEPTH ++#define ISCCC_MAXDEPTH \ ++ 10 /* Big enough for rndc which just sends a string each way. 
*/ ++#endif + + typedef isccc_sexpr_t *sexpr_ptr; + +@@ -480,19 +484,25 @@ verify(isccc_sexpr_t *alist, unsigned char *data, unsigned int length, + + static isc_result_t + table_fromwire(isccc_region_t *source, isccc_region_t *secret, +- uint32_t algorithm, isccc_sexpr_t **alistp); ++ uint32_t algorithm, unsigned int depth, isccc_sexpr_t **alistp); + + static isc_result_t +-list_fromwire(isccc_region_t *source, isccc_sexpr_t **listp); ++list_fromwire(isccc_region_t *source, unsigned int depth, ++ isccc_sexpr_t **listp); + + static isc_result_t +-value_fromwire(isccc_region_t *source, isccc_sexpr_t **valuep) { ++value_fromwire(isccc_region_t *source, unsigned int depth, ++ isccc_sexpr_t **valuep) { + unsigned int msgtype; + uint32_t len; + isccc_sexpr_t *value; + isccc_region_t active; + isc_result_t result; + ++ if (depth > ISCCC_MAXDEPTH) { ++ return (ISCCC_R_MAXDEPTH); ++ } ++ + if (REGION_SIZE(*source) < 1 + 4) { + return (ISC_R_UNEXPECTEDEND); + } +@@ -513,9 +523,9 @@ value_fromwire(isccc_region_t *source, isccc_sexpr_t **valuep) { + result = ISC_R_NOMEMORY; + } + } else if (msgtype == ISCCC_CCMSGTYPE_TABLE) { +- result = table_fromwire(&active, NULL, 0, valuep); ++ result = table_fromwire(&active, NULL, 0, depth + 1, valuep); + } else if (msgtype == ISCCC_CCMSGTYPE_LIST) { +- result = list_fromwire(&active, valuep); ++ result = list_fromwire(&active, depth + 1, valuep); + } else { + result = ISCCC_R_SYNTAX; + } +@@ -525,7 +535,7 @@ value_fromwire(isccc_region_t *source, isccc_sexpr_t **valuep) { + + static isc_result_t + table_fromwire(isccc_region_t *source, isccc_region_t *secret, +- uint32_t algorithm, isccc_sexpr_t **alistp) { ++ uint32_t algorithm, unsigned int depth, isccc_sexpr_t **alistp) { + char key[256]; + uint32_t len; + isc_result_t result; +@@ -535,6 +545,10 @@ table_fromwire(isccc_region_t *source, isccc_region_t *secret, + + REQUIRE(alistp != NULL && *alistp == NULL); + ++ if (depth > ISCCC_MAXDEPTH) { ++ return (ISCCC_R_MAXDEPTH); ++ } ++ + checksum_rstart = NULL; + first_tag = true; + alist = isccc_alist_create(); +@@ -551,7 +565,7 @@ table_fromwire(isccc_region_t *source, isccc_region_t *secret, + GET_MEM(key, len, source->rstart); + key[len] = '\0'; /* Ensure NUL termination. 
*/ + value = NULL; +- result = value_fromwire(source, &value); ++ result = value_fromwire(source, depth + 1, &value); + if (result != ISC_R_SUCCESS) { + goto bad; + } +@@ -589,14 +603,19 @@ bad: + } + + static isc_result_t +-list_fromwire(isccc_region_t *source, isccc_sexpr_t **listp) { ++list_fromwire(isccc_region_t *source, unsigned int depth, ++ isccc_sexpr_t **listp) { + isccc_sexpr_t *list, *value; + isc_result_t result; + ++ if (depth > ISCCC_MAXDEPTH) { ++ return (ISCCC_R_MAXDEPTH); ++ } ++ + list = NULL; + while (!REGION_EMPTY(*source)) { + value = NULL; +- result = value_fromwire(source, &value); ++ result = value_fromwire(source, depth + 1, &value); + if (result != ISC_R_SUCCESS) { + isccc_sexpr_free(&list); + return (result); +@@ -628,7 +647,7 @@ isccc_cc_fromwire(isccc_region_t *source, isccc_sexpr_t **alistp, + return (ISCCC_R_UNKNOWNVERSION); + } + +- return (table_fromwire(source, secret, algorithm, alistp)); ++ return (table_fromwire(source, secret, algorithm, 0, alistp)); + } + + static isc_result_t +diff --git a/lib/isccc/include/isccc/result.h b/lib/isccc/include/isccc/result.h +index 5346babefc..5b6a876d1c 100644 +--- a/lib/isccc/include/isccc/result.h ++++ b/lib/isccc/include/isccc/result.h +@@ -46,8 +46,10 @@ + #define ISCCC_R_CLOCKSKEW (ISC_RESULTCLASS_ISCCC + 4) + /*% Duplicate */ + #define ISCCC_R_DUPLICATE (ISC_RESULTCLASS_ISCCC + 5) ++/*% Maximum recursion depth */ ++#define ISCCC_R_MAXDEPTH (ISC_RESULTCLASS_ISCCC + 6) + +-#define ISCCC_R_NRESULTS 6 /*%< Number of results */ ++#define ISCCC_R_NRESULTS 7 /*%< Number of results */ + + ISC_LANG_BEGINDECLS + +diff --git a/lib/isccc/result.c b/lib/isccc/result.c +index 9285435209..1956cb1655 100644 +--- a/lib/isccc/result.c ++++ b/lib/isccc/result.c +@@ -36,12 +36,14 @@ static const char *text[ISCCC_R_NRESULTS] = { + "bad auth", /* 3 */ + "expired", /* 4 */ + "clock skew", /* 5 */ +- "duplicate" /* 6 */ ++ "duplicate", /* 6 */ ++ "max depth" /* 7 */ + }; + + static const char *ids[ISCCC_R_NRESULTS] = { + "ISCCC_R_UNKNOWNVERSION", "ISCCC_R_SYNTAX", "ISCCC_R_BADAUTH", + "ISCCC_R_EXPIRED", "ISCCC_R_CLOCKSKEW", "ISCCC_R_DUPLICATE", ++ "ISCCC_R_MAXDEPTH" + }; + + #define ISCCC_RESULT_RESULTSET 2 +-- +2.41.0 + diff --git a/SPECS/bind9.16.spec b/SPECS/bind9.16.spec index 203e39b..3f6d34a 100644 --- a/SPECS/bind9.16.spec +++ b/SPECS/bind9.16.spec @@ -57,7 +57,7 @@ Summary: The Berkeley Internet Name Domain (BIND) DNS (Domain Name System) serv Name: bind9.16 License: MPLv2.0 Version: 9.16.23 -Release: 0.14%{?dist} +Release: 0.16%{?dist} Epoch: 32 Url: https://www.isc.org/downloads/bind/ # @@ -126,6 +126,9 @@ Patch185: bind-9.16-CVE-2022-3094-test.patch Patch186: bind-9.16-CVE-2022-3736.patch # https://gitlab.isc.org/isc-projects/bind9/commit/b4a65aaea19762a3712932aa2270e8a833fbde22 Patch187: bind-9.16-CVE-2022-3924.patch +# https://gitlab.isc.org/isc-projects/bind9/commit/f1d9e9ee3859976f403914d20ad2a10855343702 +Patch188: bind-9.16-CVE-2023-2828.patch +Patch189: bind-9.16-CVE-2023-3341.patch %{?systemd_ordering} Requires: coreutils @@ -437,6 +440,8 @@ in HTML and PDF format. 
 %patch185 -p1 -b .CVE-2022-3094-test
 %patch186 -p1 -b .CVE-2022-3736
 %patch187 -p1 -b .CVE-2022-3924
+%patch188 -p1 -b .CVE-2023-2828
+%patch189 -p1 -b .CVE-2023-3341
 
 %if %{with PKCS11}
 %patch135 -p1 -b .config-pkcs11
@@ -1156,6 +1161,13 @@ fi;
 %endif
 
 %changelog
+* Wed Sep 20 2023 Petr Menšík - 32:9.16.23-0.16
+- Limit the amount of recursion possible in control channel (CVE-2023-3341)
+
+* Tue Jun 20 2023 Petr Menšík - 32:9.16.23-0.15
+- Strengthen cache cleaning to prevent overflowing configured limit
+  (CVE-2023-2828)
+
 * Sat Feb 25 2023 Petr Menšík - 32:9.16.23-0.14
 - Handle subtle difference between upstream and rhel (CVE-2022-3094)