forked from rpms/bind

Compare commits


No commits in common. "c8-beta" and "c8" have entirely different histories.
c8-beta ... c8

3 changed files with 371 additions and 2 deletions

bind-9.11-CVE-2023-2828.patch

@@ -0,0 +1,193 @@
From f3aa755ba5ae5148dd0567357f8c538072e2eabc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ond=C5=99ej=20Sur=C3=BD?= <ondrej@isc.org>
Date: Tue, 30 May 2023 08:46:17 +0200
Subject: [PATCH] Improve RBT overmem cache cleaning

When cache memory usage is over the configured cache size (overmem) and
we are cleaning unused entries, it might not be enough to clean just two
entries if the entries to be expired are smaller than the newly added
rdata. This could be abused by an attacker to cause a remote Denial of
Service by potentially exhausting the operating system's memory.

Currently, addrdataset() tries to do a single TTL-based cleaning that
takes the serve-stale TTL into account and then optionally moves on to
overmem cleaning if we are in that condition. Then overmem_purge() tries
to do another single TTL-based cleaning from the TTL heap and then
continues with LRU-based cleaning, purging up to 2 entries.

Squash the TTL-based cleaning into a single call from addrdataset(), but
ignore the serve-stale TTL if we are currently overmem.

Then, instead of cleaning a fixed number of entries, pass the size of
the newly added rdatasetheader to the overmem_purge() function and clean
up at least the size of the newly added data. This prevents the cache
from going over the configured memory limit (`max-cache-size`).

Additionally, refactor the overmem_purge() function to reduce for-loop
nesting for readability.
---
lib/dns/rbtdb.c | 109 +++++++++++++++++++++++++++++-------------------
1 file changed, 67 insertions(+), 42 deletions(-)
diff --git a/lib/dns/rbtdb.c b/lib/dns/rbtdb.c
index 11203e4..cc40eae 100644
--- a/lib/dns/rbtdb.c
+++ b/lib/dns/rbtdb.c
@@ -834,7 +834,7 @@ static void update_header(dns_rbtdb_t *rbtdb, rdatasetheader_t *header,
static void expire_header(dns_rbtdb_t *rbtdb, rdatasetheader_t *header,
bool tree_locked, expire_t reason);
static void overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start,
- isc_stdtime_t now, bool tree_locked);
+ size_t purgesize, bool tree_locked);
static isc_result_t resign_insert(dns_rbtdb_t *rbtdb, int idx,
rdatasetheader_t *newheader);
static void resign_delete(dns_rbtdb_t *rbtdb, rbtdb_version_t *version,
@@ -6937,6 +6937,16 @@ addclosest(dns_rbtdb_t *rbtdb, rdatasetheader_t *newheader,
static dns_dbmethods_t zone_methods;
+static size_t
+rdataset_size(rdatasetheader_t *header) {
+ if (!NONEXISTENT(header)) {
+ return (dns_rdataslab_size((unsigned char *)header,
+ sizeof(*header)));
+ }
+
+ return (sizeof(*header));
+}
+
static isc_result_t
addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
isc_stdtime_t now, dns_rdataset_t *rdataset, unsigned int options,
@@ -7091,7 +7101,8 @@ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
}
if (cache_is_overmem)
- overmem_purge(rbtdb, rbtnode->locknum, now, tree_locked);
+ overmem_purge(rbtdb, rbtnode->locknum, rdataset_size(newheader),
+ tree_locked);
NODE_LOCK(&rbtdb->node_locks[rbtnode->locknum].lock,
isc_rwlocktype_write);
@@ -7106,9 +7117,19 @@ addrdataset(dns_db_t *db, dns_dbnode_t *node, dns_dbversion_t *version,
cleanup_dead_nodes(rbtdb, rbtnode->locknum);
header = isc_heap_element(rbtdb->heaps[rbtnode->locknum], 1);
- if (header && header->rdh_ttl < now - RBTDB_VIRTUAL)
- expire_header(rbtdb, header, tree_locked,
- expire_ttl);
+ if (header != NULL) {
+ dns_ttl_t rdh_ttl = header->rdh_ttl;
+
+ /* Only account for stale TTL if cache is not overmem */
+ if (!cache_is_overmem) {
+ rdh_ttl += rbtdb->serve_stale_ttl;
+ }
+
+ if (rdh_ttl < now - RBTDB_VIRTUAL) {
+ expire_header(rbtdb, header, tree_locked,
+ expire_ttl);
+ }
+ }
/*
* If we've been holding a write lock on the tree just for
@@ -10643,54 +10664,58 @@ update_header(dns_rbtdb_t *rbtdb, rdatasetheader_t *header,
ISC_LIST_PREPEND(rbtdb->rdatasets[header->node->locknum], header, link);
}
+static size_t
+expire_lru_headers(dns_rbtdb_t *rbtdb, unsigned int locknum, size_t purgesize,
+ bool tree_locked) {
+ rdatasetheader_t *header, *header_prev;
+ size_t purged = 0;
+
+ for (header = ISC_LIST_TAIL(rbtdb->rdatasets[locknum]);
+ header != NULL && purged <= purgesize; header = header_prev)
+ {
+ header_prev = ISC_LIST_PREV(header, link);
+ /*
+ * Unlink the entry at this point to avoid checking it
+ * again even if it's currently used someone else and
+ * cannot be purged at this moment. This entry won't be
+ * referenced any more (so unlinking is safe) since the
+ * TTL was reset to 0.
+ */
+ ISC_LIST_UNLINK(rbtdb->rdatasets[locknum], header, link);
+ size_t header_size = rdataset_size(header);
+ expire_header(rbtdb, header, tree_locked, expire_lru);
+ purged += header_size;
+ }
+
+ return (purged);
+}
+
/*%
- * Purge some expired and/or stale (i.e. unused for some period) cache entries
- * under an overmem condition. To recover from this condition quickly, up to
- * 2 entries will be purged. This process is triggered while adding a new
- * entry, and we specifically avoid purging entries in the same LRU bucket as
- * the one to which the new entry will belong. Otherwise, we might purge
- * entries of the same name of different RR types while adding RRsets from a
- * single response (consider the case where we're adding A and AAAA glue records
- * of the same NS name).
+ * Purge some stale (i.e. unused for some period - LRU based cleaning) cache
+ * entries under the overmem condition. To recover from this condition quickly,
+ * we cleanup entries up to the size of newly added rdata (passed as purgesize).
+ *
+ * This process is triggered while adding a new entry, and we specifically avoid
+ * purging entries in the same LRU bucket as the one to which the new entry will
+ * belong. Otherwise, we might purge entries of the same name of different RR
+ * types while adding RRsets from a single response (consider the case where
+ * we're adding A and AAAA glue records of the same NS name).
*/
static void
-overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start,
- isc_stdtime_t now, bool tree_locked)
+overmem_purge(dns_rbtdb_t *rbtdb, unsigned int locknum_start, size_t purgesize,
+ bool tree_locked)
{
- rdatasetheader_t *header, *header_prev;
unsigned int locknum;
- int purgecount = 2;
+ size_t purged = 0;
for (locknum = (locknum_start + 1) % rbtdb->node_lock_count;
- locknum != locknum_start && purgecount > 0;
+ locknum != locknum_start && purged <= purgesize;
locknum = (locknum + 1) % rbtdb->node_lock_count) {
NODE_LOCK(&rbtdb->node_locks[locknum].lock,
isc_rwlocktype_write);
- header = isc_heap_element(rbtdb->heaps[locknum], 1);
- if (header && header->rdh_ttl < now - RBTDB_VIRTUAL) {
- expire_header(rbtdb, header, tree_locked,
- expire_ttl);
- purgecount--;
- }
-
- for (header = ISC_LIST_TAIL(rbtdb->rdatasets[locknum]);
- header != NULL && purgecount > 0;
- header = header_prev) {
- header_prev = ISC_LIST_PREV(header, link);
- /*
- * Unlink the entry at this point to avoid checking it
- * again even if it's currently used someone else and
- * cannot be purged at this moment. This entry won't be
- * referenced any more (so unlinking is safe) since the
- * TTL was reset to 0.
- */
- ISC_LIST_UNLINK(rbtdb->rdatasets[locknum], header,
- link);
- expire_header(rbtdb, header, tree_locked,
- expire_lru);
- purgecount--;
- }
+ purged += expire_lru_headers(rbtdb, locknum, purgesize - purged,
+ tree_locked);
NODE_UNLOCK(&rbtdb->node_locks[locknum].lock,
isc_rwlocktype_write);
--
2.40.1
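
To make the mechanism easier to follow outside the rbtdb locking machinery, the following is a minimal, self-contained C sketch of the same idea: purge least-recently-used entries until at least the size of the newly added data has been reclaimed. It uses hypothetical stand-in types (entry_t, lru_list_t, purge_lru) rather than the real rdatasetheader_t and ISC_LIST structures, and free() stands in for expire_header(); only the purged <= purgesize loop condition mirrors expire_lru_headers() from the patch above.

#include <stddef.h>
#include <stdlib.h>

typedef struct entry {
	size_t size;        /* accounted size of this cache entry */
	struct entry *prev; /* toward the most recently used end */
	struct entry *next; /* toward the least recently used end */
} entry_t;

typedef struct {
	entry_t *head; /* most recently used */
	entry_t *tail; /* least recently used */
} lru_list_t;

static void
lru_unlink(lru_list_t *lru, entry_t *e) {
	if (e->prev != NULL) {
		e->prev->next = e->next;
	} else {
		lru->head = e->next;
	}
	if (e->next != NULL) {
		e->next->prev = e->prev;
	} else {
		lru->tail = e->prev;
	}
	e->prev = e->next = NULL;
}

/*
 * Walk from the least recently used end and keep expiring entries while
 * the reclaimed total is still <= purgesize, so at least the size of the
 * newly added data gets freed; free() stands in for expire_header().
 */
static size_t
purge_lru(lru_list_t *lru, size_t purgesize) {
	size_t purged = 0;
	entry_t *e = lru->tail;

	while (e != NULL && purged <= purgesize) {
		entry_t *prev = e->prev;
		size_t size = e->size;

		lru_unlink(lru, e);
		free(e);
		purged += size;
		e = prev;
	}

	return (purged);
}

A caller under the overmem condition would invoke purge_lru() once per insertion with the size of the newly added entry, just as addrdataset() now calls overmem_purge() with rdataset_size(newheader).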

bind-9.16-CVE-2023-3341.patch

@@ -0,0 +1,166 @@
From 3883ec072e5feed1237dc864854ab95ded7302d6 Mon Sep 17 00:00:00 2001
From: Petr Mensik <pemensik@redhat.com>
Date: Tue, 19 Sep 2023 13:14:52 +0200
Subject: [PATCH] Backport of CVE-2023-3341 fix

Taken from the BIND 9.16.44 change.
---
lib/isccc/cc.c | 36 +++++++++++++++++++++++---------
lib/isccc/include/isccc/result.h | 4 +++-
lib/isccc/result.c | 4 +++-
3 files changed, 32 insertions(+), 12 deletions(-)
diff --git a/lib/isccc/cc.c b/lib/isccc/cc.c
index 463a053..a54e60c 100644
--- a/lib/isccc/cc.c
+++ b/lib/isccc/cc.c
@@ -53,6 +53,10 @@
#define MAX_TAGS 256
#define DUP_LIFETIME 900
+#ifndef ISCCC_MAXDEPTH
+#define ISCCC_MAXDEPTH \
+ 10 /* Big enough for rndc which just sends a string each way. */
+#endif
typedef isccc_sexpr_t *sexpr_ptr;
@@ -573,19 +577,23 @@ verify(isccc_sexpr_t *alist, unsigned char *data, unsigned int length,
static isc_result_t
table_fromwire(isccc_region_t *source, isccc_region_t *secret,
- uint32_t algorithm, isccc_sexpr_t **alistp);
+ uint32_t algorithm, unsigned int depth, isccc_sexpr_t **alistp);
static isc_result_t
-list_fromwire(isccc_region_t *source, isccc_sexpr_t **listp);
+list_fromwire(isccc_region_t *source, unsigned int depth, isccc_sexpr_t **listp);
static isc_result_t
-value_fromwire(isccc_region_t *source, isccc_sexpr_t **valuep) {
+value_fromwire(isccc_region_t *source, unsigned int depth, isccc_sexpr_t **valuep) {
unsigned int msgtype;
uint32_t len;
isccc_sexpr_t *value;
isccc_region_t active;
isc_result_t result;
+ if (depth > ISCCC_MAXDEPTH) {
+ return (ISCCC_R_MAXDEPTH);
+ }
+
if (REGION_SIZE(*source) < 1 + 4)
return (ISC_R_UNEXPECTEDEND);
GET8(msgtype, source->rstart);
@@ -603,9 +611,9 @@ value_fromwire(isccc_region_t *source, isccc_sexpr_t **valuep) {
} else
result = ISC_R_NOMEMORY;
} else if (msgtype == ISCCC_CCMSGTYPE_TABLE)
- result = table_fromwire(&active, NULL, 0, valuep);
+ result = table_fromwire(&active, NULL, 0, depth + 1, valuep);
else if (msgtype == ISCCC_CCMSGTYPE_LIST)
- result = list_fromwire(&active, valuep);
+ result = list_fromwire(&active, depth + 1, valuep);
else
result = ISCCC_R_SYNTAX;
@@ -614,7 +622,7 @@ value_fromwire(isccc_region_t *source, isccc_sexpr_t **valuep) {
static isc_result_t
table_fromwire(isccc_region_t *source, isccc_region_t *secret,
- uint32_t algorithm, isccc_sexpr_t **alistp)
+ uint32_t algorithm, unsigned int depth, isccc_sexpr_t **alistp)
{
char key[256];
uint32_t len;
@@ -625,6 +633,10 @@ table_fromwire(isccc_region_t *source, isccc_region_t *secret,
REQUIRE(alistp != NULL && *alistp == NULL);
+ if (depth > ISCCC_MAXDEPTH) {
+ return (ISCCC_R_MAXDEPTH);
+ }
+
checksum_rstart = NULL;
first_tag = true;
alist = isccc_alist_create();
@@ -640,7 +652,7 @@ table_fromwire(isccc_region_t *source, isccc_region_t *secret,
GET_MEM(key, len, source->rstart);
key[len] = '\0'; /* Ensure NUL termination. */
value = NULL;
- result = value_fromwire(source, &value);
+ result = value_fromwire(source, depth + 1, &value);
if (result != ISC_R_SUCCESS)
goto bad;
if (isccc_alist_define(alist, key, value) == NULL) {
@@ -673,14 +685,18 @@ table_fromwire(isccc_region_t *source, isccc_region_t *secret,
}
static isc_result_t
-list_fromwire(isccc_region_t *source, isccc_sexpr_t **listp) {
+list_fromwire(isccc_region_t *source, unsigned int depth, isccc_sexpr_t **listp) {
isccc_sexpr_t *list, *value;
isc_result_t result;
+ if (depth > ISCCC_MAXDEPTH) {
+ return (ISCCC_R_MAXDEPTH);
+ }
+
list = NULL;
while (!REGION_EMPTY(*source)) {
value = NULL;
- result = value_fromwire(source, &value);
+ result = value_fromwire(source, depth + 1, &value);
if (result != ISC_R_SUCCESS) {
isccc_sexpr_free(&list);
return (result);
@@ -711,7 +727,7 @@ isccc_cc_fromwire(isccc_region_t *source, isccc_sexpr_t **alistp,
if (version != 1)
return (ISCCC_R_UNKNOWNVERSION);
- return (table_fromwire(source, secret, algorithm, alistp));
+ return (table_fromwire(source, secret, algorithm, 0, alistp));
}
static isc_result_t
diff --git a/lib/isccc/include/isccc/result.h b/lib/isccc/include/isccc/result.h
index 6c79dd7..b30b08a 100644
--- a/lib/isccc/include/isccc/result.h
+++ b/lib/isccc/include/isccc/result.h
@@ -47,8 +47,10 @@
#define ISCCC_R_CLOCKSKEW (ISC_RESULTCLASS_ISCCC + 4)
/*% Duplicate */
#define ISCCC_R_DUPLICATE (ISC_RESULTCLASS_ISCCC + 5)
+/*% Maximum recursion depth */
+#define ISCCC_R_MAXDEPTH (ISC_RESULTCLASS_ISCCC + 6)
-#define ISCCC_R_NRESULTS 6 /*%< Number of results */
+#define ISCCC_R_NRESULTS 7 /*%< Number of results */
ISC_LANG_BEGINDECLS
diff --git a/lib/isccc/result.c b/lib/isccc/result.c
index 8419bbb..a3a3b9a 100644
--- a/lib/isccc/result.c
+++ b/lib/isccc/result.c
@@ -40,7 +40,8 @@ static const char *text[ISCCC_R_NRESULTS] = {
"bad auth", /* 3 */
"expired", /* 4 */
"clock skew", /* 5 */
- "duplicate" /* 6 */
+ "duplicate", /* 6 */
+ "max depth", /* 7 */
};
static const char *ids[ISCCC_R_NRESULTS] = {
@@ -50,6 +51,7 @@ static const char *ids[ISCCC_R_NRESULTS] = {
"ISCCC_R_EXPIRED",
"ISCCC_R_CLOCKSKEW",
"ISCCC_R_DUPLICATE",
+ "ISCCC_R_MAXDEPTH"
};
#define ISCCC_RESULT_RESULTSET 2
--
2.41.0
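
The pattern behind the fix is a depth guard threaded through mutually recursive parsers: every recursive call passes depth + 1, and every parsing function checks the counter before doing any work, so a crafted, deeply nested control-channel message can no longer exhaust the stack. Below is a minimal C sketch of that pattern for a toy wire format; parse_value, parse_list, region_t, and MAXDEPTH are hypothetical stand-ins, not the real isccc API.

#include <stdint.h>

#define MAXDEPTH 10 /* analogous to ISCCC_MAXDEPTH in the patch */

typedef enum { PARSE_OK, PARSE_MAXDEPTH, PARSE_SYNTAX } parse_result_t;

typedef struct {
	const uint8_t *cur; /* next unread byte */
	const uint8_t *end; /* one past the last byte */
} region_t;

static parse_result_t parse_value(region_t *src, unsigned int depth);

/* A "list" here is a sequence of values terminated by a 0 byte. */
static parse_result_t
parse_list(region_t *src, unsigned int depth) {
	if (depth > MAXDEPTH) {
		return (PARSE_MAXDEPTH);
	}
	while (src->cur < src->end && *src->cur != 0) {
		parse_result_t result = parse_value(src, depth + 1);
		if (result != PARSE_OK) {
			return (result);
		}
	}
	if (src->cur < src->end) {
		src->cur++; /* consume the list terminator */
	}
	return (PARSE_OK);
}

/* A value is either a one-byte scalar (tag 1) or a nested list (tag 2). */
static parse_result_t
parse_value(region_t *src, unsigned int depth) {
	if (depth > MAXDEPTH) {
		return (PARSE_MAXDEPTH);
	}
	if (src->cur >= src->end) {
		return (PARSE_SYNTAX);
	}
	uint8_t tag = *src->cur++;
	if (tag == 1) {
		if (src->cur >= src->end) {
			return (PARSE_SYNTAX);
		}
		src->cur++; /* skip the scalar payload */
		return (PARSE_OK);
	}
	if (tag == 2) {
		return (parse_list(src, depth + 1));
	}
	return (PARSE_SYNTAX);
}

Without the two depth checks, an input consisting of nothing but nesting tags would recurse once per attacker-supplied byte; with them, parsing stops with PARSE_MAXDEPTH after a bounded number of stack frames.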

bind.spec

@@ -68,7 +68,7 @@ Summary: The Berkeley Internet Name Domain (BIND) DNS (Domain Name System) server
Name: bind
License: MPLv2.0
Version: 9.11.36
-Release: 9%{?PATCHVER:.%{PATCHVER}}%{?PREVER:.%{PREVER}}%{?dist}
+Release: 11%{?PATCHVER:.%{PATCHVER}}%{?PREVER:.%{PREVER}}%{?dist}
Epoch: 32
Url: https://www.isc.org/downloads/bind/
#
@@ -172,6 +172,9 @@ Patch193: bind-9.16-CVE-2022-3094-1.patch
Patch194: bind-9.16-CVE-2022-3094-2.patch
Patch195: bind-9.16-CVE-2022-3094-3.patch
Patch196: bind-9.16-CVE-2022-3094-test.patch
+# https://gitlab.isc.org/isc-projects/bind9/commit/f1d9e9ee3859976f403914d20ad2a10855343702
+Patch197: bind-9.11-CVE-2023-2828.patch
+Patch198: bind-9.16-CVE-2023-3341.patch
# SDB patches
Patch11: bind-9.3.2b2-sdbsrc.patch
@@ -578,7 +581,8 @@ are used for building ISC DHCP.
%patch194 -p1 -b .CVE-2022-3094
%patch195 -p1 -b .CVE-2022-3094
%patch196 -p1 -b .CVE-2022-3094-test
+%patch197 -p1 -b .CVE-2023-2828
+%patch198 -p1 -b .CVE-2023-3341
mkdir lib/dns/tests/testdata/dstrandom
cp -a %{SOURCE50} lib/dns/tests/testdata/dstrandom/random.data
@@ -1631,6 +1635,12 @@ rm -rf ${RPM_BUILD_ROOT}
%endif
%changelog
+* Tue Sep 19 2023 Petr Menšík <pemensik@redhat.com> - 32:9.11.36-11
+- Prevent exhaustion of memory from control channel (CVE-2023-3341)
+* Thu Jun 22 2023 Petr Menšík <pemensik@redhat.com> - 32:9.11.36-10
+- Prevent the cache going over the configured limit (CVE-2023-2828)
* Wed Feb 08 2023 Petr Menšík <pemensik@redhat.com> - 32:9.11.36-9
- Prevent flooding with UPDATE requests (CVE-2022-3094)
- include upstream test for that change