import nftables-0.9.3-17.el8

Authored by CentOS Sources on 2021-03-30 15:07:37 -04:00, committed by Stepan Oksanichenko
parent ee8ec96d0a
commit 264b1152b5
17 changed files with 2076 additions and 1 deletion

@@ -0,0 +1,42 @@
From ab62f33df5ef33f6eff8d88d9475a01822a2f625 Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Tue, 30 Jun 2020 16:20:22 +0200
Subject: [PATCH] segtree: Fix missing expires value in prefixes
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1832235
Upstream Status: nftables commit 60ba9c22fecc0
commit 60ba9c22fecc0ca9bb2a61f6ad39bceed1aee38f
Author: Phil Sutter <phil@nwl.cc>
Date: Tue Apr 28 20:54:03 2020 +0200
segtree: Fix missing expires value in prefixes
This probable copy'n'paste bug prevented 'expiration' field from being
populated when turning a range into a prefix in
interval_map_decompose(). Consequently, interval sets with timeout did
print expiry value for ranges (such as 10.0.0.1-10.0.0.5) but not
prefixes (10.0.0.0/8, for instance).
Fixes: bb0e6d8a2851b ("segtree: incorrect handling of comments and timeouts with mapping")
Signed-off-by: Phil Sutter <phil@nwl.cc>
---
src/segtree.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/segtree.c b/src/segtree.c
index e859f84..1ba4363 100644
--- a/src/segtree.c
+++ b/src/segtree.c
@@ -1086,7 +1086,7 @@ void interval_map_decompose(struct expr *set)
prefix->comment = xstrdup(low->comment);
if (low->timeout)
prefix->timeout = low->timeout;
- if (low->left->expiration)
+ if (low->expiration)
prefix->expiration = low->expiration;
}
--
1.8.3.1
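
A quick way to exercise the fix above (a sketch, not part of the patch): the
table and set names below are placeholders, and the commands need root plus
the patched nft. After the fix, the prefix element lists an "expires" value
just like the range does.
# interval set with per-element timeouts (placeholder names "t" and "s")
nft add table ip t
nft add set ip t s '{ type ipv4_addr; flags interval, timeout; }'
nft add element ip t s '{ 10.0.0.0/8 timeout 10m, 192.0.2.1-192.0.2.5 timeout 10m }'
# both elements should now show "expires ..." in the listing
nft list set ip t s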

@@ -0,0 +1,55 @@
From 119fbcbd8c37aac314d6ffa6225ab24ee4b0e31e Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Tue, 30 Jun 2020 16:20:23 +0200
Subject: [PATCH] segtree: Use expr_clone in get_set_interval_*()
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1832235
Upstream Status: nftables commit a2eedcc89d2ed
commit a2eedcc89d2ed40411c26d53579300c4f1ccb83d
Author: Phil Sutter <phil@nwl.cc>
Date: Thu Apr 30 13:45:40 2020 +0200
segtree: Use expr_clone in get_set_interval_*()
Both functions perform interval set lookups with either start and end or
only start values as input. Interestingly, in practice they either see
values which are not contained or which match an existing range exactly.
Make use of the above and just return a clone of the matching entry
instead of creating a new one based on input data.
Signed-off-by: Phil Sutter <phil@nwl.cc>
---
src/segtree.c | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/src/segtree.c b/src/segtree.c
index 1ba4363..dc4db6b 100644
--- a/src/segtree.c
+++ b/src/segtree.c
@@ -695,9 +695,7 @@ static struct expr *get_set_interval_find(const struct table *table,
range_expr_value_high(high, i);
if (mpz_cmp(left->key->value, low) >= 0 &&
mpz_cmp(right->key->value, high) <= 0) {
- range = range_expr_alloc(&internal_location,
- expr_clone(left->key),
- expr_clone(right->key));
+ range = expr_clone(i->key);
goto out;
}
break;
@@ -729,9 +727,7 @@ static struct expr *get_set_interval_end(const struct table *table,
case EXPR_RANGE:
range_expr_value_low(low, i);
if (mpz_cmp(low, left->key->value) == 0) {
- range = range_expr_alloc(&internal_location,
- expr_clone(left->key),
- expr_clone(i->key->right));
+ range = expr_clone(i->key);
goto out;
}
break;
--
1.8.3.1

@@ -0,0 +1,131 @@
From 40cdcccf0fc6f4d0d4c2248d4bd9bf3193a922e9 Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Tue, 30 Jun 2020 16:20:23 +0200
Subject: [PATCH] segtree: Merge get_set_interval_find() and
get_set_interval_end()
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1832235
Upstream Status: nftables commit f21e73d6700b8
commit f21e73d6700b873eb1a295f43bbad9caaca577e2
Author: Phil Sutter <phil@nwl.cc>
Date: Thu Apr 30 13:57:35 2020 +0200
segtree: Merge get_set_interval_find() and get_set_interval_end()
Both functions were very similar already. Under the assumption that they
will always either see a range (or the start of one) that matches exactly or not
at all, reduce complexity and make get_set_interval_find() accept NULL
(left or) right values. This way it becomes a full replacement for
get_set_interval_end().
Signed-off-by: Phil Sutter <phil@nwl.cc>
---
src/segtree.c | 63 +++++++++++++++--------------------------------------------
1 file changed, 16 insertions(+), 47 deletions(-)
diff --git a/src/segtree.c b/src/segtree.c
index dc4db6b..6e1f696 100644
--- a/src/segtree.c
+++ b/src/segtree.c
@@ -681,63 +681,31 @@ static struct expr *get_set_interval_find(const struct table *table,
{
struct expr *range = NULL;
struct set *set;
- mpz_t low, high;
struct expr *i;
+ mpz_t val;
set = set_lookup(table, set_name);
- mpz_init2(low, set->key->len);
- mpz_init2(high, set->key->len);
+ mpz_init2(val, set->key->len);
list_for_each_entry(i, &set->init->expressions, list) {
switch (i->key->etype) {
case EXPR_RANGE:
- range_expr_value_low(low, i);
- range_expr_value_high(high, i);
- if (mpz_cmp(left->key->value, low) >= 0 &&
- mpz_cmp(right->key->value, high) <= 0) {
- range = expr_clone(i->key);
- goto out;
- }
- break;
- default:
- break;
- }
- }
-out:
- mpz_clear(low);
- mpz_clear(high);
-
- return range;
-}
-
-static struct expr *get_set_interval_end(const struct table *table,
- const char *set_name,
- struct expr *left)
-{
- struct expr *i, *range = NULL;
- struct set *set;
- mpz_t low, high;
+ range_expr_value_low(val, i);
+ if (left && mpz_cmp(left->key->value, val))
+ break;
- set = set_lookup(table, set_name);
- mpz_init2(low, set->key->len);
- mpz_init2(high, set->key->len);
+ range_expr_value_high(val, i);
+ if (right && mpz_cmp(right->key->value, val))
+ break;
- list_for_each_entry(i, &set->init->expressions, list) {
- switch (i->key->etype) {
- case EXPR_RANGE:
- range_expr_value_low(low, i);
- if (mpz_cmp(low, left->key->value) == 0) {
- range = expr_clone(i->key);
- goto out;
- }
- break;
+ range = expr_clone(i->key);
+ goto out;
default:
break;
}
}
out:
- mpz_clear(low);
- mpz_clear(high);
+ mpz_clear(val);
return range;
}
@@ -767,9 +735,9 @@ int get_set_decompose(struct table *table, struct set *set)
left = NULL;
} else {
if (left) {
- range = get_set_interval_end(table,
- set->handle.set.name,
- left);
+ range = get_set_interval_find(table,
+ set->handle.set.name,
+ left, NULL);
if (range)
compound_expr_add(new_init, range);
else
@@ -780,7 +748,8 @@ int get_set_decompose(struct table *table, struct set *set)
}
}
if (left) {
- range = get_set_interval_end(table, set->handle.set.name, left);
+ range = get_set_interval_find(table, set->handle.set.name,
+ left, NULL);
if (range)
compound_expr_add(new_init, range);
else
--
1.8.3.1
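
As a rough illustration of the exact-match-or-miss behaviour these helpers rely
on (a sketch with placeholder names, run as root with the patched nft):
nft add table ip t
nft add set ip t s '{ type inet_service; flags interval; }'
nft add element ip t s '{ 20-30 }'
nft get element ip t s '{ 20-30 }'   # exact range: returned as-is
nft get element ip t s '{ 15-18 }'   # not contained: no matching element is returned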

@@ -0,0 +1,41 @@
From 4337d4eafe66b594b56b43261c8742d6b65d5ee8 Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Tue, 30 Jun 2020 16:20:23 +0200
Subject: [PATCH] tests: 0034get_element_0: do not discard stderr
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1832235
Upstream Status: nftables commit ff29e6c09aed9
commit ff29e6c09aed922a42e0e0551c34dd5d87067512
Author: Florian Westphal <fw@strlen.de>
Date: Sat Feb 22 00:02:25 2020 +0100
tests: 0034get_element_0: do not discard stderr
run_tests.sh already discards stderr by default, but will show it in
case the test script is run directly (passed as argument).
Discarding stderr also in the script prevents one from seeing
BUG() assertions and the like.
Signed-off-by: Florian Westphal <fw@strlen.de>
---
tests/shell/testcases/sets/0034get_element_0 | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/shell/testcases/sets/0034get_element_0 b/tests/shell/testcases/sets/0034get_element_0
index c7e7298..e23dbda 100755
--- a/tests/shell/testcases/sets/0034get_element_0
+++ b/tests/shell/testcases/sets/0034get_element_0
@@ -3,7 +3,7 @@
RC=0
check() { # (elems, expected)
- out=$($NFT get element ip t s "{ $1 }" 2>/dev/null)
+ out=$($NFT get element ip t s "{ $1 }")
out=$(grep "elements =" <<< "$out")
out="${out#* \{ }"
out="${out% \}}"
--
1.8.3.1

@@ -0,0 +1,135 @@
From 3a2016f539e46183965bada40946e259c33158d9 Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Tue, 30 Jun 2020 16:20:23 +0200
Subject: [PATCH] segtree: Fix get element command with prefixes
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1832235
Upstream Status: nftables commit 506fb113f7ca4
commit 506fb113f7ca4fbb3d6da09ef6f9dc2b31f54a1f
Author: Phil Sutter <phil@nwl.cc>
Date: Thu Apr 30 14:02:44 2020 +0200
segtree: Fix get element command with prefixes
Code wasn't aware of prefix elements in interval sets. With previous
changes in place, they merely need to be accepted in
get_set_interval_find() - value comparison and expression duplication are
identical to those for ranges.
Extend sets/0034get_element_0 test to cover prefixes as well. While
being at it, also cover concatenated ranges.
Signed-off-by: Phil Sutter <phil@nwl.cc>
---
src/segtree.c | 1 +
tests/shell/testcases/sets/0034get_element_0 | 62 ++++++++++++++++++++--------
2 files changed, 45 insertions(+), 18 deletions(-)
diff --git a/src/segtree.c b/src/segtree.c
index 6e1f696..073c6ec 100644
--- a/src/segtree.c
+++ b/src/segtree.c
@@ -689,6 +689,7 @@ static struct expr *get_set_interval_find(const struct table *table,
list_for_each_entry(i, &set->init->expressions, list) {
switch (i->key->etype) {
+ case EXPR_PREFIX:
case EXPR_RANGE:
range_expr_value_low(val, i);
if (left && mpz_cmp(left->key->value, val))
diff --git a/tests/shell/testcases/sets/0034get_element_0 b/tests/shell/testcases/sets/0034get_element_0
index e23dbda..3343529 100755
--- a/tests/shell/testcases/sets/0034get_element_0
+++ b/tests/shell/testcases/sets/0034get_element_0
@@ -2,43 +2,69 @@
RC=0
-check() { # (elems, expected)
- out=$($NFT get element ip t s "{ $1 }")
+check() { # (set, elems, expected)
+ out=$($NFT get element ip t $1 "{ $2 }")
out=$(grep "elements =" <<< "$out")
out="${out#* \{ }"
out="${out% \}}"
- [[ "$out" == "$2" ]] && return
- echo "ERROR: asked for '$1', expecting '$2' but got '$out'"
+ [[ "$out" == "$3" ]] && return
+ echo "ERROR: asked for '$2' in set $1, expecting '$3' but got '$out'"
((RC++))
}
RULESET="add table ip t
add set ip t s { type inet_service; flags interval; }
add element ip t s { 10, 20-30, 40, 50-60 }
+add set ip t ips { type ipv4_addr; flags interval; }
+add element ip t ips { 10.0.0.1, 10.0.0.5-10.0.0.8 }
+add element ip t ips { 10.0.0.128/25, 10.0.1.0/24, 10.0.2.3-10.0.2.12 }
+add set ip t cs { type ipv4_addr . inet_service; flags interval; }
+add element ip t cs { 10.0.0.1 . 22, 10.1.0.0/16 . 1-1024 }
+add element ip t cs { 10.2.0.1-10.2.0.8 . 1024-65535 }
"
$NFT -f - <<< "$RULESET"
# simple cases, (non-)existing values and ranges
-check 10 10
-check 11 ""
-check 20-30 20-30
-check 15-18 ""
+check s 10 10
+check s 11 ""
+check s 20-30 20-30
+check s 15-18 ""
# multiple single elements, ranges smaller than present
-check "10, 40" "10, 40"
-check "22-24, 26-28" "20-30, 20-30"
-check 21-29 20-30
+check s "10, 40" "10, 40"
+check s "22-24, 26-28" "20-30, 20-30"
+check s 21-29 20-30
# mixed single elements and ranges
-check "10, 20" "10, 20-30"
-check "10, 22" "10, 20-30"
-check "10, 22-24" "10, 20-30"
+check s "10, 20" "10, 20-30"
+check s "10, 22" "10, 20-30"
+check s "10, 22-24" "10, 20-30"
# non-existing ranges matching elements
-check 10-40 ""
-check 10-20 ""
-check 10-25 ""
-check 25-55 ""
+check s 10-40 ""
+check s 10-20 ""
+check s 10-25 ""
+check s 25-55 ""
+
+# playing with IPs, ranges and prefixes
+check ips 10.0.0.1 10.0.0.1
+check ips 10.0.0.2 ""
+check ips 10.0.1.0/24 10.0.1.0/24
+check ips 10.0.1.2/31 10.0.1.0/24
+check ips 10.0.1.0 10.0.1.0/24
+check ips 10.0.1.3 10.0.1.0/24
+check ips 10.0.1.255 10.0.1.0/24
+check ips 10.0.2.3-10.0.2.12 10.0.2.3-10.0.2.12
+check ips 10.0.2.10 10.0.2.3-10.0.2.12
+check ips 10.0.2.12 10.0.2.3-10.0.2.12
+
+# test concatenated ranges, i.e. Pi, Pa and Po
+check cs "10.0.0.1 . 22" "10.0.0.1 . 22"
+check cs "10.0.0.1 . 23" ""
+check cs "10.0.0.2 . 22" ""
+check cs "10.1.0.1 . 42" "10.1.0.0/16 . 1-1024"
+check cs "10.1.1.0/24 . 10-20" "10.1.0.0/16 . 1-1024"
+check cs "10.2.0.3 . 20000" "10.2.0.1-10.2.0.8 . 1024-65535"
exit $RC
--
1.8.3.1
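
The extended test above covers this in detail; condensed into a sketch
(placeholder names, run as root), a prefix element can now be queried exactly
or by any address inside it:
nft add table ip t
nft add set ip t ips '{ type ipv4_addr; flags interval; }'
nft add element ip t ips '{ 10.0.1.0/24, 10.0.2.3-10.0.2.12 }'
nft get element ip t ips '{ 10.0.1.0/24 }'   # exact prefix lookup
nft get element ip t ips '{ 10.0.1.3 }'      # single address resolves to 10.0.1.0/24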

@@ -0,0 +1,45 @@
From 77a93baa622f8aa33fa6182d72b380d980e39574 Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Sat, 8 Aug 2020 00:09:06 +0200
Subject: [PATCH] include: Resync nf_tables.h cache copy
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1820684
Upstream Status: nftables commit f1e5a0499c077
commit f1e5a0499c0773f18bc592dd0da0340120daa482
Author: Stefano Brivio <sbrivio@redhat.com>
Date: Mon Apr 13 21:48:02 2020 +0200
include: Resync nf_tables.h cache copy
Get this header in sync with nf.git as of commit ef516e8625dd.
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
include/linux/netfilter/nf_tables.h | 2 ++
1 file changed, 2 insertions(+)
diff --git a/include/linux/netfilter/nf_tables.h b/include/linux/netfilter/nf_tables.h
index 1a99df3..9b54a86 100644
--- a/include/linux/netfilter/nf_tables.h
+++ b/include/linux/netfilter/nf_tables.h
@@ -274,6 +274,7 @@ enum nft_rule_compat_attributes {
* @NFT_SET_TIMEOUT: set uses timeouts
* @NFT_SET_EVAL: set can be updated from the evaluation path
* @NFT_SET_OBJECT: set contains stateful objects
+ * @NFT_SET_CONCAT: set contains a concatenation
*/
enum nft_set_flags {
NFT_SET_ANONYMOUS = 0x1,
@@ -283,6 +284,7 @@ enum nft_set_flags {
NFT_SET_TIMEOUT = 0x10,
NFT_SET_EVAL = 0x20,
NFT_SET_OBJECT = 0x40,
+ NFT_SET_CONCAT = 0x80,
};
/**
--
1.8.3.1

@@ -0,0 +1,72 @@
From 5566405cc171c8fa84e0a13ea96b89245a3fb512 Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Sat, 8 Aug 2020 00:05:48 +0200
Subject: [PATCH] src: Set NFT_SET_CONCAT flag for sets with concatenated
ranges
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1820684
Upstream Status: nftables commit 09441b5e92cee
commit 09441b5e92ceea60198a35cd657904fa7a10ee54
Author: Stefano Brivio <sbrivio@redhat.com>
Date: Mon Apr 13 21:48:03 2020 +0200
src: Set NFT_SET_CONCAT flag for sets with concatenated ranges
Pablo reports that nft, after commit 8ac2f3b2fca3 ("src: Add support
for concatenated set ranges"), crashes with older kernels (< 5.6)
without support for concatenated set ranges: those sets will be sent
to the kernel, which adds them without notion of the fact that
different concatenated fields are actually included, and nft crashes
while trying to list this kind of malformed concatenation.
Use the NFT_SET_CONCAT flag introduced by kernel commit ef516e8625dd
("netfilter: nf_tables: reintroduce the NFT_SET_CONCAT flag") when
sets including concatenated ranges are sent to the kernel, so that
older kernels (with no knowledge of this flag itself) will refuse set
creation.
Note that, in expr_evaluate_set(), we have to check for the presence
of the flag, also on empty sets that might carry it in context data,
and actually set it in the actual set flags.
Reported-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
src/evaluate.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/src/evaluate.c b/src/evaluate.c
index 0c84816..f66251b 100644
--- a/src/evaluate.c
+++ b/src/evaluate.c
@@ -1360,10 +1360,16 @@ static int expr_evaluate_set(struct eval_ctx *ctx, struct expr **expr)
set->size += i->size - 1;
set->set_flags |= i->set_flags;
expr_free(i);
- } else if (!expr_is_singleton(i))
+ } else if (!expr_is_singleton(i)) {
set->set_flags |= NFT_SET_INTERVAL;
+ if (i->key->etype == EXPR_CONCAT)
+ set->set_flags |= NFT_SET_CONCAT;
+ }
}
+ if (ctx->set && (ctx->set->flags & NFT_SET_CONCAT))
+ set->set_flags |= NFT_SET_CONCAT;
+
set->set_flags |= NFT_SET_CONSTANT;
datatype_set(set, ctx->ectx.dtype);
@@ -3336,6 +3342,7 @@ static int set_evaluate(struct eval_ctx *ctx, struct set *set)
memcpy(&set->desc.field_len, &set->key->field_len,
sizeof(set->desc.field_len));
set->desc.field_count = set->key->field_count;
+ set->flags |= NFT_SET_CONCAT;
}
if (set_is_datamap(set->flags)) {
--
1.8.3.1
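
To see the flag in action (a sketch with placeholder names): a set holding
concatenated ranges is created as before on kernels that know NFT_SET_CONCAT,
while older kernels (< 5.6) now reject the set cleanly instead of nft crashing
later when listing it.
nft add table ip t
nft add set ip t cs '{ type ipv4_addr . inet_service; flags interval; }'
nft add element ip t cs '{ 10.1.0.0/16 . 1-1024 }'
nft list set ip t cs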

@@ -0,0 +1,503 @@
From 19da892698f1dce2125a796ad86239711896978f Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Mon, 7 Dec 2020 18:25:20 +0100
Subject: [PATCH] src: store expr, not dtype to track data in sets
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1877022
Upstream Status: nftables commit 343a51702656a
commit 343a51702656a6476e37cfb84609a82155c7fc5e
Author: Florian Westphal <fw@strlen.de>
Date: Tue Jul 16 19:03:55 2019 +0200
src: store expr, not dtype to track data in sets
This will be needed once we add support for the 'typeof' keyword to
handle maps that could e.g. store 'ct helper' "type" values.
Instead of:
set foo {
type ipv4_addr . mark;
this would allow
set foo {
typeof(ip saddr) . typeof(ct mark);
(exact syntax TBD).
This would be needed to allow sets that store variable-sized data types
(string, integer and the like) that can't be used at the moment.
Adding special data types for everything is problematic due to the
large amount of different types needed.
For anonymous sets, e.g. "string" can be used because the needed size can
be inferred from the statement, e.g. 'osf name { "Windows", "Linux" }',
but in case of named sets that won't work because 'type string' lacks the
context needed to derive the size information.
With 'typeof(osf name)' the context is there, but at the moment it won't
help because the expression is discarded instantly and only the data
type is retained.
Signed-off-by: Florian Westphal <fw@strlen.de>
---
include/datatype.h | 1 -
include/netlink.h | 1 -
include/rule.h | 6 ++----
src/datatype.c | 5 -----
src/evaluate.c | 58 +++++++++++++++++++++++++++++++++++++-----------------
src/expression.c | 2 +-
src/json.c | 4 ++--
src/mnl.c | 6 +++---
src/monitor.c | 2 +-
src/netlink.c | 32 ++++++++++++++----------------
src/parser_bison.y | 3 +--
src/parser_json.c | 8 ++++++--
src/rule.c | 8 ++++----
src/segtree.c | 8 ++++++--
14 files changed, 81 insertions(+), 63 deletions(-)
diff --git a/include/datatype.h b/include/datatype.h
index 49b8f60..04b4892 100644
--- a/include/datatype.h
+++ b/include/datatype.h
@@ -293,7 +293,6 @@ concat_subtype_lookup(uint32_t type, unsigned int n)
extern const struct datatype *
set_datatype_alloc(const struct datatype *orig_dtype, unsigned int byteorder);
-extern void set_datatype_destroy(const struct datatype *dtype);
extern void time_print(uint64_t msec, struct output_ctx *octx);
extern struct error_record *time_parse(const struct location *loc,
diff --git a/include/netlink.h b/include/netlink.h
index e694171..88d12ba 100644
--- a/include/netlink.h
+++ b/include/netlink.h
@@ -189,6 +189,5 @@ int netlink_events_trace_cb(const struct nlmsghdr *nlh, int type,
struct netlink_mon_handler *monh);
enum nft_data_types dtype_map_to_kernel(const struct datatype *dtype);
-const struct datatype *dtype_map_from_kernel(enum nft_data_types type);
#endif /* NFTABLES_NETLINK_H */
diff --git a/include/rule.h b/include/rule.h
index 626973e..3637462 100644
--- a/include/rule.h
+++ b/include/rule.h
@@ -283,8 +283,7 @@ extern struct rule *rule_lookup_by_index(const struct chain *chain,
* @gc_int: garbage collection interval
* @timeout: default timeout value
* @key: key expression (data type, length))
- * @datatype: mapping data type
- * @datalen: mapping data len
+ * @data: mapping data expression
* @objtype: mapping object type
* @init: initializer
* @rg_cache: cached range element (left)
@@ -303,8 +302,7 @@ struct set {
uint32_t gc_int;
uint64_t timeout;
struct expr *key;
- const struct datatype *datatype;
- unsigned int datalen;
+ struct expr *data;
uint32_t objtype;
struct expr *init;
struct expr *rg_cache;
diff --git a/src/datatype.c b/src/datatype.c
index b9e167e..189e1b4 100644
--- a/src/datatype.c
+++ b/src/datatype.c
@@ -1190,11 +1190,6 @@ const struct datatype *set_datatype_alloc(const struct datatype *orig_dtype,
return dtype;
}
-void set_datatype_destroy(const struct datatype *dtype)
-{
- datatype_free(dtype);
-}
-
static struct error_record *time_unit_parse(const struct location *loc,
const char *str, uint64_t *unit)
{
diff --git a/src/evaluate.c b/src/evaluate.c
index f66251b..578dcae 100644
--- a/src/evaluate.c
+++ b/src/evaluate.c
@@ -1383,6 +1383,7 @@ static int expr_evaluate_map(struct eval_ctx *ctx, struct expr **expr)
{
struct expr_ctx ectx = ctx->ectx;
struct expr *map = *expr, *mappings;
+ const struct datatype *dtype;
struct expr *key;
expr_set_context(&ctx->ectx, NULL, 0);
@@ -1405,10 +1406,14 @@ static int expr_evaluate_map(struct eval_ctx *ctx, struct expr **expr)
mappings = implicit_set_declaration(ctx, "__map%d",
key,
mappings);
- mappings->set->datatype =
- datatype_get(set_datatype_alloc(ectx.dtype,
- ectx.byteorder));
- mappings->set->datalen = ectx.len;
+
+ dtype = set_datatype_alloc(ectx.dtype, ectx.byteorder);
+
+ mappings->set->data = constant_expr_alloc(&netlink_location,
+ dtype, dtype->byteorder,
+ ectx.len, NULL);
+ if (ectx.len && mappings->set->data->len != ectx.len)
+ BUG("%d vs %d\n", mappings->set->data->len, ectx.len);
map->mappings = mappings;
@@ -1444,7 +1449,7 @@ static int expr_evaluate_map(struct eval_ctx *ctx, struct expr **expr)
map->mappings->set->key->dtype->desc,
map->map->dtype->desc);
- datatype_set(map, map->mappings->set->datatype);
+ datatype_set(map, map->mappings->set->data->dtype);
map->flags |= EXPR_F_CONSTANT;
/* Data for range lookups needs to be in big endian order */
@@ -1474,7 +1479,12 @@ static int expr_evaluate_mapping(struct eval_ctx *ctx, struct expr **expr)
"Key must be a constant");
mapping->flags |= mapping->left->flags & EXPR_F_SINGLETON;
- expr_set_context(&ctx->ectx, set->datatype, set->datalen);
+ if (set->data) {
+ expr_set_context(&ctx->ectx, set->data->dtype, set->data->len);
+ } else {
+ assert((set->flags & NFT_SET_MAP) == 0);
+ }
+
if (expr_evaluate(ctx, &mapping->right) < 0)
return -1;
if (!expr_is_constant(mapping->right))
@@ -2119,7 +2129,7 @@ static int stmt_evaluate_arg(struct eval_ctx *ctx, struct stmt *stmt,
(*expr)->len);
else if ((*expr)->dtype->type != TYPE_INTEGER &&
!datatype_equal((*expr)->dtype, dtype))
- return stmt_binary_error(ctx, *expr, stmt,
+ return stmt_binary_error(ctx, *expr, stmt, /* verdict vs invalid? */
"datatype mismatch: expected %s, "
"expression has type %s",
dtype->desc, (*expr)->dtype->desc);
@@ -3113,9 +3123,9 @@ static int stmt_evaluate_map(struct eval_ctx *ctx, struct stmt *stmt)
"Key expression comments are not supported");
if (stmt_evaluate_arg(ctx, stmt,
- stmt->map.set->set->datatype,
- stmt->map.set->set->datalen,
- stmt->map.set->set->datatype->byteorder,
+ stmt->map.set->set->data->dtype,
+ stmt->map.set->set->data->len,
+ stmt->map.set->set->data->byteorder,
&stmt->map.data->key) < 0)
return -1;
if (expr_is_constant(stmt->map.data))
@@ -3161,8 +3171,12 @@ static int stmt_evaluate_objref_map(struct eval_ctx *ctx, struct stmt *stmt)
mappings = implicit_set_declaration(ctx, "__objmap%d",
key, mappings);
- mappings->set->datatype = &string_type;
- mappings->set->datalen = NFT_OBJ_MAXNAMELEN * BITS_PER_BYTE;
+
+ mappings->set->data = constant_expr_alloc(&netlink_location,
+ &string_type,
+ BYTEORDER_HOST_ENDIAN,
+ NFT_OBJ_MAXNAMELEN * BITS_PER_BYTE,
+ NULL);
mappings->set->objtype = stmt->objref.type;
map->mappings = mappings;
@@ -3197,7 +3211,7 @@ static int stmt_evaluate_objref_map(struct eval_ctx *ctx, struct stmt *stmt)
map->mappings->set->key->dtype->desc,
map->map->dtype->desc);
- datatype_set(map, map->mappings->set->datatype);
+ datatype_set(map, map->mappings->set->data->dtype);
map->flags |= EXPR_F_CONSTANT;
/* Data for range lookups needs to be in big endian order */
@@ -3346,17 +3360,25 @@ static int set_evaluate(struct eval_ctx *ctx, struct set *set)
}
if (set_is_datamap(set->flags)) {
- if (set->datatype == NULL)
+ if (set->data == NULL)
return set_error(ctx, set, "map definition does not "
"specify mapping data type");
- set->datalen = set->datatype->size;
- if (set->datalen == 0 && set->datatype->type != TYPE_VERDICT)
+ if (set->data->len == 0 && set->data->dtype->type != TYPE_VERDICT)
return set_error(ctx, set, "unqualified mapping data "
"type specified in map definition");
} else if (set_is_objmap(set->flags)) {
- set->datatype = &string_type;
- set->datalen = NFT_OBJ_MAXNAMELEN * BITS_PER_BYTE;
+ if (set->data) {
+ assert(set->data->etype == EXPR_VALUE);
+ assert(set->data->dtype == &string_type);
+ }
+
+ assert(set->data == NULL);
+ set->data = constant_expr_alloc(&netlink_location, &string_type,
+ BYTEORDER_HOST_ENDIAN,
+ NFT_OBJ_MAXNAMELEN * BITS_PER_BYTE,
+ NULL);
+
}
ctx->set = set;
diff --git a/src/expression.c b/src/expression.c
index 5070b10..6fa2f1d 100644
--- a/src/expression.c
+++ b/src/expression.c
@@ -1010,7 +1010,7 @@ static void map_expr_print(const struct expr *expr, struct output_ctx *octx)
{
expr_print(expr->map, octx);
if (expr->mappings->etype == EXPR_SET_REF &&
- expr->mappings->set->datatype->type == TYPE_VERDICT)
+ expr->mappings->set->data->dtype->type == TYPE_VERDICT)
nft_print(octx, " vmap ");
else
nft_print(octx, " map ");
diff --git a/src/json.c b/src/json.c
index 3498e24..1906e7d 100644
--- a/src/json.c
+++ b/src/json.c
@@ -82,7 +82,7 @@ static json_t *set_print_json(struct output_ctx *octx, const struct set *set)
if (set_is_datamap(set->flags)) {
type = "map";
- datatype_ext = set->datatype->name;
+ datatype_ext = set->data->dtype->name;
} else if (set_is_objmap(set->flags)) {
type = "map";
datatype_ext = obj_type_name(set->objtype);
@@ -645,7 +645,7 @@ json_t *map_expr_json(const struct expr *expr, struct output_ctx *octx)
const char *type = "map";
if (expr->mappings->etype == EXPR_SET_REF &&
- expr->mappings->set->datatype->type == TYPE_VERDICT)
+ expr->mappings->set->data->dtype->type == TYPE_VERDICT)
type = "vmap";
return json_pack("{s:{s:o, s:o}}", type,
diff --git a/src/mnl.c b/src/mnl.c
index 221ee05..23341e6 100644
--- a/src/mnl.c
+++ b/src/mnl.c
@@ -839,9 +839,9 @@ int mnl_nft_set_add(struct netlink_ctx *ctx, const struct cmd *cmd,
div_round_up(set->key->len, BITS_PER_BYTE));
if (set_is_datamap(set->flags)) {
nftnl_set_set_u32(nls, NFTNL_SET_DATA_TYPE,
- dtype_map_to_kernel(set->datatype));
+ dtype_map_to_kernel(set->data->dtype));
nftnl_set_set_u32(nls, NFTNL_SET_DATA_LEN,
- set->datalen / BITS_PER_BYTE);
+ set->data->len / BITS_PER_BYTE);
}
if (set_is_objmap(set->flags))
nftnl_set_set_u32(nls, NFTNL_SET_OBJ_TYPE, set->objtype);
@@ -873,7 +873,7 @@ int mnl_nft_set_add(struct netlink_ctx *ctx, const struct cmd *cmd,
if (set_is_datamap(set->flags) &&
!nftnl_udata_put_u32(udbuf, NFTNL_UDATA_SET_DATABYTEORDER,
- set->datatype->byteorder))
+ set->data->byteorder))
memory_allocation_error();
if (set->automerge &&
diff --git a/src/monitor.c b/src/monitor.c
index fb803cf..7927b6f 100644
--- a/src/monitor.c
+++ b/src/monitor.c
@@ -401,7 +401,7 @@ static int netlink_events_setelem_cb(const struct nlmsghdr *nlh, int type,
*/
dummyset = set_alloc(monh->loc);
dummyset->key = expr_clone(set->key);
- dummyset->datatype = set->datatype;
+ dummyset->data = set->data;
dummyset->flags = set->flags;
dummyset->init = set_expr_alloc(monh->loc, set);
diff --git a/src/netlink.c b/src/netlink.c
index e0ba903..64e51e5 100644
--- a/src/netlink.c
+++ b/src/netlink.c
@@ -575,7 +575,7 @@ enum nft_data_types dtype_map_to_kernel(const struct datatype *dtype)
}
}
-const struct datatype *dtype_map_from_kernel(enum nft_data_types type)
+static const struct datatype *dtype_map_from_kernel(enum nft_data_types type)
{
switch (type) {
case NFT_DATA_VERDICT:
@@ -622,10 +622,10 @@ struct set *netlink_delinearize_set(struct netlink_ctx *ctx,
const struct nftnl_set *nls)
{
const struct nftnl_udata *ud[NFTNL_UDATA_SET_MAX + 1] = {};
- uint32_t flags, key, data, data_len, objtype = 0;
enum byteorder keybyteorder = BYTEORDER_INVALID;
enum byteorder databyteorder = BYTEORDER_INVALID;
- const struct datatype *keytype, *datatype;
+ const struct datatype *keytype, *datatype = NULL;
+ uint32_t flags, key, objtype = 0;
bool automerge = false;
const char *udata;
struct set *set;
@@ -659,6 +659,8 @@ struct set *netlink_delinearize_set(struct netlink_ctx *ctx,
flags = nftnl_set_get_u32(nls, NFTNL_SET_FLAGS);
if (set_is_datamap(flags)) {
+ uint32_t data;
+
data = nftnl_set_get_u32(nls, NFTNL_SET_DATA_TYPE);
datatype = dtype_map_from_kernel(data);
if (datatype == NULL) {
@@ -667,8 +669,7 @@ struct set *netlink_delinearize_set(struct netlink_ctx *ctx,
data);
return NULL;
}
- } else
- datatype = NULL;
+ }
if (set_is_objmap(flags)) {
objtype = nftnl_set_get_u32(nls, NFTNL_SET_OBJ_TYPE);
@@ -691,16 +692,13 @@ struct set *netlink_delinearize_set(struct netlink_ctx *ctx,
set->objtype = objtype;
+ set->data = NULL;
if (datatype)
- set->datatype = datatype_get(set_datatype_alloc(datatype,
- databyteorder));
- else
- set->datatype = NULL;
-
- if (nftnl_set_is_set(nls, NFTNL_SET_DATA_LEN)) {
- data_len = nftnl_set_get_u32(nls, NFTNL_SET_DATA_LEN);
- set->datalen = data_len * BITS_PER_BYTE;
- }
+ set->data = constant_expr_alloc(&netlink_location,
+ set_datatype_alloc(datatype, databyteorder),
+ databyteorder,
+ nftnl_set_get_u32(nls, NFTNL_SET_DATA_LEN) * BITS_PER_BYTE,
+ NULL);
if (nftnl_set_is_set(nls, NFTNL_SET_TIMEOUT))
set->timeout = nftnl_set_get_u64(nls, NFTNL_SET_TIMEOUT);
@@ -897,10 +895,10 @@ key_end:
goto out;
data = netlink_alloc_data(&netlink_location, &nld,
- set->datatype->type == TYPE_VERDICT ?
+ set->data->dtype->type == TYPE_VERDICT ?
NFT_REG_VERDICT : NFT_REG_1);
- datatype_set(data, set->datatype);
- data->byteorder = set->datatype->byteorder;
+ datatype_set(data, set->data->dtype);
+ data->byteorder = set->data->byteorder;
if (data->byteorder == BYTEORDER_HOST_ENDIAN)
mpz_switch_byteorder(data->value, data->len / BITS_PER_BYTE);
diff --git a/src/parser_bison.y b/src/parser_bison.y
index ea83f52..4cca31b 100644
--- a/src/parser_bison.y
+++ b/src/parser_bison.y
@@ -1749,9 +1749,8 @@ map_block : /* empty */ { $$ = $<set>-1; }
stmt_separator
{
$1->key = $3;
- $1->datatype = $5->dtype;
+ $1->data = $5;
- expr_free($5);
$1->flags |= NFT_SET_MAP;
$$ = $1;
}
diff --git a/src/parser_json.c b/src/parser_json.c
index ce8e566..ddc694f 100644
--- a/src/parser_json.c
+++ b/src/parser_json.c
@@ -2833,11 +2833,15 @@ static struct cmd *json_parse_cmd_add_set(struct json_ctx *ctx, json_t *root,
}
if (!json_unpack(root, "{s:s}", "map", &dtype_ext)) {
+ const struct datatype *dtype;
+
set->objtype = string_to_nft_object(dtype_ext);
if (set->objtype) {
set->flags |= NFT_SET_OBJECT;
- } else if (datatype_lookup_byname(dtype_ext)) {
- set->datatype = datatype_lookup_byname(dtype_ext);
+ } else if ((dtype = datatype_lookup_byname(dtype_ext))) {
+ set->data = constant_expr_alloc(&netlink_location,
+ dtype, dtype->byteorder,
+ dtype->size, NULL);
set->flags |= NFT_SET_MAP;
} else {
json_error(ctx, "Invalid map type '%s'.", dtype_ext);
diff --git a/src/rule.c b/src/rule.c
index e18237b..f7d888b 100644
--- a/src/rule.c
+++ b/src/rule.c
@@ -332,8 +332,8 @@ struct set *set_clone(const struct set *set)
new_set->gc_int = set->gc_int;
new_set->timeout = set->timeout;
new_set->key = expr_clone(set->key);
- new_set->datatype = datatype_get(set->datatype);
- new_set->datalen = set->datalen;
+ if (set->data)
+ new_set->data = expr_clone(set->data);
new_set->objtype = set->objtype;
new_set->policy = set->policy;
new_set->automerge = set->automerge;
@@ -356,7 +356,7 @@ void set_free(struct set *set)
expr_free(set->init);
handle_free(&set->handle);
expr_free(set->key);
- set_datatype_destroy(set->datatype);
+ expr_free(set->data);
xfree(set);
}
@@ -469,7 +469,7 @@ static void set_print_declaration(const struct set *set,
nft_print(octx, "%s%stype %s",
opts->tab, opts->tab, set->key->dtype->name);
if (set_is_datamap(set->flags))
- nft_print(octx, " : %s", set->datatype->name);
+ nft_print(octx, " : %s", set->data->dtype->name);
else if (set_is_objmap(set->flags))
nft_print(octx, " : %s", obj_type_name(set->objtype));
diff --git a/src/segtree.c b/src/segtree.c
index 073c6ec..d6e3ce2 100644
--- a/src/segtree.c
+++ b/src/segtree.c
@@ -79,8 +79,12 @@ static void seg_tree_init(struct seg_tree *tree, const struct set *set,
tree->root = RB_ROOT;
tree->keytype = set->key->dtype;
tree->keylen = set->key->len;
- tree->datatype = set->datatype;
- tree->datalen = set->datalen;
+ tree->datatype = NULL;
+ tree->datalen = 0;
+ if (set->data) {
+ tree->datatype = set->data->dtype;
+ tree->datalen = set->data->len;
+ }
tree->byteorder = first->byteorder;
tree->debug_mask = debug_mask;
}
--
1.8.3.1
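
Nothing changes user-visibly here; the mapping data type is simply carried as
an expression (set->data) instead of a bare datatype internally. A small
datamap sketch (placeholder names) that walks the affected paths, declaration,
element add and listing:
nft add table ip t
nft add map ip t marks '{ type ipv4_addr : mark; }'
nft add element ip t marks '{ 192.0.2.1 : 42 }'
nft list map ip t marks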

@@ -0,0 +1,120 @@
From 785823a1f607a7bcd32d4cb42655422c223fcad5 Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Mon, 7 Dec 2020 18:25:20 +0100
Subject: [PATCH] evaluate: Perform set evaluation on implicitly declared
(anonymous) sets
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1877022
Upstream Status: nftables commit 7aa08d45031ec
commit 7aa08d45031ec7ce5dadb4979471d626367c09cd
Author: Stefano Brivio <sbrivio@redhat.com>
Date: Wed May 27 22:51:21 2020 +0200
evaluate: Perform set evaluation on implicitly declared (anonymous) sets
If a set is implicitly declared, set_evaluate() is not called as a
result of cmd_evaluate_add(), because we're adding in fact something
else (e.g. a rule). Expression-wise, evaluation still happens as the
implicit set expression is eventually found in the tree and handled
by expr_evaluate_set(), but context-wise evaluation (set_evaluate())
is skipped, and this might be relevant instead.
This is visible in the reported case of an anonymous set including
concatenated ranges:
# nft add rule t c ip saddr . tcp dport { 192.0.2.1 . 20-30 } accept
BUG: invalid range expression type concat
nft: expression.c:1160: range_expr_value_low: Assertion `0' failed.
Aborted
because we reach do_add_set() without properly evaluated flags and
set description, and eventually end up in expr_to_intervals(), which
can't handle that expression.
Explicitly call set_evaluate() as we add anonymous sets into the
context, and instruct the same function to:
- skip expression-wise set evaluation if the set is anonymous, as
that happens later anyway as part of the general tree evaluation
- skip the insertion in the set cache, as it makes no sense to have
sets that shouldn't be referenced there
For object maps, the allocation of the expression for set->data is
already handled by set_evaluate(), so we can now drop that from
stmt_evaluate_objref_map().
v2:
- skip insertion of set in cache (Pablo Neira Ayuso)
- drop double allocation of expression (and leak of the first
one) for object maps (Pablo Neira Ayuso)
Reported-by: Pablo Neira Ayuso <pablo@netfilter.org>
Reported-by: Phil Sutter <phil@nwl.cc>
Signed-off-by: Stefano Brivio <sbrivio@redhat.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
src/evaluate.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/src/evaluate.c b/src/evaluate.c
index 578dcae..fc45cef 100644
--- a/src/evaluate.c
+++ b/src/evaluate.c
@@ -75,6 +75,7 @@ static void key_fix_dtype_byteorder(struct expr *key)
datatype_set(key, set_datatype_alloc(dtype, key->byteorder));
}
+static int set_evaluate(struct eval_ctx *ctx, struct set *set);
static struct expr *implicit_set_declaration(struct eval_ctx *ctx,
const char *name,
struct expr *key,
@@ -105,6 +106,8 @@ static struct expr *implicit_set_declaration(struct eval_ctx *ctx,
list_add_tail(&cmd->list, &ctx->cmd->list);
}
+ set_evaluate(ctx, set);
+
return set_ref_expr_alloc(&expr->location, set);
}
@@ -3171,12 +3174,6 @@ static int stmt_evaluate_objref_map(struct eval_ctx *ctx, struct stmt *stmt)
mappings = implicit_set_declaration(ctx, "__objmap%d",
key, mappings);
-
- mappings->set->data = constant_expr_alloc(&netlink_location,
- &string_type,
- BYTEORDER_HOST_ENDIAN,
- NFT_OBJ_MAXNAMELEN * BITS_PER_BYTE,
- NULL);
mappings->set->objtype = stmt->objref.type;
map->mappings = mappings;
@@ -3381,6 +3378,13 @@ static int set_evaluate(struct eval_ctx *ctx, struct set *set)
}
+ /* Default timeout value implies timeout support */
+ if (set->timeout)
+ set->flags |= NFT_SET_TIMEOUT;
+
+ if (set_is_anonymous(set->flags))
+ return 0;
+
ctx->set = set;
if (set->init != NULL) {
expr_set_context(&ctx->ectx, set->key->dtype, set->key->len);
@@ -3392,10 +3396,6 @@ static int set_evaluate(struct eval_ctx *ctx, struct set *set)
if (set_lookup(table, set->handle.set.name) == NULL)
set_add_hash(set_get(set), table);
- /* Default timeout value implies timeout support */
- if (set->timeout)
- set->flags |= NFT_SET_TIMEOUT;
-
return 0;
}
--
1.8.3.1
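
The reproducer from the commit message, spelled out as a runnable sketch
(placeholder table and chain names, run as root); with the fix the rule loads
instead of hitting the range_expr_value_low() assertion:
nft add table ip t
nft add chain ip t c '{ type filter hook input priority 0; }'
nft add rule ip t c 'ip saddr . tcp dport { 192.0.2.1 . 20-30 } accept'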

@@ -0,0 +1,167 @@
From 3193f74613b16a42d7784452ebf4d53ccd33b887 Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Tue, 12 Jan 2021 10:34:35 +0100
Subject: [PATCH] evaluate: missing datatype definition in
implicit_set_declaration()
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1877022
Upstream Status: nftables commit 54eb1e16cc478
commit 54eb1e16cc4787906fe8206858f0ea0bfb9c1209
Author: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Sun Jun 7 15:23:21 2020 +0200
evaluate: missing datatype definition in implicit_set_declaration()
Set set->data from implicit_set_declaration(); otherwise, set_evaluate()
bails out with:
# nft -f /etc/nftables/inet-filter.nft
/etc/nftables/inet-filter.nft:8:32-54: Error: map definition does not specify
mapping data type
tcp dport vmap { 22 : jump ssh_input }
^^^^^^^^^^^^^^^^^^^^^^^
/etc/nftables/inet-filter.nft:13:26-52: Error: map definition does not specify
mapping data type
iif vmap { "eth0" : jump wan_input }
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Add a test to cover this case.
Fixes: 7aa08d45031e ("evaluate: Perform set evaluation on implicitly declared (anonymous) sets")
Closes: https://bugzilla.kernel.org/show_bug.cgi?id=208093
Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
src/evaluate.c | 22 ++++++++++++----------
tests/shell/testcases/maps/0009vmap_0 | 19 +++++++++++++++++++
tests/shell/testcases/maps/dumps/0009vmap_0 | 13 +++++++++++++
3 files changed, 44 insertions(+), 10 deletions(-)
create mode 100755 tests/shell/testcases/maps/0009vmap_0
create mode 100644 tests/shell/testcases/maps/dumps/0009vmap_0
diff --git a/src/evaluate.c b/src/evaluate.c
index fc45cef..a966ed4 100644
--- a/src/evaluate.c
+++ b/src/evaluate.c
@@ -79,6 +79,7 @@ static int set_evaluate(struct eval_ctx *ctx, struct set *set);
static struct expr *implicit_set_declaration(struct eval_ctx *ctx,
const char *name,
struct expr *key,
+ struct expr *data,
struct expr *expr)
{
struct cmd *cmd;
@@ -92,6 +93,7 @@ static struct expr *implicit_set_declaration(struct eval_ctx *ctx,
set->flags = NFT_SET_ANONYMOUS | expr->set_flags;
set->handle.set.name = xstrdup(name);
set->key = key;
+ set->data = data;
set->init = expr;
set->automerge = set->flags & NFT_SET_INTERVAL;
@@ -1387,7 +1389,7 @@ static int expr_evaluate_map(struct eval_ctx *ctx, struct expr **expr)
struct expr_ctx ectx = ctx->ectx;
struct expr *map = *expr, *mappings;
const struct datatype *dtype;
- struct expr *key;
+ struct expr *key, *data;
expr_set_context(&ctx->ectx, NULL, 0);
if (expr_evaluate(ctx, &map->map) < 0)
@@ -1406,15 +1408,14 @@ static int expr_evaluate_map(struct eval_ctx *ctx, struct expr **expr)
ctx->ectx.byteorder,
ctx->ectx.len, NULL);
+ dtype = set_datatype_alloc(ectx.dtype, ectx.byteorder);
+ data = constant_expr_alloc(&netlink_location, dtype,
+ dtype->byteorder, ectx.len, NULL);
+
mappings = implicit_set_declaration(ctx, "__map%d",
- key,
+ key, data,
mappings);
- dtype = set_datatype_alloc(ectx.dtype, ectx.byteorder);
-
- mappings->set->data = constant_expr_alloc(&netlink_location,
- dtype, dtype->byteorder,
- ectx.len, NULL);
if (ectx.len && mappings->set->data->len != ectx.len)
BUG("%d vs %d\n", mappings->set->data->len, ectx.len);
@@ -1857,7 +1858,8 @@ static int expr_evaluate_relational(struct eval_ctx *ctx, struct expr **expr)
case EXPR_SET:
right = rel->right =
implicit_set_declaration(ctx, "__set%d",
- expr_get(left), right);
+ expr_get(left), NULL,
+ right);
/* fall through */
case EXPR_SET_REF:
/* Data for range lookups needs to be in big endian order */
@@ -2335,7 +2337,7 @@ static int stmt_evaluate_meter(struct eval_ctx *ctx, struct stmt *stmt)
set->set_flags |= NFT_SET_TIMEOUT;
setref = implicit_set_declaration(ctx, stmt->meter.name,
- expr_get(key), set);
+ expr_get(key), NULL, set);
setref->set->desc.size = stmt->meter.size;
stmt->meter.set = setref;
@@ -3173,7 +3175,7 @@ static int stmt_evaluate_objref_map(struct eval_ctx *ctx, struct stmt *stmt)
ctx->ectx.len, NULL);
mappings = implicit_set_declaration(ctx, "__objmap%d",
- key, mappings);
+ key, NULL, mappings);
mappings->set->objtype = stmt->objref.type;
map->mappings = mappings;
diff --git a/tests/shell/testcases/maps/0009vmap_0 b/tests/shell/testcases/maps/0009vmap_0
new file mode 100755
index 0000000..7627c81
--- /dev/null
+++ b/tests/shell/testcases/maps/0009vmap_0
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+set -e
+
+EXPECTED="table inet filter {
+ chain ssh_input {
+ }
+
+ chain wan_input {
+ tcp dport vmap { 22 : jump ssh_input }
+ }
+
+ chain prerouting {
+ type filter hook prerouting priority -300; policy accept;
+ iif vmap { "lo" : jump wan_input }
+ }
+}"
+
+$NFT -f - <<< "$EXPECTED"
diff --git a/tests/shell/testcases/maps/dumps/0009vmap_0 b/tests/shell/testcases/maps/dumps/0009vmap_0
new file mode 100644
index 0000000..540a8af
--- /dev/null
+++ b/tests/shell/testcases/maps/dumps/0009vmap_0
@@ -0,0 +1,13 @@
+table inet filter {
+ chain ssh_input {
+ }
+
+ chain wan_input {
+ tcp dport vmap { 22 : jump ssh_input }
+ }
+
+ chain prerouting {
+ type filter hook prerouting priority -300; policy accept;
+ iif vmap { "lo" : jump wan_input }
+ }
+}
--
1.8.3.1
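
Besides the shipped vmap test, any rule with an anonymous verdict map now gets
its implicitly declared set evaluated with the data type in place, for example
(placeholder names):
nft add table inet filter
nft add chain inet filter input '{ type filter hook input priority 0; }'
nft add rule inet filter input 'tcp dport vmap { 22 : accept, 80 : drop }'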

@@ -0,0 +1,88 @@
From 9d67918643e7d17c433e82eb6cdb039cb103c50f Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Mon, 7 Dec 2020 18:26:24 +0100
Subject: [PATCH] mergesort: unbreak listing with binops
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1891790
Upstream Status: nftables commit 3926a3369bb5a
commit 3926a3369bb5ada5c0706dadcbcf938517822a35
Author: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Thu Aug 20 01:05:04 2020 +0200
mergesort: unbreak listing with binops
tcp flags == {syn, syn|ack}
tcp flags & (fin|syn|rst|psh|ack|urg) == {ack, psh|ack, fin, fin|psh|ack}
results in:
BUG: Unknown expression binop
nft: mergesort.c:47: expr_msort_cmp: Assertion `0' failed.
Aborted (core dumped)
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
src/mergesort.c | 2 ++
tests/py/inet/tcp.t | 2 ++
tests/py/inet/tcp.t.payload | 21 +++++++++++++++++++++
3 files changed, 25 insertions(+)
diff --git a/src/mergesort.c b/src/mergesort.c
index 649b780..02094b4 100644
--- a/src/mergesort.c
+++ b/src/mergesort.c
@@ -43,6 +43,8 @@ static int expr_msort_cmp(const struct expr *e1, const struct expr *e2)
return concat_expr_msort_cmp(e1, e2);
case EXPR_MAPPING:
return expr_msort_cmp(e1->left, e2->left);
+ case EXPR_BINOP:
+ return expr_msort_cmp(e1->left, e2->left);
default:
BUG("Unknown expression %s\n", expr_name(e1));
}
diff --git a/tests/py/inet/tcp.t b/tests/py/inet/tcp.t
index e0a83e2..29f06f5 100644
--- a/tests/py/inet/tcp.t
+++ b/tests/py/inet/tcp.t
@@ -79,6 +79,8 @@ tcp flags != cwr;ok
tcp flags == syn;ok
tcp flags & (syn|fin) == (syn|fin);ok;tcp flags & (fin | syn) == fin | syn
tcp flags & (fin | syn | rst | psh | ack | urg | ecn | cwr) == fin | syn | rst | psh | ack | urg | ecn | cwr;ok;tcp flags == 0xff
+tcp flags { syn, syn | ack };ok
+tcp flags & (fin | syn | rst | psh | ack | urg) == { fin, ack, psh | ack, fin | psh | ack };ok
tcp window 22222;ok
tcp window 22;ok
diff --git a/tests/py/inet/tcp.t.payload b/tests/py/inet/tcp.t.payload
index 55f1bc2..076e562 100644
--- a/tests/py/inet/tcp.t.payload
+++ b/tests/py/inet/tcp.t.payload
@@ -680,3 +680,24 @@ inet test-inet input
[ bitwise reg 1 = (reg=1 & 0x000000f0 ) ^ 0x00000000 ]
[ cmp eq reg 1 0x00000080 ]
+# tcp flags & (fin | syn | rst | psh | ack | urg) == { fin, ack, psh | ack, fin | psh | ack }
+__set%d test-inet 3
+__set%d test-inet 0
+ element 00000001 : 0 [end] element 00000010 : 0 [end] element 00000018 : 0 [end] element 00000019 : 0 [end]
+ip
+ [ meta load l4proto => reg 1 ]
+ [ cmp eq reg 1 0x00000006 ]
+ [ payload load 1b @ transport header + 13 => reg 1 ]
+ [ bitwise reg 1 = (reg=1 & 0x0000003f ) ^ 0x00000000 ]
+ [ lookup reg 1 set __set%d ]
+
+# tcp flags { syn, syn | ack }
+__set%d test-inet 3
+__set%d test-inet 0
+ element 00000002 : 0 [end] element 00000012 : 0 [end]
+inet
+ [ meta load l4proto => reg 1 ]
+ [ cmp eq reg 1 0x00000006 ]
+ [ payload load 1b @ transport header + 13 => reg 1 ]
+ [ lookup reg 1 set __set%d ]
+
--
1.8.3.1
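
The two expressions from the commit message can be used directly to verify the
fix (placeholder table and chain names); listing the chain previously triggered
the expr_msort_cmp() assertion:
nft add table inet t
nft add chain inet t c '{ type filter hook input priority 0; }'
nft add rule inet t c 'tcp flags { syn, syn | ack } accept'
nft add rule inet t c 'tcp flags & (fin | syn | rst | psh | ack | urg) == { ack, psh | ack, fin, fin | psh | ack } accept'
nft list chain inet t c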

@@ -0,0 +1,134 @@
From 876a1202351264f6d3b105258f10bde693870bd4 Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Mon, 7 Dec 2020 18:27:16 +0100
Subject: [PATCH] proto: add sctp crc32 checksum fixup
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1895804
Upstream Status: nftables commit 09a3b2ba0c822
commit 09a3b2ba0c8228d1c6bf0f030cae97addb397351
Author: Florian Westphal <fw@strlen.de>
Date: Tue Oct 6 23:16:32 2020 +0200
proto: add sctp crc32 checksum fixup
Stateless SCTP header mangling doesn't work reliably.
This tells the kernel to update the checksum field using
the sctp crc32 algorithm.
Note that this needs additional kernel support to work.
Signed-off-by: Florian Westphal <fw@strlen.de>
---
include/linux/netfilter/nf_tables.h | 2 ++
include/proto.h | 1 +
src/netlink_linearize.c | 2 +-
src/proto.c | 8 ++++++++
4 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/include/linux/netfilter/nf_tables.h b/include/linux/netfilter/nf_tables.h
index 9b54a86..1328b8e 100644
--- a/include/linux/netfilter/nf_tables.h
+++ b/include/linux/netfilter/nf_tables.h
@@ -707,10 +707,12 @@ enum nft_payload_bases {
*
* @NFT_PAYLOAD_CSUM_NONE: no checksumming
* @NFT_PAYLOAD_CSUM_INET: internet checksum (RFC 791)
+ * @NFT_PAYLOAD_CSUM_SCTP: CRC-32c, for use in SCTP header (RFC 3309)
*/
enum nft_payload_csum_types {
NFT_PAYLOAD_CSUM_NONE,
NFT_PAYLOAD_CSUM_INET,
+ NFT_PAYLOAD_CSUM_SCTP,
};
enum nft_payload_csum_flags {
diff --git a/include/proto.h b/include/proto.h
index fab48c1..436cbe3 100644
--- a/include/proto.h
+++ b/include/proto.h
@@ -78,6 +78,7 @@ struct proto_hdr_template {
struct proto_desc {
const char *name;
enum proto_bases base;
+ enum nft_payload_csum_types checksum_type;
unsigned int checksum_key;
unsigned int protocol_key;
unsigned int length;
diff --git a/src/netlink_linearize.c b/src/netlink_linearize.c
index cb1b7fe..606d97a 100644
--- a/src/netlink_linearize.c
+++ b/src/netlink_linearize.c
@@ -937,7 +937,7 @@ static void netlink_gen_payload_stmt(struct netlink_linearize_ctx *ctx,
expr->len / BITS_PER_BYTE);
if (csum_off) {
nftnl_expr_set_u32(nle, NFTNL_EXPR_PAYLOAD_CSUM_TYPE,
- NFT_PAYLOAD_CSUM_INET);
+ desc->checksum_type);
nftnl_expr_set_u32(nle, NFTNL_EXPR_PAYLOAD_CSUM_OFFSET,
csum_off / BITS_PER_BYTE);
}
diff --git a/src/proto.c b/src/proto.c
index 40ce590..8360abf 100644
--- a/src/proto.c
+++ b/src/proto.c
@@ -345,6 +345,7 @@ const struct proto_desc proto_icmp = {
.name = "icmp",
.base = PROTO_BASE_TRANSPORT_HDR,
.checksum_key = ICMPHDR_CHECKSUM,
+ .checksum_type = NFT_PAYLOAD_CSUM_INET,
.templates = {
[ICMPHDR_TYPE] = ICMPHDR_TYPE("type", &icmp_type_type, type),
[ICMPHDR_CODE] = ICMPHDR_TYPE("code", &icmp_code_type, code),
@@ -397,6 +398,7 @@ const struct proto_desc proto_igmp = {
.name = "igmp",
.base = PROTO_BASE_TRANSPORT_HDR,
.checksum_key = IGMPHDR_CHECKSUM,
+ .checksum_type = NFT_PAYLOAD_CSUM_INET,
.templates = {
[IGMPHDR_TYPE] = IGMPHDR_TYPE("type", &igmp_type_type, igmp_type),
[IGMPHDR_MRT] = IGMPHDR_FIELD("mrt", igmp_code),
@@ -417,6 +419,7 @@ const struct proto_desc proto_udp = {
.name = "udp",
.base = PROTO_BASE_TRANSPORT_HDR,
.checksum_key = UDPHDR_CHECKSUM,
+ .checksum_type = NFT_PAYLOAD_CSUM_INET,
.templates = {
[UDPHDR_SPORT] = INET_SERVICE("sport", struct udphdr, source),
[UDPHDR_DPORT] = INET_SERVICE("dport", struct udphdr, dest),
@@ -474,6 +477,7 @@ const struct proto_desc proto_tcp = {
.name = "tcp",
.base = PROTO_BASE_TRANSPORT_HDR,
.checksum_key = TCPHDR_CHECKSUM,
+ .checksum_type = NFT_PAYLOAD_CSUM_INET,
.templates = {
[TCPHDR_SPORT] = INET_SERVICE("sport", struct tcphdr, source),
[TCPHDR_DPORT] = INET_SERVICE("dport", struct tcphdr, dest),
@@ -553,6 +557,8 @@ const struct proto_desc proto_dccp = {
const struct proto_desc proto_sctp = {
.name = "sctp",
.base = PROTO_BASE_TRANSPORT_HDR,
+ .checksum_key = SCTPHDR_CHECKSUM,
+ .checksum_type = NFT_PAYLOAD_CSUM_SCTP,
.templates = {
[SCTPHDR_SPORT] = INET_SERVICE("sport", struct sctphdr, source),
[SCTPHDR_DPORT] = INET_SERVICE("dport", struct sctphdr, dest),
@@ -650,6 +656,7 @@ const struct proto_desc proto_ip = {
.name = "ip",
.base = PROTO_BASE_NETWORK_HDR,
.checksum_key = IPHDR_CHECKSUM,
+ .checksum_type = NFT_PAYLOAD_CSUM_INET,
.protocols = {
PROTO_LINK(IPPROTO_ICMP, &proto_icmp),
PROTO_LINK(IPPROTO_IGMP, &proto_igmp),
@@ -746,6 +753,7 @@ const struct proto_desc proto_icmp6 = {
.name = "icmpv6",
.base = PROTO_BASE_TRANSPORT_HDR,
.checksum_key = ICMP6HDR_CHECKSUM,
+ .checksum_type = NFT_PAYLOAD_CSUM_INET,
.templates = {
[ICMP6HDR_TYPE] = ICMP6HDR_TYPE("type", &icmp6_type_type, icmp6_type),
[ICMP6HDR_CODE] = ICMP6HDR_TYPE("code", &icmpv6_code_type, icmp6_code),
--
1.8.3.1
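
A sketch of the kind of stateless SCTP mangling this enables (placeholder table
and chain names, numeric hook priority chosen arbitrarily); with the patch the
rewritten port gets a CRC-32c checksum fixup, provided the kernel supports
NFT_PAYLOAD_CSUM_SCTP:
nft add table inet mangle
nft add chain inet mangle prerouting '{ type filter hook prerouting priority -150; }'
# rewrite the SCTP destination port; the checksum field is updated via CRC-32c
nft add rule inet mangle prerouting 'sctp dport 10000 sctp dport set 12345'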

@@ -0,0 +1,233 @@
From 70dc225b23708c6ac96e2895488f3c6dea9e201d Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Mon, 7 Dec 2020 18:28:27 +0100
Subject: [PATCH] proto: Fix ARP header field ordering
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1896334
Upstream Status: nftables commit f751753f92ea7
commit f751753f92ea76f582f7d5d1fef8b4d5677ba589
Author: Phil Sutter <phil@nwl.cc>
Date: Tue Nov 10 13:07:49 2020 +0100
proto: Fix ARP header field ordering
In ARP header, destination ether address sits between source IP and
destination IP addresses. Enum arp_hdr_fields had this wrong, which
in turn caused wrong ordering of entries in proto_arp->templates. When
expanding a combined payload expression, code assumes that template
entries are ordered by header offset, therefore the destination ether
address match was printed as raw if an earlier field was matched as
well:
| arp saddr ip 192.168.1.1 arp daddr ether 3e:d1:3f:d6:12:0b
was printed as:
| arp saddr ip 192.168.1.1 @nh,144,48 69068440080907
Note: Although strictly not necessary, reorder fields in
proto_arp->templates as well to match their actual ordering, just to
avoid confusion.
Fixes: 4b0f2a712b579 ("src: support for arp sender and target ethernet and IPv4 addresses")
Signed-off-by: Phil Sutter <phil@nwl.cc>
---
include/proto.h | 2 +-
src/proto.c | 2 +-
tests/py/arp/arp.t | 3 +++
tests/py/arp/arp.t.json | 56 +++++++++++++++++++++++++++++++++++++++
tests/py/arp/arp.t.json.output | 28 ++++++++++++++++++++
tests/py/arp/arp.t.payload | 10 +++++++
tests/py/arp/arp.t.payload.netdev | 14 ++++++++++
7 files changed, 113 insertions(+), 2 deletions(-)
diff --git a/include/proto.h b/include/proto.h
index 436cbe3..5a50059 100644
--- a/include/proto.h
+++ b/include/proto.h
@@ -184,8 +184,8 @@ enum arp_hdr_fields {
ARPHDR_PLN,
ARPHDR_OP,
ARPHDR_SADDR_ETHER,
- ARPHDR_DADDR_ETHER,
ARPHDR_SADDR_IP,
+ ARPHDR_DADDR_ETHER,
ARPHDR_DADDR_IP,
};
diff --git a/src/proto.c b/src/proto.c
index 8360abf..49c8c92 100644
--- a/src/proto.c
+++ b/src/proto.c
@@ -908,8 +908,8 @@ const struct proto_desc proto_arp = {
[ARPHDR_PLN] = ARPHDR_FIELD("plen", plen),
[ARPHDR_OP] = ARPHDR_TYPE("operation", &arpop_type, oper),
[ARPHDR_SADDR_ETHER] = ARPHDR_TYPE("saddr ether", &etheraddr_type, sha),
- [ARPHDR_DADDR_ETHER] = ARPHDR_TYPE("daddr ether", &etheraddr_type, tha),
[ARPHDR_SADDR_IP] = ARPHDR_TYPE("saddr ip", &ipaddr_type, spa),
+ [ARPHDR_DADDR_ETHER] = ARPHDR_TYPE("daddr ether", &etheraddr_type, tha),
[ARPHDR_DADDR_IP] = ARPHDR_TYPE("daddr ip", &ipaddr_type, tpa),
},
.format = {
diff --git a/tests/py/arp/arp.t b/tests/py/arp/arp.t
index 2540c0a..109d01d 100644
--- a/tests/py/arp/arp.t
+++ b/tests/py/arp/arp.t
@@ -61,4 +61,7 @@ arp daddr ip 4.3.2.1;ok
arp saddr ether aa:bb:cc:aa:bb:cc;ok
arp daddr ether aa:bb:cc:aa:bb:cc;ok
+arp saddr ip 192.168.1.1 arp daddr ether fe:ed:00:c0:ff:ee;ok
+arp daddr ether fe:ed:00:c0:ff:ee arp saddr ip 192.168.1.1;ok;arp saddr ip 192.168.1.1 arp daddr ether fe:ed:00:c0:ff:ee
+
meta iifname "invalid" arp ptype 0x0800 arp htype 1 arp hlen 6 arp plen 4 @nh,192,32 0xc0a88f10 @nh,144,48 set 0x112233445566;ok;iifname "invalid" arp htype 1 arp ptype ip arp hlen 6 arp plen 4 arp daddr ip 192.168.143.16 arp daddr ether set 11:22:33:44:55:66
diff --git a/tests/py/arp/arp.t.json b/tests/py/arp/arp.t.json
index 5f2f6cd..8508c17 100644
--- a/tests/py/arp/arp.t.json
+++ b/tests/py/arp/arp.t.json
@@ -901,6 +901,62 @@
}
]
+# arp saddr ip 192.168.1.1 arp daddr ether fe:ed:00:c0:ff:ee
+[
+ {
+ "match": {
+ "left": {
+ "payload": {
+ "field": "saddr ip",
+ "protocol": "arp"
+ }
+ },
+ "op": "==",
+ "right": "192.168.1.1"
+ }
+ },
+ {
+ "match": {
+ "left": {
+ "payload": {
+ "field": "daddr ether",
+ "protocol": "arp"
+ }
+ },
+ "op": "==",
+ "right": "fe:ed:00:c0:ff:ee"
+ }
+ }
+]
+
+# arp daddr ether fe:ed:00:c0:ff:ee arp saddr ip 192.168.1.1
+[
+ {
+ "match": {
+ "left": {
+ "payload": {
+ "field": "daddr ether",
+ "protocol": "arp"
+ }
+ },
+ "op": "==",
+ "right": "fe:ed:00:c0:ff:ee"
+ }
+ },
+ {
+ "match": {
+ "left": {
+ "payload": {
+ "field": "saddr ip",
+ "protocol": "arp"
+ }
+ },
+ "op": "==",
+ "right": "192.168.1.1"
+ }
+ }
+]
+
# meta iifname "invalid" arp ptype 0x0800 arp htype 1 arp hlen 6 arp plen 4 @nh,192,32 0xc0a88f10 @nh,144,48 set 0x112233445566
[
{
diff --git a/tests/py/arp/arp.t.json.output b/tests/py/arp/arp.t.json.output
index b8507bf..afa75b2 100644
--- a/tests/py/arp/arp.t.json.output
+++ b/tests/py/arp/arp.t.json.output
@@ -66,6 +66,34 @@
}
]
+# arp daddr ether fe:ed:00:c0:ff:ee arp saddr ip 192.168.1.1
+[
+ {
+ "match": {
+ "left": {
+ "payload": {
+ "field": "saddr ip",
+ "protocol": "arp"
+ }
+ },
+ "op": "==",
+ "right": "192.168.1.1"
+ }
+ },
+ {
+ "match": {
+ "left": {
+ "payload": {
+ "field": "daddr ether",
+ "protocol": "arp"
+ }
+ },
+ "op": "==",
+ "right": "fe:ed:00:c0:ff:ee"
+ }
+ }
+]
+
# meta iifname "invalid" arp ptype 0x0800 arp htype 1 arp hlen 6 arp plen 4 @nh,192,32 0xc0a88f10 @nh,144,48 set 0x112233445566
[
{
diff --git a/tests/py/arp/arp.t.payload b/tests/py/arp/arp.t.payload
index 52c9932..f819853 100644
--- a/tests/py/arp/arp.t.payload
+++ b/tests/py/arp/arp.t.payload
@@ -307,3 +307,13 @@ arp test-arp input
[ payload load 6b @ network header + 18 => reg 1 ]
[ cmp eq reg 1 0xaaccbbaa 0x0000ccbb ]
+# arp saddr ip 192.168.1.1 arp daddr ether fe:ed:00:c0:ff:ee
+arp
+ [ payload load 10b @ network header + 14 => reg 1 ]
+ [ cmp eq reg 1 0x0101a8c0 0xc000edfe 0x0000eeff ]
+
+# arp daddr ether fe:ed:00:c0:ff:ee arp saddr ip 192.168.1.1
+arp
+ [ payload load 10b @ network header + 14 => reg 1 ]
+ [ cmp eq reg 1 0x0101a8c0 0xc000edfe 0x0000eeff ]
+
diff --git a/tests/py/arp/arp.t.payload.netdev b/tests/py/arp/arp.t.payload.netdev
index 667691f..f57610c 100644
--- a/tests/py/arp/arp.t.payload.netdev
+++ b/tests/py/arp/arp.t.payload.netdev
@@ -409,3 +409,17 @@ netdev test-netdev ingress
[ payload load 6b @ network header + 18 => reg 1 ]
[ cmp eq reg 1 0xaaccbbaa 0x0000ccbb ]
+# arp saddr ip 192.168.1.1 arp daddr ether fe:ed:00:c0:ff:ee
+netdev
+ [ meta load protocol => reg 1 ]
+ [ cmp eq reg 1 0x00000608 ]
+ [ payload load 10b @ network header + 14 => reg 1 ]
+ [ cmp eq reg 1 0x0101a8c0 0xc000edfe 0x0000eeff ]
+
+# arp daddr ether fe:ed:00:c0:ff:ee arp saddr ip 192.168.1.1
+netdev
+ [ meta load protocol => reg 1 ]
+ [ cmp eq reg 1 0x00000608 ]
+ [ payload load 10b @ network header + 14 => reg 1 ]
+ [ cmp eq reg 1 0x0101a8c0 0xc000edfe 0x0000eeff ]
+
--
1.8.3.1
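
The listing regression can be checked with the rule from the commit message
(placeholder table and chain names); after the fix the daddr ether match is
printed symbolically again instead of as a raw @nh payload expression:
nft add table arp filter
nft add chain arp filter input '{ type filter hook input priority 0; }'
nft add rule arp filter input 'arp saddr ip 192.168.1.1 arp daddr ether 3e:d1:3f:d6:12:0b accept'
nft list chain arp filter input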

@@ -0,0 +1,108 @@
From 26c4f15080663a12006abf8539ebf28bb223e6d9 Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Mon, 7 Dec 2020 18:29:15 +0100
Subject: [PATCH] json: echo: Speedup seqnum_to_json()
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1900565
Upstream Status: nftables commit 389a0e1edc89a
commit 389a0e1edc89a4048a272e569d3349b1d43bc567
Author: Phil Sutter <phil@nwl.cc>
Date: Fri Nov 20 20:01:59 2020 +0100
json: echo: Speedup seqnum_to_json()
Derek Dai reports:
"If there are a lot of command in JSON node, seqnum_to_json() will slow
down application (eg: firewalld) dramatically since it iterate whole
command list every time."
He sent a patch implementing a lookup table, but we can do better: Speed
this up by introducing a hash table to store the struct json_cmd_assoc
objects in, taking their netlink sequence number as key.
Quickly tested restoring a ruleset containing about 19k rules:
| # time ./before/nft -jeaf large_ruleset.json >/dev/null
| 4.85user 0.47system 0:05.48elapsed 97%CPU (0avgtext+0avgdata 69732maxresident)k
| 0inputs+0outputs (15major+16937minor)pagefaults 0swaps
| # time ./after/nft -jeaf large_ruleset.json >/dev/null
| 0.18user 0.44system 0:00.70elapsed 89%CPU (0avgtext+0avgdata 68484maxresident)k
| 0inputs+0outputs (15major+16645minor)pagefaults 0swaps
Bugzilla: https://bugzilla.netfilter.org/show_bug.cgi?id=1479
Reported-by: Derek Dai <daiderek@gmail.com>
Signed-off-by: Phil Sutter <phil@nwl.cc>
---
src/parser_json.c | 28 ++++++++++++++++++----------
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/src/parser_json.c b/src/parser_json.c
index ddc694f..107dc38 100644
--- a/src/parser_json.c
+++ b/src/parser_json.c
@@ -3646,42 +3646,50 @@ static int json_verify_metainfo(struct json_ctx *ctx, json_t *root)
}
struct json_cmd_assoc {
- struct json_cmd_assoc *next;
+ struct hlist_node hnode;
const struct cmd *cmd;
json_t *json;
};
-static struct json_cmd_assoc *json_cmd_list = NULL;
+#define CMD_ASSOC_HSIZE 512
+static struct hlist_head json_cmd_assoc_hash[CMD_ASSOC_HSIZE];
static void json_cmd_assoc_free(void)
{
struct json_cmd_assoc *cur;
+ struct hlist_node *pos, *n;
+ int i;
- while (json_cmd_list) {
- cur = json_cmd_list;
- json_cmd_list = cur->next;
- free(cur);
+ for (i = 0; i < CMD_ASSOC_HSIZE; i++) {
+ hlist_for_each_entry_safe(cur, pos, n,
+ &json_cmd_assoc_hash[i], hnode)
+ free(cur);
}
}
static void json_cmd_assoc_add(json_t *json, const struct cmd *cmd)
{
struct json_cmd_assoc *new = xzalloc(sizeof *new);
+ int key = cmd->seqnum % CMD_ASSOC_HSIZE;
- new->next = json_cmd_list;
new->json = json;
new->cmd = cmd;
- json_cmd_list = new;
+
+ hlist_add_head(&new->hnode, &json_cmd_assoc_hash[key]);
}
static json_t *seqnum_to_json(const uint32_t seqnum)
{
- const struct json_cmd_assoc *cur;
+ int key = seqnum % CMD_ASSOC_HSIZE;
+ struct json_cmd_assoc *cur;
+ struct hlist_node *n;
- for (cur = json_cmd_list; cur; cur = cur->next) {
+
+ hlist_for_each_entry(cur, n, &json_cmd_assoc_hash[key], hnode) {
if (cur->cmd->seqnum == seqnum)
return cur->json;
}
+
return NULL;
}
--
1.8.3.1
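Aside (illustrative, not part of the patch above): the speedup comes from replacing an O(n) walk over one long linked list with a lookup in one of 512 hash buckets keyed by the netlink sequence number. A stripped-down sketch of the same idea, using plain singly linked bucket chains instead of nftables' hlist helpers (all names and types here are hypothetical stand-ins, not the real nftables API):

    #include <stdint.h>
    #include <stdlib.h>

    #define HSIZE 512

    struct cmd   { uint32_t seqnum; };          /* stand-in for struct cmd    */
    struct assoc {
        struct assoc     *next;                 /* bucket chain               */
        const struct cmd *cmd;
        void             *json;                 /* json_t * in the real code  */
    };

    static struct assoc *buckets[HSIZE];

    static void assoc_add(void *json, const struct cmd *cmd)
    {
        struct assoc *new = calloc(1, sizeof(*new));

        if (!new)
            abort();                            /* xzalloc() also never returns NULL */
        new->json = json;
        new->cmd  = cmd;
        new->next = buckets[cmd->seqnum % HSIZE];
        buckets[cmd->seqnum % HSIZE] = new;
    }

    static void *assoc_lookup(uint32_t seqnum)
    {
        struct assoc *cur;

        /* Only one bucket is scanned instead of the whole association list. */
        for (cur = buckets[seqnum % HSIZE]; cur; cur = cur->next)
            if (cur->cmd->seqnum == seqnum)
                return cur->json;
        return NULL;
    }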
View File
@@ -0,0 +1,116 @@
From 0dcfa1b0211fa50201d51d0f52869a8e2d93ba76 Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Mon, 7 Dec 2020 18:29:15 +0100
Subject: [PATCH] json: Fix seqnum_to_json() functionality
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1900565
Upstream Status: nftables commit 299ec575faa6b
commit 299ec575faa6b070940b483dc517ecd883b9f1a4
Author: Phil Sutter <phil@nwl.cc>
Date: Wed Dec 2 23:07:11 2020 +0100
json: Fix seqnum_to_json() functionality
Introduction of json_cmd_assoc_hash missed that by the time the hash
table insert happens, the struct cmd object's 'seqnum' field which is
used as key is not initialized yet. This doesn't happen until
nft_netlink() prepares the batch object which records the lowest seqnum.
Therefore push all json_cmd_assoc objects into a temporary list until
the first lookup happens. At this time, all referenced cmd objects have
their seqnum set and the list entries can be moved into the hash table
for fast lookups.
To expose such problems in the future, make json_events_cb() emit an
error message if the passed message has a handle but no assoc entry is
found for its seqnum.
Fixes: 389a0e1edc89a ("json: echo: Speedup seqnum_to_json()")
Cc: Derek Dai <daiderek@gmail.com>
Signed-off-by: Phil Sutter <phil@nwl.cc>
---
src/parser_json.c | 27 +++++++++++++++++++++++----
1 file changed, 23 insertions(+), 4 deletions(-)
diff --git a/src/parser_json.c b/src/parser_json.c
index 107dc38..785f0e7 100644
--- a/src/parser_json.c
+++ b/src/parser_json.c
@@ -3646,6 +3646,7 @@ static int json_verify_metainfo(struct json_ctx *ctx, json_t *root)
}
struct json_cmd_assoc {
+ struct json_cmd_assoc *next;
struct hlist_node hnode;
const struct cmd *cmd;
json_t *json;
@@ -3653,6 +3654,7 @@ struct json_cmd_assoc {
#define CMD_ASSOC_HSIZE 512
static struct hlist_head json_cmd_assoc_hash[CMD_ASSOC_HSIZE];
+static struct json_cmd_assoc *json_cmd_assoc_list;
static void json_cmd_assoc_free(void)
{
@@ -3660,6 +3662,12 @@ static void json_cmd_assoc_free(void)
struct hlist_node *pos, *n;
int i;
+ while (json_cmd_assoc_list) {
+ cur = json_cmd_assoc_list->next;
+ free(json_cmd_assoc_list);
+ json_cmd_assoc_list = cur;
+ }
+
for (i = 0; i < CMD_ASSOC_HSIZE; i++) {
hlist_for_each_entry_safe(cur, pos, n,
&json_cmd_assoc_hash[i], hnode)
@@ -3670,21 +3678,29 @@ static void json_cmd_assoc_free(void)
static void json_cmd_assoc_add(json_t *json, const struct cmd *cmd)
{
struct json_cmd_assoc *new = xzalloc(sizeof *new);
- int key = cmd->seqnum % CMD_ASSOC_HSIZE;
new->json = json;
new->cmd = cmd;
+ new->next = json_cmd_assoc_list;
- hlist_add_head(&new->hnode, &json_cmd_assoc_hash[key]);
+ json_cmd_assoc_list = new;
}
static json_t *seqnum_to_json(const uint32_t seqnum)
{
- int key = seqnum % CMD_ASSOC_HSIZE;
struct json_cmd_assoc *cur;
struct hlist_node *n;
+ int key;
+ while (json_cmd_assoc_list) {
+ cur = json_cmd_assoc_list;
+ json_cmd_assoc_list = cur->next;
+ key = cur->cmd->seqnum % CMD_ASSOC_HSIZE;
+ hlist_add_head(&cur->hnode, &json_cmd_assoc_hash[key]);
+ }
+
+ key = seqnum % CMD_ASSOC_HSIZE;
hlist_for_each_entry(cur, n, &json_cmd_assoc_hash[key], hnode) {
if (cur->cmd->seqnum == seqnum)
return cur->json;
@@ -3865,8 +3881,11 @@ int json_events_cb(const struct nlmsghdr *nlh, struct netlink_mon_handler *monh)
return MNL_CB_OK;
json = seqnum_to_json(nlh->nlmsg_seq);
- if (!json)
+ if (!json) {
+ json_echo_error(monh, "No JSON command found with seqnum %lu\n",
+ nlh->nlmsg_seq);
return MNL_CB_OK;
+ }
tmp = json_object_get(json, "add");
if (!tmp)
--
1.8.3.1
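Aside (illustrative, continuing the hypothetical sketch from the previous aside): the fix works because cmd->seqnum is only assigned when nft_netlink() prepares the batch, i.e. after json_cmd_assoc_add() has already run. Parking new entries on a pending list and hashing them lazily on the first lookup means every key is final by the time it is used:

    /* Revised helpers for the sketch above: defer hashing until lookup time. */
    static struct assoc *pending;

    static void assoc_add(void *json, const struct cmd *cmd)
    {
        struct assoc *new = calloc(1, sizeof(*new));

        if (!new)
            abort();
        new->json = json;
        new->cmd  = cmd;        /* cmd->seqnum may still be unset here */
        new->next = pending;
        pending   = new;
    }

    static void *assoc_lookup(uint32_t seqnum)
    {
        struct assoc *cur;

        while (pending) {       /* drain: every cmd->seqnum is valid by now */
            cur = pending;
            pending = cur->next;
            cur->next = buckets[cur->cmd->seqnum % HSIZE];
            buckets[cur->cmd->seqnum % HSIZE] = cur;
        }

        for (cur = buckets[seqnum % HSIZE]; cur; cur = cur->next)
            if (cur->cmd->seqnum == seqnum)
                return cur->json;
        return NULL;
    }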
View File
@@ -0,0 +1,47 @@
From b7964157c40066f09411ac52547acb07d1966aee Mon Sep 17 00:00:00 2001
From: Phil Sutter <psutter@redhat.com>
Date: Tue, 12 Jan 2021 15:49:43 +0100
Subject: [PATCH] json: don't leave dangling pointers on hlist
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1900565
Upstream Status: nftables commit 48917d876d51c
commit 48917d876d51cd6ba5bff07172acef05c9e12474
Author: Florian Westphal <fw@strlen.de>
Date: Mon Dec 14 16:53:29 2020 +0100
json: don't leave dangling pointers on hlist
unshare -n tests/json_echo/run-test.py
[..]
Adding chain c
free(): double free detected in tcache 2
Aborted (core dumped)
The element must be deleted from the hlist prior to freeing it.
Fixes: 389a0e1edc89a ("json: echo: Speedup seqnum_to_json()")
Signed-off-by: Florian Westphal <fw@strlen.de>
---
src/parser_json.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/src/parser_json.c b/src/parser_json.c
index 785f0e7..986f128 100644
--- a/src/parser_json.c
+++ b/src/parser_json.c
@@ -3670,8 +3670,10 @@ static void json_cmd_assoc_free(void)
for (i = 0; i < CMD_ASSOC_HSIZE; i++) {
hlist_for_each_entry_safe(cur, pos, n,
- &json_cmd_assoc_hash[i], hnode)
+ &json_cmd_assoc_hash[i], hnode) {
+ hlist_del(&cur->hnode);
free(cur);
+ }
}
}
--
1.8.3.1
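Aside (illustrative, continuing the same hypothetical sketch): the double free reported above is what happens when the table is walked again while its bucket heads still point at entries that were already freed. Unlinking each entry before freeing it, which is what hlist_del() does in the patch, or simply resetting every bucket head, makes a repeated teardown harmless:

    static void assoc_free_all(void)
    {
        struct assoc *cur, *next;
        int i;

        for (i = 0; i < HSIZE; i++) {
            for (cur = buckets[i]; cur; cur = next) {
                next = cur->next;
                free(cur);
            }
            buckets[i] = NULL;  /* no dangling head: a second call is a no-op */
        }
        /* The pending list from the previous aside would be drained the same way. */
    }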
View File
@@ -1,5 +1,5 @@
%define rpmversion 0.9.3
-%define specrelease 14%{?dist}
+%define specrelease 17%{?dist}

Name: nftables
Version: %{rpmversion}
@@ -41,6 +41,22 @@ Patch21: 0021-src-Add-support-for-concatenated-set-ranges.patch
Patch22: 0022-parser_json-Support-ranges-in-concat-expressions.patch
Patch23: 0023-doc-Document-notrack-statement.patch
Patch24: 0024-JSON-Improve-performance-of-json_events_cb.patch
Patch25: 0025-segtree-Fix-missing-expires-value-in-prefixes.patch
Patch26: 0026-segtree-Use-expr_clone-in-get_set_interval_.patch
Patch27: 0027-segtree-Merge-get_set_interval_find-and-get_set_inte.patch
Patch28: 0028-tests-0034get_element_0-do-not-discard-stderr.patch
Patch29: 0029-segtree-Fix-get-element-command-with-prefixes.patch
Patch30: 0030-include-Resync-nf_tables.h-cache-copy.patch
Patch31: 0031-src-Set-NFT_SET_CONCAT-flag-for-sets-with-concatenat.patch
Patch32: 0032-src-store-expr-not-dtype-to-track-data-in-sets.patch
Patch33: 0033-evaluate-Perform-set-evaluation-on-implicitly-declar.patch
Patch34: 0034-evaluate-missing-datatype-definition-in-implicit_set.patch
Patch35: 0035-mergesort-unbreak-listing-with-binops.patch
Patch36: 0036-proto-add-sctp-crc32-checksum-fixup.patch
Patch37: 0037-proto-Fix-ARP-header-field-ordering.patch
Patch38: 0038-json-echo-Speedup-seqnum_to_json.patch
Patch39: 0039-json-Fix-seqnum_to_json-functionality.patch
Patch40: 0040-json-don-t-leave-dangling-pointers-on-hlist.patch
BuildRequires: autogen
BuildRequires: autoconf
@@ -157,6 +173,28 @@ touch -r %{SOURCE2} $RPM_BUILD_ROOT/%{python3_sitelib}/nftables/nftables.py
%{python3_sitelib}/nftables/
%changelog
* Tue Jan 12 2021 Phil Sutter <psutter@redhat.com> [0.9.3-17.el8]
- json: don't leave dangling pointers on hlist (Phil Sutter) [1900565]
- json: Fix seqnum_to_json() functionality (Phil Sutter) [1900565]
- json: echo: Speedup seqnum_to_json() (Phil Sutter) [1900565]
- proto: Fix ARP header field ordering (Phil Sutter) [1896334]
- proto: add sctp crc32 checksum fixup (Phil Sutter) [1895804]
- mergesort: unbreak listing with binops (Phil Sutter) [1891790]
- evaluate: missing datatype definition in implicit_set_declaration() (Phil Sutter) [1877022]
- evaluate: Perform set evaluation on implicitly declared (anonymous) sets (Phil Sutter) [1877022]
- src: store expr, not dtype to track data in sets (Phil Sutter) [1877022]
* Sat Aug 08 2020 Phil Sutter <psutter@redhat.com> [0.9.3-16.el8]
- src: Set NFT_SET_CONCAT flag for sets with concatenated ranges (Phil Sutter) [1820684]
- include: Resync nf_tables.h cache copy (Phil Sutter) [1820684]
* Tue Jun 30 2020 Phil Sutter <psutter@redhat.com> [0.9.3-15.el8]
- segtree: Fix get element command with prefixes (Phil Sutter) [1832235]
- tests: 0034get_element_0: do not discard stderr (Phil Sutter) [1832235]
- segtree: Merge get_set_interval_find() and get_set_interval_end() (Phil Sutter) [1832235]
- segtree: Use expr_clone in get_set_interval_*() (Phil Sutter) [1832235]
- segtree: Fix missing expires value in prefixes (Phil Sutter) [1832235]
* Wed Jun 24 2020 Phil Sutter <psutter@redhat.com> [0.9.3-14.el8]
- JSON: Improve performance of json_events_cb() (Phil Sutter) [1835300]
- doc: Document notrack statement (Phil Sutter) [1841292]