diff --git a/.gitignore b/.gitignore index cdf126b..c9b6c1e 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -SOURCES/nftables-0.9.3.tar.bz2 +SOURCES/nftables-1.0.4.tar.bz2 diff --git a/.nftables.metadata b/.nftables.metadata index 9c4c1a3..b9af18b 100644 --- a/.nftables.metadata +++ b/.nftables.metadata @@ -1 +1 @@ -20156858169fde135a0b4c22c4cd9437afcbb733 SOURCES/nftables-0.9.3.tar.bz2 +e2e8b324cece1409a311284ff4fe26c3a5554809 SOURCES/nftables-1.0.4.tar.bz2 diff --git a/SOURCES/0001-main-enforce-options-before-commands.patch b/SOURCES/0001-main-enforce-options-before-commands.patch deleted file mode 100644 index f1401bd..0000000 --- a/SOURCES/0001-main-enforce-options-before-commands.patch +++ /dev/null @@ -1,244 +0,0 @@ -From 5fac849eac7ecfde4ca6f9c9c406ace030f358f2 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Fri, 10 Jan 2020 19:54:16 +0100 -Subject: [PATCH] main: enforce options before commands - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1778883 -Upstream Status: nftables commit fb9cea50e8b37 - -commit fb9cea50e8b370b6931e7b53b1a881d3b95b1c91 -Author: Pablo Neira Ayuso -Date: Fri Dec 13 11:32:46 2019 +0100 - - main: enforce options before commands - - This patch turns on POSIXLY_CORRECT on the getopt parser to enforce - options before commands. Users get a hint in such a case: - - # nft list ruleset -a - Error: syntax error, options must be specified before commands - nft list ruleset -a - ^ ~~ - - This patch recovers 9fc71bc6b602 ("main: Fix for misleading error with - negative chain priority"). - - Tests have been updated. - - Signed-off-by: Pablo Neira Ayuso ---- - src/main.c | 46 ++++++++++++++++++- - .../testcases/cache/0001_cache_handling_0 | 2 +- - .../testcases/chains/0016delete_handle_0 | 4 +- - .../testcases/chains/0039negative_priority_0 | 8 ++++ - .../testcases/flowtable/0010delete_handle_0 | 2 +- - .../testcases/maps/0008interval_map_delete_0 | 2 +- - tests/shell/testcases/optionals/comments_0 | 2 +- - .../testcases/optionals/comments_handles_0 | 2 +- - .../optionals/delete_object_handles_0 | 4 +- - tests/shell/testcases/optionals/handles_0 | 2 +- - .../shell/testcases/sets/0028delete_handle_0 | 2 +- - 11 files changed, 64 insertions(+), 12 deletions(-) - create mode 100755 tests/shell/testcases/chains/0039negative_priority_0 - -diff --git a/src/main.c b/src/main.c -index fde8b15..74199f9 100644 ---- a/src/main.c -+++ b/src/main.c -@@ -46,7 +46,7 @@ enum opt_vals { - OPT_TERSE = 't', - OPT_INVALID = '?', - }; --#define OPTSTRING "hvcf:iI:jvnsNaeSupypTt" -+#define OPTSTRING "+hvcf:iI:jvnsNaeSupypTt" - - static const struct option options[] = { - { -@@ -202,6 +202,47 @@ static const struct { - }, - }; - -+static void nft_options_error(int argc, char * const argv[], int pos) -+{ -+ int i; -+ -+ fprintf(stderr, "Error: syntax error, options must be specified before commands\n"); -+ for (i = 0; i < argc; i++) -+ fprintf(stderr, "%s ", argv[i]); -+ printf("\n%4c%*s\n", '^', pos - 2, "~~"); -+} -+ -+static bool nft_options_check(int argc, char * const argv[]) -+{ -+ bool skip = false, nonoption = false; -+ int pos = 0, i; -+ -+ for (i = 1; i < argc; i++) { -+ pos += strlen(argv[i - 1]) + 1; -+ if (argv[i][0] == '{') { -+ break; -+ } else if (skip) { -+ skip = false; -+ continue; -+ } else if (argv[i][0] == '-') { -+ if (nonoption) { -+ nft_options_error(argc, argv, pos); -+ return false; -+ } else if (argv[i][1] == 'I' || -+ argv[i][1] == 'f' || -+ !strcmp(argv[i], "--includepath") || -+ !strcmp(argv[i], "--file")) { -+ skip = true; -+ continue; -+ } -+ } 
else if (argv[i][0] != '-') { -+ nonoption = true; -+ } -+ } -+ -+ return true; -+} -+ - int main(int argc, char * const *argv) - { - char *buf = NULL, *filename = NULL; -@@ -211,6 +252,9 @@ int main(int argc, char * const *argv) - unsigned int len; - int i, val, rc; - -+ if (!nft_options_check(argc, argv)) -+ exit(EXIT_FAILURE); -+ - nft = nft_ctx_new(NFT_CTX_DEFAULT); - - while (1) { -diff --git a/tests/shell/testcases/cache/0001_cache_handling_0 b/tests/shell/testcases/cache/0001_cache_handling_0 -index 431aada..0a68440 100755 ---- a/tests/shell/testcases/cache/0001_cache_handling_0 -+++ b/tests/shell/testcases/cache/0001_cache_handling_0 -@@ -20,7 +20,7 @@ TMP=$(mktemp) - echo "$RULESET" >> "$TMP" - $NFT "flush ruleset;include \"$TMP\"" - rm -f "$TMP" --rule_handle=$($NFT list ruleset -a | awk '/saddr/{print $NF}') -+rule_handle=$($NFT -a list ruleset | awk '/saddr/{print $NF}') - $NFT delete rule inet test test handle $rule_handle - $NFT delete set inet test test - $NFT -f - <<< "$RULESET" -diff --git a/tests/shell/testcases/chains/0016delete_handle_0 b/tests/shell/testcases/chains/0016delete_handle_0 -index 4633d77..8fd1ad8 100755 ---- a/tests/shell/testcases/chains/0016delete_handle_0 -+++ b/tests/shell/testcases/chains/0016delete_handle_0 -@@ -10,8 +10,8 @@ $NFT add chain ip6 test-ip6 x - $NFT add chain ip6 test-ip6 y - $NFT add chain ip6 test-ip6 z - --chain_y_handle=$($NFT list ruleset -a | awk -v n=1 '/chain y/ && !--n {print $NF; exit}'); --chain_z_handle=$($NFT list ruleset -a | awk -v n=2 '/chain z/ && !--n {print $NF; exit}'); -+chain_y_handle=$($NFT -a list ruleset | awk -v n=1 '/chain y/ && !--n {print $NF; exit}'); -+chain_z_handle=$($NFT -a list ruleset | awk -v n=2 '/chain z/ && !--n {print $NF; exit}'); - - $NFT delete chain test-ip handle $chain_y_handle - $NFT delete chain ip6 test-ip6 handle $chain_z_handle -diff --git a/tests/shell/testcases/chains/0039negative_priority_0 b/tests/shell/testcases/chains/0039negative_priority_0 -new file mode 100755 -index 0000000..ba17b8c ---- /dev/null -+++ b/tests/shell/testcases/chains/0039negative_priority_0 -@@ -0,0 +1,8 @@ -+#!/bin/bash -+ -+# Test parsing of negative priority values -+ -+set -e -+ -+$NFT add table t -+$NFT add chain t c { type filter hook input priority -30\; } -diff --git a/tests/shell/testcases/flowtable/0010delete_handle_0 b/tests/shell/testcases/flowtable/0010delete_handle_0 -index 303967d..985d4a3 100755 ---- a/tests/shell/testcases/flowtable/0010delete_handle_0 -+++ b/tests/shell/testcases/flowtable/0010delete_handle_0 -@@ -7,7 +7,7 @@ set -e - $NFT add table inet t - $NFT add flowtable inet t f { hook ingress priority filter\; devices = { lo }\; } - --FH=$($NFT list ruleset -a | awk '/flowtable f/ { print $NF }') -+FH=$($NFT -a list ruleset | awk '/flowtable f/ { print $NF }') - - $NFT delete flowtable inet t handle $FH - -diff --git a/tests/shell/testcases/maps/0008interval_map_delete_0 b/tests/shell/testcases/maps/0008interval_map_delete_0 -index a43fd28..7da6eb3 100755 ---- a/tests/shell/testcases/maps/0008interval_map_delete_0 -+++ b/tests/shell/testcases/maps/0008interval_map_delete_0 -@@ -24,7 +24,7 @@ $NFT delete element filter m { 127.0.0.3 } - $NFT add element filter m { 127.0.0.3 : 0x3 } - $NFT add element filter m { 127.0.0.2 : 0x2 } - --GET=$($NFT list ruleset -s) -+GET=$($NFT -s list ruleset) - if [ "$EXPECTED" != "$GET" ] ; then - DIFF="$(which diff)" - [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -diff --git a/tests/shell/testcases/optionals/comments_0 
b/tests/shell/testcases/optionals/comments_0 -index 29b8506..ab85936 100755 ---- a/tests/shell/testcases/optionals/comments_0 -+++ b/tests/shell/testcases/optionals/comments_0 -@@ -5,4 +5,4 @@ - $NFT add table test - $NFT add chain test test - $NFT add rule test test tcp dport 22 counter accept comment test_comment --$NFT list table test -a | grep 'accept comment \"test_comment\"' >/dev/null -+$NFT -a list table test | grep 'accept comment \"test_comment\"' >/dev/null -diff --git a/tests/shell/testcases/optionals/comments_handles_0 b/tests/shell/testcases/optionals/comments_handles_0 -index 30539bf..a01df1d 100755 ---- a/tests/shell/testcases/optionals/comments_handles_0 -+++ b/tests/shell/testcases/optionals/comments_handles_0 -@@ -6,5 +6,5 @@ $NFT add table test - $NFT add chain test test - $NFT add rule test test tcp dport 22 counter accept comment test_comment - set -e --$NFT list table test -a | grep 'accept comment \"test_comment\" # handle '[[:digit:]]$ >/dev/null -+$NFT -a list table test | grep 'accept comment \"test_comment\" # handle '[[:digit:]]$ >/dev/null - $NFT list table test | grep 'accept comment \"test_comment\"' | grep -v '# handle '[[:digit:]]$ >/dev/null -diff --git a/tests/shell/testcases/optionals/delete_object_handles_0 b/tests/shell/testcases/optionals/delete_object_handles_0 -index d5d9654..a2ae422 100755 ---- a/tests/shell/testcases/optionals/delete_object_handles_0 -+++ b/tests/shell/testcases/optionals/delete_object_handles_0 -@@ -10,8 +10,8 @@ $NFT add quota ip6 test-ip6 http-quota over 25 mbytes - $NFT add counter ip6 test-ip6 http-traffic - $NFT add quota ip6 test-ip6 ssh-quota 10 mbytes - --counter_handle=$($NFT list ruleset -a | awk '/https-traffic/{print $NF}') --quota_handle=$($NFT list ruleset -a | awk '/ssh-quota/{print $NF}') -+counter_handle=$($NFT -a list ruleset | awk '/https-traffic/{print $NF}') -+quota_handle=$($NFT -a list ruleset | awk '/ssh-quota/{print $NF}') - $NFT delete counter test-ip handle $counter_handle - $NFT delete quota ip6 test-ip6 handle $quota_handle - -diff --git a/tests/shell/testcases/optionals/handles_0 b/tests/shell/testcases/optionals/handles_0 -index 7c6a437..80f3c5b 100755 ---- a/tests/shell/testcases/optionals/handles_0 -+++ b/tests/shell/testcases/optionals/handles_0 -@@ -5,4 +5,4 @@ - $NFT add table test - $NFT add chain test test - $NFT add rule test test tcp dport 22 counter accept --$NFT list table test -a | grep 'accept # handle '[[:digit:]]$ >/dev/null -+$NFT -a list table test | grep 'accept # handle '[[:digit:]]$ >/dev/null -diff --git a/tests/shell/testcases/sets/0028delete_handle_0 b/tests/shell/testcases/sets/0028delete_handle_0 -index 4e8b322..5ad17c2 100755 ---- a/tests/shell/testcases/sets/0028delete_handle_0 -+++ b/tests/shell/testcases/sets/0028delete_handle_0 -@@ -7,7 +7,7 @@ $NFT add set test-ip y { type inet_service \; timeout 3h45s \;} - $NFT add set test-ip z { type ipv4_addr\; flags constant , interval\;} - $NFT add set test-ip c {type ipv4_addr \; flags timeout \; elements={192.168.1.1 timeout 10s, 192.168.1.2 timeout 30s} \;} - --set_handle=$($NFT list ruleset -a | awk '/set c/{print $NF}') -+set_handle=$($NFT -a list ruleset | awk '/set c/{print $NF}') - $NFT delete set test-ip handle $set_handle - - EXPECTED="table ip test-ip { --- -2.31.1 - diff --git a/SOURCES/0001-tests-shell-runtime-set-element-automerge.patch b/SOURCES/0001-tests-shell-runtime-set-element-automerge.patch new file mode 100644 index 0000000..2973639 --- /dev/null +++ 
b/SOURCES/0001-tests-shell-runtime-set-element-automerge.patch @@ -0,0 +1,97 @@ +From c994f1d2a31a2b03557b3eb1c8c2de34b97edce1 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Fri, 24 Jun 2022 16:02:59 +0200 +Subject: [PATCH] tests: shell: runtime set element automerge + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 8fafe4e6b5b30 + +commit 8fafe4e6b5b30f2539f16403da8d5c5f819e523b +Author: Pablo Neira Ayuso +Date: Mon Jun 13 17:05:22 2022 +0200 + + tests: shell: runtime set element automerge + + Add a test to cover runtime set element automerge. + + Signed-off-by: Pablo Neira Ayuso + +Signed-off-by: Phil Sutter +--- + tests/shell/testcases/sets/automerge_0 | 64 ++++++++++++++++++++++++++ + 1 file changed, 64 insertions(+) + create mode 100755 tests/shell/testcases/sets/automerge_0 + +diff --git a/tests/shell/testcases/sets/automerge_0 b/tests/shell/testcases/sets/automerge_0 +new file mode 100755 +index 0000000..c9fb609 +--- /dev/null ++++ b/tests/shell/testcases/sets/automerge_0 +@@ -0,0 +1,64 @@ ++#!/bin/bash ++ ++set -e ++ ++RULESET="table inet x { ++ set y { ++ type inet_service ++ flags interval ++ auto-merge ++ } ++}" ++ ++$NFT -f - <<< $RULESET ++ ++tmpfile=$(mktemp) ++echo -n "add element inet x y { " > $tmpfile ++for ((i=0;i<65535;i+=2)) ++do ++ echo -n "$i, " >> $tmpfile ++ if [ $i -eq 65534 ] ++ then ++ echo -n "$i" >> $tmpfile ++ fi ++done ++echo "}" >> $tmpfile ++ ++$NFT -f $tmpfile ++ ++tmpfile2=$(mktemp) ++for ((i=1;i<65535;i+=2)) ++do ++ echo "$i" >> $tmpfile2 ++done ++ ++tmpfile3=$(mktemp) ++shuf $tmpfile2 > $tmpfile3 ++i=0 ++cat $tmpfile3 | while read line && [ $i -lt 10 ] ++do ++ $NFT add element inet x y { $line } ++ i=$((i+1)) ++done ++ ++for ((i=0;i<10;i++)) ++do ++ from=$(($RANDOM%65535)) ++ to=$(($from+100)) ++ $NFT add element inet x y { $from-$to } ++ if [ $? -ne 0 ] ++ then ++ echo "failed to add $from-$to" ++ exit 1 ++ fi ++ $NFT get element inet x y { $from-$to } ++ if [ $? -ne 0 ] ++ then ++ echo "failed to get $from-$to" ++ exit 1 ++ fi ++done ++ ++rm -f $tmpfile ++rm -f $tmpfile2 ++rm -f $tmpfile3 +-- +2.41.0.rc1 + diff --git a/SOURCES/0002-main-restore-debug.patch b/SOURCES/0002-main-restore-debug.patch deleted file mode 100644 index 9bd8b72..0000000 --- a/SOURCES/0002-main-restore-debug.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 0c808b1ee29d4a0974f4cc5c0586138730361a41 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Fri, 10 Jan 2020 19:54:16 +0100 -Subject: [PATCH] main: restore --debug - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1778883 -Upstream Status: nftables commit ea5af85371bd1 - -commit ea5af85371bd18658ea2ffa0a6c9c48e2c64684b -Author: Pablo Neira Ayuso -Date: Thu Jan 9 18:16:18 2020 +0100 - - main: restore --debug - - Broken since options are mandatory before commands. 
- - Fixes: fb9cea50e8b3 ("main: enforce options before commands") - Signed-off-by: Pablo Neira Ayuso ---- - src/main.c | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/src/main.c b/src/main.c -index 74199f9..6ab1b89 100644 ---- a/src/main.c -+++ b/src/main.c -@@ -46,7 +46,7 @@ enum opt_vals { - OPT_TERSE = 't', - OPT_INVALID = '?', - }; --#define OPTSTRING "+hvcf:iI:jvnsNaeSupypTt" -+#define OPTSTRING "+hvd:cf:iI:jvnsNaeSupypTt" - - static const struct option options[] = { - { -@@ -228,8 +228,10 @@ static bool nft_options_check(int argc, char * const argv[]) - if (nonoption) { - nft_options_error(argc, argv, pos); - return false; -- } else if (argv[i][1] == 'I' || -+ } else if (argv[i][1] == 'd' || -+ argv[i][1] == 'I' || - argv[i][1] == 'f' || -+ !strcmp(argv[i], "--debug") || - !strcmp(argv[i], "--includepath") || - !strcmp(argv[i], "--file")) { - skip = true; --- -2.31.1 - diff --git a/SOURCES/0002-rule-collapse-set-element-commands.patch b/SOURCES/0002-rule-collapse-set-element-commands.patch new file mode 100644 index 0000000..aea8e2c --- /dev/null +++ b/SOURCES/0002-rule-collapse-set-element-commands.patch @@ -0,0 +1,236 @@ +From 33792b491be79cb50d163c4ecc553f1258b82159 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Fri, 24 Jun 2022 16:02:59 +0200 +Subject: [PATCH] rule: collapse set element commands + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 498a5f0c219d8 + +commit 498a5f0c219d8a118af4f172f248647d9b077101 +Author: Pablo Neira Ayuso +Date: Mon Jun 13 17:22:44 2022 +0200 + + rule: collapse set element commands + + Robots might generate a long list of singleton element commands such as: + + add element t s { 1.0.1.0/24 } + ... + add element t s { 1.0.2.0/23 } + + collapse them into one single command before the evaluation step, ie. + + add element t s { 1.0.1.0/24, ..., 1.0.2.0/23 } + + this speeds up overlap detection and set element automerge operations in + this worst case scenario. + + Since 3da9643fb9ff9 ("intervals: add support to automerge with kernel + elements"), the new interval tracking relies on mergesort. The pattern + above triggers the set sorting for each element. + + This patch adds a list to cmd objects that store collapsed commands. + Moreover, expressions also contain a reference to the original command, + to uncollapse the commands after the evaluation step. + + These commands are uncollapsed after the evaluation step to ensure error + reporting works as expected (command and netlink message are mapped + 1:1). + + For the record: + + - nftables versions <= 1.0.2 did not perform any kind of overlap + check for the described scenario above (because set cache only contained + elements in the kernel in this case). This is a problem for kernels < 5.7 + which rely on userspace to detect overlaps. + + - the overlap detection could be skipped for kernels >= 5.7. + + - The extended netlink error reporting available for set elements + since 5.19-rc might allow to remove the uncollapse step, in this case, + error reporting does not rely on the netlink sequence to refer to the + command triggering the problem. 
+ + Signed-off-by: Pablo Neira Ayuso + +Signed-off-by: Phil Sutter +--- + include/expression.h | 1 + + include/rule.h | 3 ++ + src/libnftables.c | 17 ++++++++-- + src/rule.c | 75 ++++++++++++++++++++++++++++++++++++++++++++ + 4 files changed, 93 insertions(+), 3 deletions(-) + +diff --git a/include/expression.h b/include/expression.h +index 2c3818e..53194c9 100644 +--- a/include/expression.h ++++ b/include/expression.h +@@ -243,6 +243,7 @@ struct expr { + enum expr_types etype:8; + enum ops op:8; + unsigned int len; ++ struct cmd *cmd; + + union { + struct { +diff --git a/include/rule.h b/include/rule.h +index e232b97..9081225 100644 +--- a/include/rule.h ++++ b/include/rule.h +@@ -700,6 +700,7 @@ struct cmd { + enum cmd_obj obj; + struct handle handle; + uint32_t seqnum; ++ struct list_head collapse_list; + union { + void *data; + struct expr *expr; +@@ -728,6 +729,8 @@ extern struct cmd *cmd_alloc(enum cmd_ops op, enum cmd_obj obj, + const struct handle *h, const struct location *loc, + void *data); + extern void nft_cmd_expand(struct cmd *cmd); ++extern bool nft_cmd_collapse(struct list_head *cmds); ++extern void nft_cmd_uncollapse(struct list_head *cmds); + extern struct cmd *cmd_alloc_obj_ct(enum cmd_ops op, int type, + const struct handle *h, + const struct location *loc, struct obj *obj); +diff --git a/src/libnftables.c b/src/libnftables.c +index 6a22ea0..aac682b 100644 +--- a/src/libnftables.c ++++ b/src/libnftables.c +@@ -501,7 +501,9 @@ static int nft_evaluate(struct nft_ctx *nft, struct list_head *msgs, + { + struct nft_cache_filter *filter; + struct cmd *cmd, *next; ++ bool collapsed = false; + unsigned int flags; ++ int err = 0; + + filter = nft_cache_filter_init(); + flags = nft_cache_evaluate(nft, cmds, filter); +@@ -512,17 +514,26 @@ static int nft_evaluate(struct nft_ctx *nft, struct list_head *msgs, + + nft_cache_filter_fini(filter); + ++ if (nft_cmd_collapse(cmds)) ++ collapsed = true; ++ + list_for_each_entry_safe(cmd, next, cmds, list) { + struct eval_ctx ectx = { + .nft = nft, + .msgs = msgs, + }; ++ + if (cmd_evaluate(&ectx, cmd) < 0 && +- ++nft->state->nerrs == nft->parser_max_errors) +- return -1; ++ ++nft->state->nerrs == nft->parser_max_errors) { ++ err = -1; ++ break; ++ } + } + +- if (nft->state->nerrs) ++ if (collapsed) ++ nft_cmd_uncollapse(cmds); ++ ++ if (err < 0 || nft->state->nerrs) + return -1; + + list_for_each_entry(cmd, cmds, list) { +diff --git a/src/rule.c b/src/rule.c +index 7f61bdc..0526a14 100644 +--- a/src/rule.c ++++ b/src/rule.c +@@ -1279,6 +1279,8 @@ struct cmd *cmd_alloc(enum cmd_ops op, enum cmd_obj obj, + cmd->handle = *h; + cmd->location = *loc; + cmd->data = data; ++ init_list_head(&cmd->collapse_list); ++ + return cmd; + } + +@@ -1379,6 +1381,79 @@ void nft_cmd_expand(struct cmd *cmd) + } + } + ++bool nft_cmd_collapse(struct list_head *cmds) ++{ ++ struct cmd *cmd, *next, *elems = NULL; ++ struct expr *expr, *enext; ++ bool collapse = false; ++ ++ list_for_each_entry_safe(cmd, next, cmds, list) { ++ if (cmd->op != CMD_ADD && ++ cmd->op != CMD_CREATE) { ++ elems = NULL; ++ continue; ++ } ++ ++ if (cmd->obj != CMD_OBJ_ELEMENTS) { ++ elems = NULL; ++ continue; ++ } ++ ++ if (!elems) { ++ elems = cmd; ++ continue; ++ } ++ ++ if (cmd->op != elems->op) { ++ elems = cmd; ++ continue; ++ } ++ ++ if (strcmp(elems->handle.table.name, cmd->handle.table.name) || ++ strcmp(elems->handle.set.name, cmd->handle.set.name)) { ++ elems = cmd; ++ continue; ++ } ++ ++ collapse = true; ++ list_for_each_entry_safe(expr, enext, &cmd->expr->expressions, list) { 
++ expr->cmd = cmd; ++ list_move_tail(&expr->list, &elems->expr->expressions); ++ } ++ elems->expr->size += cmd->expr->size; ++ list_move_tail(&cmd->list, &elems->collapse_list); ++ } ++ ++ return collapse; ++} ++ ++void nft_cmd_uncollapse(struct list_head *cmds) ++{ ++ struct cmd *cmd, *cmd_next, *collapse_cmd, *collapse_cmd_next; ++ struct expr *expr, *next; ++ ++ list_for_each_entry_safe(cmd, cmd_next, cmds, list) { ++ if (list_empty(&cmd->collapse_list)) ++ continue; ++ ++ assert(cmd->obj == CMD_OBJ_ELEMENTS); ++ ++ list_for_each_entry_safe(expr, next, &cmd->expr->expressions, list) { ++ if (!expr->cmd) ++ continue; ++ ++ list_move_tail(&expr->list, &expr->cmd->expr->expressions); ++ cmd->expr->size--; ++ expr->cmd = NULL; ++ } ++ ++ list_for_each_entry_safe(collapse_cmd, collapse_cmd_next, &cmd->collapse_list, list) { ++ collapse_cmd->elem.set = set_get(cmd->elem.set); ++ list_add(&collapse_cmd->list, &cmd->list); ++ } ++ } ++} ++ + struct markup *markup_alloc(uint32_t format) + { + struct markup *markup; +-- +2.41.0.rc1 + diff --git a/SOURCES/0003-intervals-do-not-report-exact-overlaps-for-new-eleme.patch b/SOURCES/0003-intervals-do-not-report-exact-overlaps-for-new-eleme.patch new file mode 100644 index 0000000..5e92713 --- /dev/null +++ b/SOURCES/0003-intervals-do-not-report-exact-overlaps-for-new-eleme.patch @@ -0,0 +1,84 @@ +From af9045e2f2029b6573db32bd15ab861d797b86a6 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Fri, 24 Jun 2022 16:02:59 +0200 +Subject: [PATCH] intervals: do not report exact overlaps for new elements + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 87ba510fc704f + +commit 87ba510fc704f766b5417d3bfc326e8ab9378c2a +Author: Pablo Neira Ayuso +Date: Mon Jun 13 17:22:47 2022 +0200 + + intervals: do not report exact overlaps for new elements + + Two new elements that represent an exact overlap should not trigger an error. + + add table t + add set t s { type ipv4_addr; flags interval; } + add element t s { 1.0.1.0/24 } + ... + add element t s { 1.0.1.0/24 } + + result in a bogus error. 
+ + # nft -f set.nft + set.nft:1002:19-28: Error: conflicting intervals specified + add element t s { 1.0.1.0/24 } + ^^^^^^^^^^ + + Fixes: 3da9643fb9ff ("intervals: add support to automerge with kernel elements") + Signed-off-by: Pablo Neira Ayuso + +Signed-off-by: Phil Sutter +--- + src/intervals.c | 3 +-- + tests/shell/testcases/sets/exact_overlap_0 | 22 ++++++++++++++++++++++ + 2 files changed, 23 insertions(+), 2 deletions(-) + create mode 100755 tests/shell/testcases/sets/exact_overlap_0 + +diff --git a/src/intervals.c b/src/intervals.c +index bc414d6..89f5c33 100644 +--- a/src/intervals.c ++++ b/src/intervals.c +@@ -540,8 +540,7 @@ static int setelem_overlap(struct list_head *msgs, struct set *set, + } + + if (mpz_cmp(prev_range.low, range.low) == 0 && +- mpz_cmp(prev_range.high, range.high) == 0 && +- (elem->flags & EXPR_F_KERNEL || prev->flags & EXPR_F_KERNEL)) ++ mpz_cmp(prev_range.high, range.high) == 0) + goto next; + + if (mpz_cmp(prev_range.low, range.low) <= 0 && +diff --git a/tests/shell/testcases/sets/exact_overlap_0 b/tests/shell/testcases/sets/exact_overlap_0 +new file mode 100755 +index 0000000..1ce9304 +--- /dev/null ++++ b/tests/shell/testcases/sets/exact_overlap_0 +@@ -0,0 +1,22 @@ ++#!/bin/bash ++ ++RULESET="add table t ++add set t s { type ipv4_addr; flags interval; } ++add element t s { 1.0.1.0/24 } ++add element t s { 1.0.2.0/23 } ++add element t s { 1.0.8.0/21 } ++add element t s { 1.0.32.0/19 } ++add element t s { 1.1.0.0/24 } ++add element t s { 1.1.2.0/23 } ++add element t s { 1.1.4.0/22 } ++add element t s { 1.1.8.0/24 } ++add element t s { 1.1.9.0/24 } ++add element t s { 1.1.10.0/23 } ++add element t s { 1.1.12.0/22 } ++add element t s { 1.1.16.0/20 } ++add element t s { 1.1.32.0/19 } ++add element t s { 1.0.1.0/24 }" ++ ++$NFT -f - <<< $RULESET || exit 1 ++ ++$NFT add element t s { 1.0.1.0/24 } +-- +2.41.0.rc1 + diff --git a/SOURCES/0003-monitor-Do-not-decompose-non-anonymous-sets.patch b/SOURCES/0003-monitor-Do-not-decompose-non-anonymous-sets.patch deleted file mode 100644 index 6611382..0000000 --- a/SOURCES/0003-monitor-Do-not-decompose-non-anonymous-sets.patch +++ /dev/null @@ -1,68 +0,0 @@ -From 13bd961c3ba83e4189dcffdcf570c5a4391fd5f9 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Fri, 10 Jan 2020 19:58:29 +0100 -Subject: [PATCH] monitor: Do not decompose non-anonymous sets - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1774742 -Upstream Status: nftables commit 5d57fa3e99bb9 - -commit 5d57fa3e99bb9f2044e236d4ddb7d874cfefe1dd -Author: Phil Sutter -Date: Thu Jan 9 13:34:20 2020 +0100 - - monitor: Do not decompose non-anonymous sets - - They have been decomposed already, trying to do that again causes a - segfault. This is a similar fix as in commit 8ecb885589591 ("src: - restore --echo with anonymous sets"). 
- - Signed-off-by: Phil Sutter - Acked-by: Pablo Neira Ayuso ---- - src/monitor.c | 2 +- - tests/monitor/testcases/set-interval.t | 20 ++++++++++++++++++++ - 2 files changed, 21 insertions(+), 1 deletion(-) - create mode 100644 tests/monitor/testcases/set-interval.t - -diff --git a/src/monitor.c b/src/monitor.c -index ea0393c..0da9858 100644 ---- a/src/monitor.c -+++ b/src/monitor.c -@@ -500,7 +500,7 @@ static int netlink_events_obj_cb(const struct nlmsghdr *nlh, int type, - - static void rule_map_decompose_cb(struct set *s, void *data) - { -- if (s->flags & NFT_SET_INTERVAL) -+ if (s->flags & (NFT_SET_INTERVAL & NFT_SET_ANONYMOUS)) - interval_map_decompose(s->init); - } - -diff --git a/tests/monitor/testcases/set-interval.t b/tests/monitor/testcases/set-interval.t -new file mode 100644 -index 0000000..59930c5 ---- /dev/null -+++ b/tests/monitor/testcases/set-interval.t -@@ -0,0 +1,20 @@ -+# setup first -+I add table ip t -+I add chain ip t c -+O - -+J {"add": {"table": {"family": "ip", "name": "t", "handle": 0}}} -+J {"add": {"chain": {"family": "ip", "table": "t", "name": "c", "handle": 0}}} -+ -+# add set with elements, monitor output expectedly differs -+I add set ip t s { type inet_service; flags interval; elements = { 20, 30-40 }; } -+O add set ip t s { type inet_service; flags interval; } -+O add element ip t s { 20 } -+O add element ip t s { 30-40 } -+J {"add": {"set": {"family": "ip", "name": "s", "table": "t", "type": "inet_service", "handle": 0, "flags": ["interval"]}}} -+J {"add": {"element": {"family": "ip", "table": "t", "name": "s", "elem": {"set": [20]}}}} -+J {"add": {"element": {"family": "ip", "table": "t", "name": "s", "elem": {"set": [{"range": [30, 40]}]}}}} -+ -+# this would crash nft -+I add rule ip t c tcp dport @s -+O - -+J {"add": {"rule": {"family": "ip", "table": "t", "chain": "c", "handle": 0, "expr": [{"match": {"op": "==", "left": {"payload": {"protocol": "tcp", "field": "dport"}}, "right": "@s"}}]}}} --- -2.31.1 - diff --git a/SOURCES/0004-intervals-do-not-empty-cache-for-maps.patch b/SOURCES/0004-intervals-do-not-empty-cache-for-maps.patch new file mode 100644 index 0000000..bfbdfac --- /dev/null +++ b/SOURCES/0004-intervals-do-not-empty-cache-for-maps.patch @@ -0,0 +1,55 @@ +From cfb1670ece6414c3d2aad5dd7df572b0cc07acd5 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Fri, 24 Jun 2022 16:02:59 +0200 +Subject: [PATCH] intervals: do not empty cache for maps + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit d434de8b50dcf + +commit d434de8b50dcf3f5f4ca027e122a7df9d4e5d8e1 +Author: Pablo Neira Ayuso +Date: Thu Jun 16 10:53:56 2022 +0200 + + intervals: do not empty cache for maps + + Translate set element to range and sort in maps for the NFT_SET_MAP + case, which does not support for automerge yet. 
+ + Fixes: 81e36530fcac ("src: replace interval segment tree overlap and automerge") + Signed-off-by: Pablo Neira Ayuso + +Signed-off-by: Phil Sutter +--- + src/intervals.c | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +diff --git a/src/intervals.c b/src/intervals.c +index 89f5c33..e203413 100644 +--- a/src/intervals.c ++++ b/src/intervals.c +@@ -216,6 +216,12 @@ int set_automerge(struct list_head *msgs, struct cmd *cmd, struct set *set, + struct cmd *purge_cmd; + struct handle h = {}; + ++ if (set->flags & NFT_SET_MAP) { ++ set_to_range(init); ++ list_expr_sort(&init->expressions); ++ return 0; ++ } ++ + if (existing_set) { + if (existing_set->init) { + list_splice_init(&existing_set->init->expressions, +@@ -229,9 +235,6 @@ int set_automerge(struct list_head *msgs, struct cmd *cmd, struct set *set, + set_to_range(init); + list_expr_sort(&init->expressions); + +- if (set->flags & NFT_SET_MAP) +- return 0; +- + ctx.purge = set_expr_alloc(&internal_location, set); + + setelem_automerge(&ctx); +-- +2.41.0.rc1 + diff --git a/SOURCES/0004-monitor-Fix-output-for-ranges-in-anonymous-sets.patch b/SOURCES/0004-monitor-Fix-output-for-ranges-in-anonymous-sets.patch deleted file mode 100644 index 90f2aea..0000000 --- a/SOURCES/0004-monitor-Fix-output-for-ranges-in-anonymous-sets.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 2e7cb6c2d46d9b8b91ff4b5d6797b7544c23ba44 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 13 Jan 2020 16:58:57 +0100 -Subject: [PATCH] monitor: Fix output for ranges in anonymous sets - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1774742 -Upstream Status: nftables commit ddbacd70d061e - -commit ddbacd70d061eb1b6808f501969809bfb5d03001 -Author: Phil Sutter -Date: Mon Jan 13 14:53:24 2020 +0100 - - monitor: Fix output for ranges in anonymous sets - - Previous fix for named interval sets was simply wrong: Instead of - limiting decomposing to anonymous interval sets, it effectively disabled - it entirely. - - Since code needs to check for both interval and anonymous bits - separately, introduce set_is_interval() helper to keep the code - readable. - - Also extend test case to assert ranges in anonymous sets are correctly - printed by echo or monitor modes. Without this fix, range boundaries are - printed as individual set elements. 
- - Fixes: 5d57fa3e99bb9 ("monitor: Do not decompose non-anonymous sets") - Signed-off-by: Phil Sutter - Reviewed-by: Pablo Neira Ayuso ---- - include/rule.h | 5 +++++ - src/monitor.c | 2 +- - tests/monitor/testcases/set-interval.t | 5 +++++ - 3 files changed, 11 insertions(+), 1 deletion(-) - -diff --git a/include/rule.h b/include/rule.h -index 0b2eba3..47eb29f 100644 ---- a/include/rule.h -+++ b/include/rule.h -@@ -363,6 +363,11 @@ static inline bool set_is_meter(uint32_t set_flags) - return set_is_anonymous(set_flags) && (set_flags & NFT_SET_EVAL); - } - -+static inline bool set_is_interval(uint32_t set_flags) -+{ -+ return set_flags & NFT_SET_INTERVAL; -+} -+ - #include - - struct counter { -diff --git a/src/monitor.c b/src/monitor.c -index 0da9858..fb803cf 100644 ---- a/src/monitor.c -+++ b/src/monitor.c -@@ -500,7 +500,7 @@ static int netlink_events_obj_cb(const struct nlmsghdr *nlh, int type, - - static void rule_map_decompose_cb(struct set *s, void *data) - { -- if (s->flags & (NFT_SET_INTERVAL & NFT_SET_ANONYMOUS)) -+ if (set_is_interval(s->flags) && set_is_anonymous(s->flags)) - interval_map_decompose(s->init); - } - -diff --git a/tests/monitor/testcases/set-interval.t b/tests/monitor/testcases/set-interval.t -index 59930c5..1fbcfe2 100644 ---- a/tests/monitor/testcases/set-interval.t -+++ b/tests/monitor/testcases/set-interval.t -@@ -18,3 +18,8 @@ J {"add": {"element": {"family": "ip", "table": "t", "name": "s", "elem": {"set" - I add rule ip t c tcp dport @s - O - - J {"add": {"rule": {"family": "ip", "table": "t", "chain": "c", "handle": 0, "expr": [{"match": {"op": "==", "left": {"payload": {"protocol": "tcp", "field": "dport"}}, "right": "@s"}}]}}} -+ -+# test anonymous interval sets as well -+I add rule ip t c tcp dport { 20, 30-40 } -+O - -+J {"add": {"rule": {"family": "ip", "table": "t", "chain": "c", "handle": 0, "expr": [{"match": {"op": "==", "left": {"payload": {"protocol": "tcp", "field": "dport"}}, "right": {"set": [20, {"range": [30, 40]}]}}}]}}} --- -2.31.1 - diff --git a/SOURCES/0005-intervals-Do-not-sort-cached-set-elements-over-and-o.patch b/SOURCES/0005-intervals-Do-not-sort-cached-set-elements-over-and-o.patch new file mode 100644 index 0000000..ecab071 --- /dev/null +++ b/SOURCES/0005-intervals-Do-not-sort-cached-set-elements-over-and-o.patch @@ -0,0 +1,139 @@ +From 5c5128094c75a184e54e82f2ad43c67423184c3e Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Fri, 24 Jun 2022 16:02:59 +0200 +Subject: [PATCH] intervals: Do not sort cached set elements over and over + again + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 59e3a59221fb8 + +commit 59e3a59221fb81c289a0868a85140dd452fb1c30 +Author: Phil Sutter +Date: Thu Jun 16 10:56:12 2022 +0200 + + intervals: Do not sort cached set elements over and over again + + When adding element(s) to a non-empty set, code merged the two lists and + sorted the result. With many individual 'add element' commands this + causes substantial overhead. Make use of the fact that + existing_set->init is sorted already, sort only the list of new elements + and use list_splice_sorted() to merge the two sorted lists. + + Add set_sort_splice() and use it for set element overlap detection and + automerge. + + A test case adding ~25k elements in individual commands completes in + about 1/4th of the time with this patch applied. + + Joint work with Pablo. 
+ + Fixes: 3da9643fb9ff9 ("intervals: add support to automerge with kernel elements") + Signed-off-by: Phil Sutter + Signed-off-by: Pablo Neira Ayuso + +Signed-off-by: Phil Sutter +--- + include/expression.h | 1 + + src/intervals.c | 46 +++++++++++++++++++++----------------------- + src/mergesort.c | 2 +- + 3 files changed, 24 insertions(+), 25 deletions(-) + +diff --git a/include/expression.h b/include/expression.h +index 53194c9..cf7319b 100644 +--- a/include/expression.h ++++ b/include/expression.h +@@ -481,6 +481,7 @@ extern struct expr *compound_expr_alloc(const struct location *loc, + extern void compound_expr_add(struct expr *compound, struct expr *expr); + extern void compound_expr_remove(struct expr *compound, struct expr *expr); + extern void list_expr_sort(struct list_head *head); ++extern void list_splice_sorted(struct list_head *list, struct list_head *head); + + extern struct expr *concat_expr_alloc(const struct location *loc); + +diff --git a/src/intervals.c b/src/intervals.c +index e203413..dcc06d1 100644 +--- a/src/intervals.c ++++ b/src/intervals.c +@@ -118,6 +118,26 @@ static bool merge_ranges(struct set_automerge_ctx *ctx, + return false; + } + ++static void set_sort_splice(struct expr *init, struct set *set) ++{ ++ struct set *existing_set = set->existing_set; ++ ++ set_to_range(init); ++ list_expr_sort(&init->expressions); ++ ++ if (!existing_set) ++ return; ++ ++ if (existing_set->init) { ++ set_to_range(existing_set->init); ++ list_splice_sorted(&existing_set->init->expressions, ++ &init->expressions); ++ init_list_head(&existing_set->init->expressions); ++ } else { ++ existing_set->init = set_expr_alloc(&internal_location, set); ++ } ++} ++ + static void setelem_automerge(struct set_automerge_ctx *ctx) + { + struct expr *i, *next, *prev = NULL; +@@ -222,18 +242,7 @@ int set_automerge(struct list_head *msgs, struct cmd *cmd, struct set *set, + return 0; + } + +- if (existing_set) { +- if (existing_set->init) { +- list_splice_init(&existing_set->init->expressions, +- &init->expressions); +- } else { +- existing_set->init = set_expr_alloc(&internal_location, +- set); +- } +- } +- +- set_to_range(init); +- list_expr_sort(&init->expressions); ++ set_sort_splice(init, set); + + ctx.purge = set_expr_alloc(&internal_location, set); + +@@ -591,18 +600,7 @@ int set_overlap(struct list_head *msgs, struct set *set, struct expr *init) + struct expr *i, *n, *clone; + int err; + +- if (existing_set) { +- if (existing_set->init) { +- list_splice_init(&existing_set->init->expressions, +- &init->expressions); +- } else { +- existing_set->init = set_expr_alloc(&internal_location, +- set); +- } +- } +- +- set_to_range(init); +- list_expr_sort(&init->expressions); ++ set_sort_splice(init, set); + + err = setelem_overlap(msgs, set, init); + +diff --git a/src/mergesort.c b/src/mergesort.c +index 8e6aac5..dca7142 100644 +--- a/src/mergesort.c ++++ b/src/mergesort.c +@@ -70,7 +70,7 @@ static int expr_msort_cmp(const struct expr *e1, const struct expr *e2) + return ret; + } + +-static void list_splice_sorted(struct list_head *list, struct list_head *head) ++void list_splice_sorted(struct list_head *list, struct list_head *head) + { + struct list_head *h = head->next; + struct list_head *l = list->next; +-- +2.41.0.rc1 + diff --git a/SOURCES/0005-xfrm-spi-is-big-endian.patch b/SOURCES/0005-xfrm-spi-is-big-endian.patch deleted file mode 100644 index e7ee4af..0000000 --- a/SOURCES/0005-xfrm-spi-is-big-endian.patch +++ /dev/null @@ -1,51 +0,0 @@ -From ca4d1604b18abf7189ecfd5e06cb74abc3694076 
Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Thu, 16 Jan 2020 18:40:52 +0100 -Subject: [PATCH] xfrm: spi is big-endian - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1790963 -Upstream Status: nftables commit 488356b895024 - -commit 488356b895024d0944b20feb1f930558726e0877 -Author: Florian Westphal -Date: Tue Jan 14 13:37:28 2020 +0100 - - xfrm: spi is big-endian - - the kernel stores spi in a __be32, so fix up the byteorder annotation. - - Signed-off-by: Florian Westphal - Acked-by: Pablo Neira Ayuso ---- - src/xfrm.c | 2 +- - tests/py/inet/ipsec.t.payload | 1 - - 2 files changed, 1 insertion(+), 2 deletions(-) - -diff --git a/src/xfrm.c b/src/xfrm.c -index 4dd53c3..336e8c9 100644 ---- a/src/xfrm.c -+++ b/src/xfrm.c -@@ -39,7 +39,7 @@ const struct xfrm_template xfrm_templates[] = { - [NFT_XFRM_KEY_DADDR_IP6] = XFRM_TEMPLATE_BE("daddr", &ip6addr_type, 16 * BITS_PER_BYTE), - [NFT_XFRM_KEY_SADDR_IP6] = XFRM_TEMPLATE_BE("saddr", &ip6addr_type, 16 * BITS_PER_BYTE), - [NFT_XFRM_KEY_REQID] = XFRM_TEMPLATE_HE("reqid", &integer_type, 4 * BITS_PER_BYTE), -- [NFT_XFRM_KEY_SPI] = XFRM_TEMPLATE_HE("spi", &integer_type, 4 * BITS_PER_BYTE), -+ [NFT_XFRM_KEY_SPI] = XFRM_TEMPLATE_BE("spi", &integer_type, 4 * BITS_PER_BYTE), - }; - - static void xfrm_expr_print(const struct expr *expr, struct output_ctx *octx) -diff --git a/tests/py/inet/ipsec.t.payload b/tests/py/inet/ipsec.t.payload -index 6049c66..c46a226 100644 ---- a/tests/py/inet/ipsec.t.payload -+++ b/tests/py/inet/ipsec.t.payload -@@ -16,7 +16,6 @@ ip ipsec-ip4 ipsec-input - # ipsec out spi 1-561 - inet ipsec-inet ipsec-post - [ xfrm load out 0 spi => reg 1 ] -- [ byteorder reg 1 = hton(reg 1, 4, 4) ] - [ cmp gte reg 1 0x01000000 ] - [ cmp lte reg 1 0x31020000 ] - --- -2.31.1 - diff --git a/SOURCES/0006-doc-Document-limitations-of-ipsec-expression-with-xf.patch b/SOURCES/0006-doc-Document-limitations-of-ipsec-expression-with-xf.patch new file mode 100644 index 0000000..b77a4fe --- /dev/null +++ b/SOURCES/0006-doc-Document-limitations-of-ipsec-expression-with-xf.patch @@ -0,0 +1,44 @@ +From a2e5f4f59c0d4a3880a4de5e95adffc553216d2e Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 10:15:02 +0100 +Subject: [PATCH] doc: Document limitations of ipsec expression with + xfrm_interface + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 446e76dbde713 + +commit 446e76dbde713327358f17a8af6ce86b8541c836 +Author: Phil Sutter +Date: Thu Jun 23 17:49:20 2022 +0200 + + doc: Document limitations of ipsec expression with xfrm_interface + + Point at a possible solution to match IPsec info of locally generated + traffic routed to an xfrm-type interface. + + Signed-off-by: Phil Sutter + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + doc/primary-expression.txt | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/doc/primary-expression.txt b/doc/primary-expression.txt +index f97778b..4d6b087 100644 +--- a/doc/primary-expression.txt ++++ b/doc/primary-expression.txt +@@ -428,6 +428,10 @@ Destination address of the tunnel| + ipv4_addr/ipv6_addr + |================================= + ++*Note:* When using xfrm_interface, this expression is not useable in output ++hook as the plain packet does not traverse it with IPsec info attached - use a ++chain in postrouting hook instead. 
++ + NUMGEN EXPRESSION + ~~~~~~~~~~~~~~~~~ + +-- +2.41.0.rc1 + diff --git a/SOURCES/0006-tests-shell-Search-diff-tool-once-and-for-all.patch b/SOURCES/0006-tests-shell-Search-diff-tool-once-and-for-all.patch deleted file mode 100644 index e1e9c1f..0000000 --- a/SOURCES/0006-tests-shell-Search-diff-tool-once-and-for-all.patch +++ /dev/null @@ -1,573 +0,0 @@ -From 8537751e48dfacee11d48ad3f050bdacc930284c Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Fri, 17 Jan 2020 12:50:23 +0100 -Subject: [PATCH] tests: shell: Search diff tool once and for all - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1790793 -Upstream Status: nftables commit 68310ba0f9c20 - -commit 68310ba0f9c2066f7463d66a1a1938b66fb8a4c4 -Author: Phil Sutter -Date: Tue Jan 14 16:50:35 2020 +0100 - - tests: shell: Search diff tool once and for all - - Instead of calling 'which diff' over and over again, just detect the - tool's presence in run-tests.sh and pass $DIFF to each testcase just - like with nft binary. - - Fall back to using 'true' command to avoid the need for any conditional - calling in test cases. - - While being at it, unify potential diff calls so that a string - comparison in shell happens irrespective of diff presence. - - Signed-off-by: Phil Sutter - Acked-by: Pablo Neira Ayuso ---- - tests/shell/run-tests.sh | 7 ++++++- - tests/shell/testcases/flowtable/0010delete_handle_0 | 3 +-- - tests/shell/testcases/listing/0003table_0 | 6 ++---- - tests/shell/testcases/listing/0004table_0 | 3 +-- - tests/shell/testcases/listing/0005ruleset_ip_0 | 3 +-- - tests/shell/testcases/listing/0006ruleset_ip6_0 | 3 +-- - tests/shell/testcases/listing/0007ruleset_inet_0 | 3 +-- - tests/shell/testcases/listing/0008ruleset_arp_0 | 3 +-- - tests/shell/testcases/listing/0009ruleset_bridge_0 | 3 +-- - tests/shell/testcases/listing/0010sets_0 | 3 +-- - tests/shell/testcases/listing/0011sets_0 | 3 +-- - tests/shell/testcases/listing/0012sets_0 | 3 +-- - tests/shell/testcases/listing/0013objects_0 | 3 +-- - tests/shell/testcases/listing/0014objects_0 | 6 ++---- - tests/shell/testcases/listing/0015dynamic_0 | 3 +-- - tests/shell/testcases/listing/0017objects_0 | 3 +-- - tests/shell/testcases/listing/0018data_0 | 3 +-- - tests/shell/testcases/listing/0019set_0 | 3 +-- - tests/shell/testcases/listing/0020flowtable_0 | 3 +-- - .../shell/testcases/maps/0003map_add_many_elements_0 | 3 +-- - .../testcases/maps/0004interval_map_create_once_0 | 3 +-- - tests/shell/testcases/maps/0008interval_map_delete_0 | 3 +-- - tests/shell/testcases/netns/0001nft-f_0 | 3 +-- - tests/shell/testcases/netns/0002loosecommands_0 | 3 +-- - tests/shell/testcases/netns/0003many_0 | 3 +-- - tests/shell/testcases/nft-f/0016redefines_1 | 3 +-- - .../testcases/optionals/delete_object_handles_0 | 3 +-- - .../testcases/optionals/update_object_handles_0 | 3 +-- - .../rule_management/0001addinsertposition_0 | 12 ++++-------- - tests/shell/testcases/sets/0028delete_handle_0 | 3 +-- - .../testcases/sets/0036add_set_element_expiration_0 | 5 ++++- - tests/shell/testcases/transactions/0003table_0 | 4 +--- - tests/shell/testcases/transactions/0040set_0 | 3 +-- - 33 files changed, 46 insertions(+), 75 deletions(-) - -diff --git a/tests/shell/run-tests.sh b/tests/shell/run-tests.sh -index 632ccce..29a2c39 100755 ---- a/tests/shell/run-tests.sh -+++ b/tests/shell/run-tests.sh -@@ -43,6 +43,11 @@ if [ ! -x "$MODPROBE" ] ; then - msg_error "no modprobe binary found" - fi - -+DIFF="$(which diff)" -+if [ ! 
-x "$DIFF" ] ; then -+ DIFF=true -+fi -+ - if [ "$1" == "-v" ] ; then - VERBOSE=y - shift -@@ -96,7 +101,7 @@ do - kernel_cleanup - - msg_info "[EXECUTING] $testfile" -- test_output=$(NFT=$NFT ${testfile} 2>&1) -+ test_output=$(NFT=$NFT DIFF=$DIFF ${testfile} 2>&1) - rc_got=$? - echo -en "\033[1A\033[K" # clean the [EXECUTING] foobar line - -diff --git a/tests/shell/testcases/flowtable/0010delete_handle_0 b/tests/shell/testcases/flowtable/0010delete_handle_0 -index 985d4a3..8dd8d9f 100755 ---- a/tests/shell/testcases/flowtable/0010delete_handle_0 -+++ b/tests/shell/testcases/flowtable/0010delete_handle_0 -@@ -16,7 +16,6 @@ EXPECTED="table inet t { - - GET="$($NFT list ruleset)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/listing/0003table_0 b/tests/shell/testcases/listing/0003table_0 -index 1b288e4..5060be0 100755 ---- a/tests/shell/testcases/listing/0003table_0 -+++ b/tests/shell/testcases/listing/0003table_0 -@@ -11,15 +11,13 @@ $NFT add table test - - GET="$($NFT list table test)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi - - # also this way - GET="$($NFT list table ip test)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/listing/0004table_0 b/tests/shell/testcases/listing/0004table_0 -index 2c7c995..1d69119 100755 ---- a/tests/shell/testcases/listing/0004table_0 -+++ b/tests/shell/testcases/listing/0004table_0 -@@ -12,8 +12,7 @@ $NFT add table test2 - - GET="$($NFT list table test)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi - -diff --git a/tests/shell/testcases/listing/0005ruleset_ip_0 b/tests/shell/testcases/listing/0005ruleset_ip_0 -index c326680..39c0328 100755 ---- a/tests/shell/testcases/listing/0005ruleset_ip_0 -+++ b/tests/shell/testcases/listing/0005ruleset_ip_0 -@@ -15,7 +15,6 @@ $NFT add table bridge test - - GET="$($NFT list ruleset ip)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/listing/0006ruleset_ip6_0 b/tests/shell/testcases/listing/0006ruleset_ip6_0 -index 093d5a5..1b67f50 100755 ---- a/tests/shell/testcases/listing/0006ruleset_ip6_0 -+++ b/tests/shell/testcases/listing/0006ruleset_ip6_0 -@@ -15,7 +15,6 @@ $NFT add table bridge test - - GET="$($NFT list ruleset ip6)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/listing/0007ruleset_inet_0 b/tests/shell/testcases/listing/0007ruleset_inet_0 -index b24cc4c..257c7a9 100755 ---- a/tests/shell/testcases/listing/0007ruleset_inet_0 -+++ b/tests/shell/testcases/listing/0007ruleset_inet_0 -@@ -15,7 +15,6 @@ $NFT add table bridge test - - GET="$($NFT list ruleset inet)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && 
$DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/listing/0008ruleset_arp_0 b/tests/shell/testcases/listing/0008ruleset_arp_0 -index fff0fee..be42c47 100755 ---- a/tests/shell/testcases/listing/0008ruleset_arp_0 -+++ b/tests/shell/testcases/listing/0008ruleset_arp_0 -@@ -15,7 +15,6 @@ $NFT add table bridge test - - GET="$($NFT list ruleset arp)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/listing/0009ruleset_bridge_0 b/tests/shell/testcases/listing/0009ruleset_bridge_0 -index 247ed47..c6a99f5 100755 ---- a/tests/shell/testcases/listing/0009ruleset_bridge_0 -+++ b/tests/shell/testcases/listing/0009ruleset_bridge_0 -@@ -15,7 +15,6 @@ $NFT add table bridge test - - GET="$($NFT list ruleset bridge)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/listing/0010sets_0 b/tests/shell/testcases/listing/0010sets_0 -index 855cceb..0f5f2bd 100755 ---- a/tests/shell/testcases/listing/0010sets_0 -+++ b/tests/shell/testcases/listing/0010sets_0 -@@ -57,7 +57,6 @@ $NFT add set inet filter set2 { type icmpv6_type \; } - - GET="$($NFT list sets)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/listing/0011sets_0 b/tests/shell/testcases/listing/0011sets_0 -index aac9eac..b6f12b5 100755 ---- a/tests/shell/testcases/listing/0011sets_0 -+++ b/tests/shell/testcases/listing/0011sets_0 -@@ -38,7 +38,6 @@ $NFT add rule inet filter test tcp dport {80, 443} - GET="$($NFT list sets)" - - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/listing/0012sets_0 b/tests/shell/testcases/listing/0012sets_0 -index da16d94..6e4c959 100755 ---- a/tests/shell/testcases/listing/0012sets_0 -+++ b/tests/shell/testcases/listing/0012sets_0 -@@ -33,7 +33,6 @@ $NFT add set inet filter set2 { type icmpv6_type \; } - - GET="$($NFT list sets inet)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/listing/0013objects_0 b/tests/shell/testcases/listing/0013objects_0 -index f691579..4d39143 100755 ---- a/tests/shell/testcases/listing/0013objects_0 -+++ b/tests/shell/testcases/listing/0013objects_0 -@@ -42,7 +42,6 @@ $NFT add table test-ip - - GET="$($NFT list table test)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/listing/0014objects_0 b/tests/shell/testcases/listing/0014objects_0 -index 20f6840..31d94f8 100755 ---- a/tests/shell/testcases/listing/0014objects_0 -+++ b/tests/shell/testcases/listing/0014objects_0 -@@ -17,15 +17,13 @@ $NFT add table test-ip - - GET="$($NFT list quotas)" - if [ "$EXPECTED" != "$GET" ] ; then -- 
DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi - - GET="$($NFT list quota test https-quota)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi - -diff --git a/tests/shell/testcases/listing/0015dynamic_0 b/tests/shell/testcases/listing/0015dynamic_0 -index 4ff74e3..65fbe62 100755 ---- a/tests/shell/testcases/listing/0015dynamic_0 -+++ b/tests/shell/testcases/listing/0015dynamic_0 -@@ -16,8 +16,7 @@ $NFT -f - <<< "$EXPECTED" - - GET="$($NFT list set ip filter test_set)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi - -diff --git a/tests/shell/testcases/listing/0017objects_0 b/tests/shell/testcases/listing/0017objects_0 -index 8a586e8..c4e72db 100755 ---- a/tests/shell/testcases/listing/0017objects_0 -+++ b/tests/shell/testcases/listing/0017objects_0 -@@ -13,7 +13,6 @@ $NFT flush map inet filter countermap - - GET="$($NFT list map inet filter countermap)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/listing/0018data_0 b/tests/shell/testcases/listing/0018data_0 -index 544b6bf..4af253d 100755 ---- a/tests/shell/testcases/listing/0018data_0 -+++ b/tests/shell/testcases/listing/0018data_0 -@@ -13,7 +13,6 @@ $NFT flush map inet filter ipmap - - GET="$($NFT list map inet filter ipmap)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/listing/0019set_0 b/tests/shell/testcases/listing/0019set_0 -index 54a8a06..6e8cb4d 100755 ---- a/tests/shell/testcases/listing/0019set_0 -+++ b/tests/shell/testcases/listing/0019set_0 -@@ -13,7 +13,6 @@ $NFT flush set inet filter ipset - - GET="$($NFT list set inet filter ipset)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/listing/0020flowtable_0 b/tests/shell/testcases/listing/0020flowtable_0 -index 6f630f1..2f0a98d 100755 ---- a/tests/shell/testcases/listing/0020flowtable_0 -+++ b/tests/shell/testcases/listing/0020flowtable_0 -@@ -15,7 +15,6 @@ $NFT -f - <<< "$EXPECTED" - - GET="$($NFT list flowtable inet filter f)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/maps/0003map_add_many_elements_0 b/tests/shell/testcases/maps/0003map_add_many_elements_0 -index 047f949..2b254c5 100755 ---- a/tests/shell/testcases/maps/0003map_add_many_elements_0 -+++ b/tests/shell/testcases/maps/0003map_add_many_elements_0 -@@ -61,8 +61,7 @@ EXPECTED="table ip x { - }" - GET=$($NFT list ruleset) - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi - -diff --git 
a/tests/shell/testcases/maps/0004interval_map_create_once_0 b/tests/shell/testcases/maps/0004interval_map_create_once_0 -index 58b399c..3de0c9d 100755 ---- a/tests/shell/testcases/maps/0004interval_map_create_once_0 -+++ b/tests/shell/testcases/maps/0004interval_map_create_once_0 -@@ -60,8 +60,7 @@ EXPECTED="table ip x { - }" - GET=$($NFT list ruleset) - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi - -diff --git a/tests/shell/testcases/maps/0008interval_map_delete_0 b/tests/shell/testcases/maps/0008interval_map_delete_0 -index 7da6eb3..39ea312 100755 ---- a/tests/shell/testcases/maps/0008interval_map_delete_0 -+++ b/tests/shell/testcases/maps/0008interval_map_delete_0 -@@ -26,7 +26,6 @@ $NFT add element filter m { 127.0.0.2 : 0x2 } - - GET=$($NFT -s list ruleset) - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/netns/0001nft-f_0 b/tests/shell/testcases/netns/0001nft-f_0 -index 8194226..a591f2c 100755 ---- a/tests/shell/testcases/netns/0001nft-f_0 -+++ b/tests/shell/testcases/netns/0001nft-f_0 -@@ -93,8 +93,7 @@ fi - KERNEL_RULESET="$($IP netns exec $NETNS_NAME $NFT list ruleset)" - $IP netns del $NETNS_NAME - if [ "$RULESET" != "$KERNEL_RULESET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$RULESET") <(echo "$KERNEL_RULESET") -+ $DIFF -u <(echo "$RULESET") <(echo "$KERNEL_RULESET") - exit 1 - fi - exit 0 -diff --git a/tests/shell/testcases/netns/0002loosecommands_0 b/tests/shell/testcases/netns/0002loosecommands_0 -index 465c2e8..231f1fb 100755 ---- a/tests/shell/testcases/netns/0002loosecommands_0 -+++ b/tests/shell/testcases/netns/0002loosecommands_0 -@@ -56,7 +56,6 @@ RULESET="table ip t { - KERNEL_RULESET="$($IP netns exec $NETNS_NAME $NFT list ruleset)" - $IP netns del $NETNS_NAME - if [ "$RULESET" != "$KERNEL_RULESET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$RULESET") <(echo "$KERNEL_RULESET") -+ $DIFF -u <(echo "$RULESET") <(echo "$KERNEL_RULESET") - exit 1 - fi -diff --git a/tests/shell/testcases/netns/0003many_0 b/tests/shell/testcases/netns/0003many_0 -index a5fcb5d..afe9117 100755 ---- a/tests/shell/testcases/netns/0003many_0 -+++ b/tests/shell/testcases/netns/0003many_0 -@@ -97,8 +97,7 @@ function test_netns() - KERNEL_RULESET="$($IP netns exec $NETNS_NAME $NFT list ruleset)" - if [ "$RULESET" != "$KERNEL_RULESET" ] ; then - echo "E: ruleset in netns $NETNS_NAME differs from the loaded" >&2 -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$RULESET") <(echo "$KERNEL_RULESET") -+ $DIFF -u <(echo "$RULESET") <(echo "$KERNEL_RULESET") - $IP netns del $NETNS_NAME - exit 1 - fi -diff --git a/tests/shell/testcases/nft-f/0016redefines_1 b/tests/shell/testcases/nft-f/0016redefines_1 -index 4c26b37..1f59f6b 100755 ---- a/tests/shell/testcases/nft-f/0016redefines_1 -+++ b/tests/shell/testcases/nft-f/0016redefines_1 -@@ -26,8 +26,7 @@ $NFT -f - <<< "$RULESET" - GET="$($NFT list ruleset)" - - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi - -diff --git a/tests/shell/testcases/optionals/delete_object_handles_0 b/tests/shell/testcases/optionals/delete_object_handles_0 -index 
a2ae422..9b65e67 100755 ---- a/tests/shell/testcases/optionals/delete_object_handles_0 -+++ b/tests/shell/testcases/optionals/delete_object_handles_0 -@@ -37,7 +37,6 @@ table ip6 test-ip6 { - GET="$($NFT list ruleset)" - - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/optionals/update_object_handles_0 b/tests/shell/testcases/optionals/update_object_handles_0 -index 17c0c86..8b12b8c 100755 ---- a/tests/shell/testcases/optionals/update_object_handles_0 -+++ b/tests/shell/testcases/optionals/update_object_handles_0 -@@ -19,7 +19,6 @@ EXPECTED="table ip test-ip { - - GET="$($NFT list ruleset)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/rule_management/0001addinsertposition_0 b/tests/shell/testcases/rule_management/0001addinsertposition_0 -index bb3fda5..237e9e3 100755 ---- a/tests/shell/testcases/rule_management/0001addinsertposition_0 -+++ b/tests/shell/testcases/rule_management/0001addinsertposition_0 -@@ -30,8 +30,7 @@ for arg in "position 2" "handle 2" "index 0"; do - - GET="$($NFT list ruleset)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi - done -@@ -42,8 +41,7 @@ for arg in "position 3" "handle 3" "index 1"; do - - GET="$($NFT list ruleset)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi - done -@@ -62,8 +60,7 @@ for arg in "position 3" "handle 3" "index 1"; do - - GET="$($NFT list ruleset)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi - done -@@ -82,8 +79,7 @@ for arg in "position 2" "handle 2" "index 0"; do - - GET="$($NFT list ruleset)" - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi - done -diff --git a/tests/shell/testcases/sets/0028delete_handle_0 b/tests/shell/testcases/sets/0028delete_handle_0 -index 5ad17c2..c6d1253 100755 ---- a/tests/shell/testcases/sets/0028delete_handle_0 -+++ b/tests/shell/testcases/sets/0028delete_handle_0 -@@ -29,7 +29,6 @@ EXPECTED="table ip test-ip { - GET="$($NFT list ruleset)" - - if [ "$EXPECTED" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") - exit 1 - fi -diff --git a/tests/shell/testcases/sets/0036add_set_element_expiration_0 b/tests/shell/testcases/sets/0036add_set_element_expiration_0 -index 8dfed6c..51ed0f2 100755 ---- a/tests/shell/testcases/sets/0036add_set_element_expiration_0 -+++ b/tests/shell/testcases/sets/0036add_set_element_expiration_0 -@@ -8,6 +8,9 @@ add element ip x y { 1.1.1.1 timeout 30s expires 15s }" - - test_output=$($NFT -e -f - <<< "$RULESET" 2>&1) - --diff -u <(echo "$test_output") <(echo "$RULESET") -+if [ "$test_output" != "$RULESET" ] ; then -+ $DIFF -u <(echo "$test_output") <(echo "$RULESET") -+ exit 1 -+fi - - $NFT 
"add chain ip x c; add rule ip x c ip saddr @y" -diff --git a/tests/shell/testcases/transactions/0003table_0 b/tests/shell/testcases/transactions/0003table_0 -index 6861eab..91186de 100755 ---- a/tests/shell/testcases/transactions/0003table_0 -+++ b/tests/shell/testcases/transactions/0003table_0 -@@ -14,7 +14,6 @@ fi - - KERNEL_RULESET="$($NFT list ruleset)" - if [ "" != "$KERNEL_RULESET" ] ; then -- DIFF="$(which diff)" - echo "Got a ruleset, but expected empty: " - echo "$KERNEL_RULESET" - exit 1 -@@ -42,7 +41,6 @@ $NFT -f - <<< "$RULESETFAIL" && exit 2 - - KERNEL_RULESET="$($NFT list ruleset)" - if [ "$RULESET" != "$KERNEL_RULESET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$RULESET") <(echo "$KERNEL_RULESET") -+ $DIFF -u <(echo "$RULESET") <(echo "$KERNEL_RULESET") - exit 1 - fi -diff --git a/tests/shell/testcases/transactions/0040set_0 b/tests/shell/testcases/transactions/0040set_0 -index a404abc..468816b 100755 ---- a/tests/shell/testcases/transactions/0040set_0 -+++ b/tests/shell/testcases/transactions/0040set_0 -@@ -29,8 +29,7 @@ fi - GET="$($NFT list ruleset)" - - if [ "$RULESET" != "$GET" ] ; then -- DIFF="$(which diff)" -- [ -x $DIFF ] && $DIFF -u <(echo "$RULESET") <(echo "$GET") -+ $DIFF -u <(echo "$RULESET") <(echo "$GET") - exit 1 - fi - --- -2.31.1 - diff --git a/SOURCES/0007-cache-Fix-for-doubled-output-after-reset-command.patch b/SOURCES/0007-cache-Fix-for-doubled-output-after-reset-command.patch deleted file mode 100644 index 2374687..0000000 --- a/SOURCES/0007-cache-Fix-for-doubled-output-after-reset-command.patch +++ /dev/null @@ -1,85 +0,0 @@ -From a44bd9f4b6cf77cb75c5f596908100270893e8d5 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Fri, 17 Jan 2020 12:50:23 +0100 -Subject: [PATCH] cache: Fix for doubled output after reset command - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1790793 -Upstream Status: nftables commit 7def18395d118 - -commit 7def18395d118e22a009de7e2e8de7f77906580b -Author: Phil Sutter -Date: Tue Jan 14 17:25:35 2020 +0100 - - cache: Fix for doubled output after reset command - - Reset command causes a dump of the objects to reset and adds those to - cache. Yet it ignored if the object in question was already there and up - to now CMD_RESET was flagged as NFT_CACHE_FULL. - - Tackle this from two angles: First, reduce cache requirements of reset - command to the necessary bits which is table cache. This alone would - suffice if there wasn't interactive mode (and other libnftables users): - A cache containing the objects to reset might be in place already, so - add dumped objects to cache only if they don't exist already. 
- - Signed-off-by: Phil Sutter - Acked-by: Pablo Neira Ayuso ---- - src/cache.c | 4 +++- - src/rule.c | 3 ++- - tests/shell/testcases/sets/0024named_objects_0 | 12 +++++++++++- - 3 files changed, 16 insertions(+), 3 deletions(-) - -diff --git a/src/cache.c b/src/cache.c -index 0c28a28..05f0d68 100644 ---- a/src/cache.c -+++ b/src/cache.c -@@ -138,8 +138,10 @@ unsigned int cache_evaluate(struct nft_ctx *nft, struct list_head *cmds) - case CMD_GET: - flags = evaluate_cache_get(cmd, flags); - break; -- case CMD_LIST: - case CMD_RESET: -+ flags |= NFT_CACHE_TABLE; -+ break; -+ case CMD_LIST: - case CMD_EXPORT: - case CMD_MONITOR: - flags |= NFT_CACHE_FULL; -diff --git a/src/rule.c b/src/rule.c -index d985d3a..3ca1805 100644 ---- a/src/rule.c -+++ b/src/rule.c -@@ -2554,7 +2554,8 @@ static int do_command_reset(struct netlink_ctx *ctx, struct cmd *cmd) - ret = netlink_reset_objs(ctx, cmd, type, dump); - list_for_each_entry_safe(obj, next, &ctx->list, list) { - table = table_lookup(&obj->handle, &ctx->nft->cache); -- list_move(&obj->list, &table->objs); -+ if (!obj_lookup(table, obj->handle.obj.name, obj->type)) -+ list_move(&obj->list, &table->objs); - } - if (ret < 0) - return ret; -diff --git a/tests/shell/testcases/sets/0024named_objects_0 b/tests/shell/testcases/sets/0024named_objects_0 -index 3bd16f2..21200c3 100755 ---- a/tests/shell/testcases/sets/0024named_objects_0 -+++ b/tests/shell/testcases/sets/0024named_objects_0 -@@ -35,4 +35,14 @@ table inet x { - set -e - $NFT -f - <<< "$RULESET" - --$NFT reset counter inet x user321 -+EXPECTED="table inet x { -+ counter user321 { -+ packets 12 bytes 1433 -+ } -+}" -+ -+GET="$($NFT reset counter inet x user321)" -+if [ "$EXPECTED" != "$GET" ] ; then -+ $DIFF -u <(echo "$EXPECTED") <(echo "$GET") -+ exit 1 -+fi --- -2.31.1 - diff --git a/SOURCES/0007-tests-py-Add-a-test-for-failing-ipsec-after-counter.patch b/SOURCES/0007-tests-py-Add-a-test-for-failing-ipsec-after-counter.patch new file mode 100644 index 0000000..91bb404 --- /dev/null +++ b/SOURCES/0007-tests-py-Add-a-test-for-failing-ipsec-after-counter.patch @@ -0,0 +1,86 @@ +From 23e6c3545b6c416a0eb7d3c7ac97c74215dcc19c Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 10:18:10 +0100 +Subject: [PATCH] tests/py: Add a test for failing ipsec after counter + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit ed2426bccd3ea + +commit ed2426bccd3ea954adc8a010bf1736e8ed6a81b9 +Author: Phil Sutter +Date: Thu Jun 23 16:28:42 2022 +0200 + + tests/py: Add a test for failing ipsec after counter + + This is a bug in parser/scanner due to scoping: + + | Error: syntax error, unexpected string, expecting saddr or daddr + | add rule ip ipsec-ip4 ipsec-forw counter ipsec out ip daddr 192.168.1.2 + | ^^^^^ + + Signed-off-by: Phil Sutter + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + tests/py/inet/ipsec.t | 2 ++ + tests/py/inet/ipsec.t.json | 21 +++++++++++++++++++++ + tests/py/inet/ipsec.t.payload | 6 ++++++ + 3 files changed, 29 insertions(+) + +diff --git a/tests/py/inet/ipsec.t b/tests/py/inet/ipsec.t +index e924e9b..b18df39 100644 +--- a/tests/py/inet/ipsec.t ++++ b/tests/py/inet/ipsec.t +@@ -19,3 +19,5 @@ ipsec in ip6 daddr dead::beef;ok + ipsec out ip6 saddr dead::feed;ok + + ipsec in spnum 256 reqid 1;fail ++ ++counter ipsec out ip daddr 192.168.1.2;ok +diff --git a/tests/py/inet/ipsec.t.json b/tests/py/inet/ipsec.t.json +index d7d3a03..18a64f3 100644 +--- a/tests/py/inet/ipsec.t.json ++++ b/tests/py/inet/ipsec.t.json 
+@@ -134,3 +134,24 @@ + } + } + ] ++ ++# counter ipsec out ip daddr 192.168.1.2 ++[ ++ { ++ "counter": null ++ }, ++ { ++ "match": { ++ "left": { ++ "ipsec": { ++ "dir": "out", ++ "family": "ip", ++ "key": "daddr", ++ "spnum": 0 ++ } ++ }, ++ "op": "==", ++ "right": "192.168.1.2" ++ } ++ } ++] +diff --git a/tests/py/inet/ipsec.t.payload b/tests/py/inet/ipsec.t.payload +index c46a226..9648255 100644 +--- a/tests/py/inet/ipsec.t.payload ++++ b/tests/py/inet/ipsec.t.payload +@@ -37,3 +37,9 @@ ip ipsec-ip4 ipsec-forw + [ xfrm load out 0 saddr6 => reg 1 ] + [ cmp eq reg 1 0x0000adde 0x00000000 0x00000000 0xedfe0000 ] + ++# counter ipsec out ip daddr 192.168.1.2 ++ip ipsec-ip4 ipsec-forw ++ [ counter pkts 0 bytes 0 ] ++ [ xfrm load out 0 daddr4 => reg 1 ] ++ [ cmp eq reg 1 0x0201a8c0 ] ++ +-- +2.41.0.rc1 + diff --git a/SOURCES/0008-netlink-Fix-leak-in-unterminated-string-deserializer.patch b/SOURCES/0008-netlink-Fix-leak-in-unterminated-string-deserializer.patch deleted file mode 100644 index 414c39f..0000000 --- a/SOURCES/0008-netlink-Fix-leak-in-unterminated-string-deserializer.patch +++ /dev/null @@ -1,51 +0,0 @@ -From cc70f19e588a0a33ed86c4a059b56a8f5b0c7a82 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 27 Jan 2020 16:11:41 +0100 -Subject: [PATCH] netlink: Fix leak in unterminated string deserializer - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1793030 -Upstream Status: nftables commit c3f6be3f2dced - -commit c3f6be3f2dcedf6d79751c0b975315ebc3184364 -Author: Phil Sutter -Date: Mon Jan 20 13:52:10 2020 +0100 - - netlink: Fix leak in unterminated string deserializer - - Allocated 'mask' expression is not freed before returning to caller, - although it is used temporarily only. - - Fixes: b851ba4731d9f ("src: add interface wildcard matching") - Signed-off-by: Phil Sutter - Acked-by: Pablo Neira Ayuso ---- - src/netlink_delinearize.c | 6 ++++-- - 1 file changed, 4 insertions(+), 2 deletions(-) - -diff --git a/src/netlink_delinearize.c b/src/netlink_delinearize.c -index 154353b..06a0312 100644 ---- a/src/netlink_delinearize.c -+++ b/src/netlink_delinearize.c -@@ -2030,7 +2030,7 @@ static bool __expr_postprocess_string(struct expr **exprp) - - static struct expr *expr_postprocess_string(struct expr *expr) - { -- struct expr *mask; -+ struct expr *mask, *out; - - assert(expr_basetype(expr)->type == TYPE_STRING); - if (__expr_postprocess_string(&expr)) -@@ -2040,7 +2040,9 @@ static struct expr *expr_postprocess_string(struct expr *expr) - BYTEORDER_HOST_ENDIAN, - expr->len + BITS_PER_BYTE, NULL); - mpz_init_bitmask(mask->value, expr->len); -- return string_wildcard_expr_alloc(&expr->location, mask, expr); -+ out = string_wildcard_expr_alloc(&expr->location, mask, expr); -+ expr_free(mask); -+ return out; - } - - static void expr_postprocess(struct rule_pp_ctx *ctx, struct expr **exprp) --- -2.31.1 - diff --git a/SOURCES/0008-parser-add-missing-synproxy-scope-closure.patch b/SOURCES/0008-parser-add-missing-synproxy-scope-closure.patch new file mode 100644 index 0000000..fe6996c --- /dev/null +++ b/SOURCES/0008-parser-add-missing-synproxy-scope-closure.patch @@ -0,0 +1,38 @@ +From d0d4d54136f10c23e279da40aae188b8fdc09293 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 10:18:10 +0100 +Subject: [PATCH] parser: add missing synproxy scope closure + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 994bf5004b365 + +commit 994bf5004b365904029f0fe8c2de587178583712 +Author: Florian Westphal +Date: Thu Jun 23 18:28:14 2022 
+0200 + + parser: add missing synproxy scope closure + + Fixes: 232f2c3287fc ("scanner: synproxy: Move to own scope") + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + src/parser_bison.y | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/parser_bison.y b/src/parser_bison.y +index ca5c488..b548d5b 100644 +--- a/src/parser_bison.y ++++ b/src/parser_bison.y +@@ -2016,7 +2016,7 @@ map_block_obj_type : COUNTER close_scope_counter { $$ = NFT_OBJECT_COUNTER; } + | QUOTA close_scope_quota { $$ = NFT_OBJECT_QUOTA; } + | LIMIT close_scope_limit { $$ = NFT_OBJECT_LIMIT; } + | SECMARK close_scope_secmark { $$ = NFT_OBJECT_SECMARK; } +- | SYNPROXY { $$ = NFT_OBJECT_SYNPROXY; } ++ | SYNPROXY close_scope_synproxy { $$ = NFT_OBJECT_SYNPROXY; } + ; + + map_block : /* empty */ { $$ = $-1; } +-- +2.41.0.rc1 + diff --git a/SOURCES/0009-netlink-Fix-leaks-in-netlink_parse_cmp.patch b/SOURCES/0009-netlink-Fix-leaks-in-netlink_parse_cmp.patch deleted file mode 100644 index 9043fb1..0000000 --- a/SOURCES/0009-netlink-Fix-leaks-in-netlink_parse_cmp.patch +++ /dev/null @@ -1,75 +0,0 @@ -From 6ecccc872b9cbed921af10e32d1a628eb6a74c01 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 27 Jan 2020 16:11:41 +0100 -Subject: [PATCH] netlink: Fix leaks in netlink_parse_cmp() - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1793030 -Upstream Status: nftables commit e957bd9f10d5e - -commit e957bd9f10d5e36671a0b0398e2037fc6201275b -Author: Phil Sutter -Date: Mon Jan 20 14:48:26 2020 +0100 - - netlink: Fix leaks in netlink_parse_cmp() - - This fixes several problems at once: - - * Err path would leak expr 'right' in two places and 'left' in one. - * Concat case would leak 'right' by overwriting the pointer. Introduce a - temporary variable to hold the new pointer. 
- - Fixes: 6377380bc265f ("netlink_delinearize: handle relational and lookup concat expressions") - Signed-off-by: Phil Sutter - Acked-by: Pablo Neira Ayuso ---- - src/netlink_delinearize.c | 19 +++++++++++++------ - 1 file changed, 13 insertions(+), 6 deletions(-) - -diff --git a/src/netlink_delinearize.c b/src/netlink_delinearize.c -index 06a0312..88dbd5a 100644 ---- a/src/netlink_delinearize.c -+++ b/src/netlink_delinearize.c -@@ -274,7 +274,7 @@ static void netlink_parse_cmp(struct netlink_parse_ctx *ctx, - { - struct nft_data_delinearize nld; - enum nft_registers sreg; -- struct expr *expr, *left, *right; -+ struct expr *expr, *left, *right, *tmp; - enum ops op; - - sreg = netlink_parse_register(nle, NFTNL_EXPR_CMP_SREG); -@@ -291,19 +291,26 @@ static void netlink_parse_cmp(struct netlink_parse_ctx *ctx, - - if (left->len > right->len && - expr_basetype(left) != &string_type) { -- return netlink_error(ctx, loc, "Relational expression size mismatch"); -+ netlink_error(ctx, loc, "Relational expression size mismatch"); -+ goto err_free; - } else if (left->len > 0 && left->len < right->len) { - expr_free(left); - left = netlink_parse_concat_expr(ctx, loc, sreg, right->len); - if (left == NULL) -- return; -- right = netlink_parse_concat_data(ctx, loc, sreg, right->len, right); -- if (right == NULL) -- return; -+ goto err_free; -+ tmp = netlink_parse_concat_data(ctx, loc, sreg, right->len, right); -+ if (tmp == NULL) -+ goto err_free; -+ expr_free(right); -+ right = tmp; - } - - expr = relational_expr_alloc(loc, op, left, right); - ctx->stmt = expr_stmt_alloc(loc, expr); -+ return; -+err_free: -+ expr_free(left); -+ expr_free(right); - } - - static void netlink_parse_lookup(struct netlink_parse_ctx *ctx, --- -2.31.1 - diff --git a/SOURCES/0009-scanner-don-t-pop-active-flex-scanner-scope.patch b/SOURCES/0009-scanner-don-t-pop-active-flex-scanner-scope.patch new file mode 100644 index 0000000..f785f7b --- /dev/null +++ b/SOURCES/0009-scanner-don-t-pop-active-flex-scanner-scope.patch @@ -0,0 +1,144 @@ +From 80b1505ca2ef8432375dc524cc6763e7ef795b1a Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 10:18:10 +0100 +Subject: [PATCH] scanner: don't pop active flex scanner scope + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 8623772af0610 + +commit 8623772af06103ed4ccca3d07e55afbf3d952d6d +Author: Florian Westphal +Date: Thu Jun 23 19:56:19 2022 +0200 + + scanner: don't pop active flex scanner scope + + Currently we can pop a flex scope that is still active, i.e. the + scanner_pop_start_cond() for the scope has not been done. + + Example: + counter ipsec out ip daddr 192.168.1.2 counter name "ipsec_out" + + Here, parser fails because 'daddr' is parsed as STRING, not as DADDR token. + + Bug is as follows: + COUNTER changes scope to COUNTER. (COUNTER). + Next, IPSEC scope gets pushed, stack is: COUNTER, IPSEC. + + Then, the 'COUNTER' scope close happens. Because active scope has changed, + we cannot pop (we would pop the 'ipsec' scope in flex). + The pop operation gets delayed accordingly. + + Next, IP gets pushed, stack is: COUNTER, IPSEC, IP, plus the information + that one scope closure/pop was delayed. + + Then, the IP scope is closed. Because a pop operation was delayed, we pop again, + which brings us back to COUNTER state. + + This is bogus: The pop operation CANNOT be done yet, because the ipsec scope + is still open, but the existing code lacks the information to detect this. 
+ + After popping the IP scope, we must remain in IPSEC scope until bison + parser calls scanner_pop_start_cond(, IPSEC). + + This adds a counter per flex scope so that we can detect this case. + In above case, after the IP scope gets closed, the "new" (previous) + scope (IPSEC) will be treated as active and its close is attempted again + on the next call to scanner_pop_start_cond(). + + After this patch, transition in above rule is: + + push counter (COUNTER) + push IPSEC (COUNTER, IPSEC) + pop COUNTER (delayed: COUNTER, IPSEC, pending-pop for COUNTER), + push IP (COUNTER, IPSEC, IP, pending-pop for COUNTER) + pop IP (COUNTER, IPSEC, pending-pop for COUNTER) + parse DADDR (we're in IPSEC scope, its valid token) + pop IPSEC (pops all remaining scopes). + + We could also resurrect the commit: + "scanner: flags: move to own scope", the test case passes with the + new scope closure logic. + + Fixes: bff106c5b277 ("scanner: add support for scope nesting") + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + include/parser.h | 3 +++ + src/scanner.l | 11 +++++++++++ + 2 files changed, 14 insertions(+) + +diff --git a/include/parser.h b/include/parser.h +index f32154c..5e5ad28 100644 +--- a/include/parser.h ++++ b/include/parser.h +@@ -26,6 +26,7 @@ struct parser_state { + unsigned int flex_state_pop; + unsigned int startcond_type; + struct list_head *cmds; ++ unsigned int *startcond_active; + }; + + enum startcond_type { +@@ -82,6 +83,8 @@ enum startcond_type { + PARSER_SC_STMT_REJECT, + PARSER_SC_STMT_SYNPROXY, + PARSER_SC_STMT_TPROXY, ++ ++ __SC_MAX + }; + + struct mnl_socket; +diff --git a/src/scanner.l b/src/scanner.l +index 2154281..ed7256b 100644 +--- a/src/scanner.l ++++ b/src/scanner.l +@@ -1148,6 +1148,8 @@ void *scanner_init(struct parser_state *state) + yylex_init_extra(state, &scanner); + yyset_out(NULL, scanner); + ++ state->startcond_active = xzalloc_array(__SC_MAX, ++ sizeof(*state->startcond_active)); + return scanner; + } + +@@ -1177,6 +1179,8 @@ void scanner_destroy(struct nft_ctx *nft) + struct parser_state *state = yyget_extra(nft->scanner); + + input_descriptor_list_destroy(state); ++ xfree(state->startcond_active); ++ + yylex_destroy(nft->scanner); + } + +@@ -1185,6 +1189,7 @@ static void scanner_push_start_cond(void *scanner, enum startcond_type type) + struct parser_state *state = yyget_extra(scanner); + + state->startcond_type = type; ++ state->startcond_active[type]++; + + yy_push_state((int)type, scanner); + } +@@ -1193,6 +1198,8 @@ void scanner_pop_start_cond(void *scanner, enum startcond_type t) + { + struct parser_state *state = yyget_extra(scanner); + ++ state->startcond_active[t]--; ++ + if (state->startcond_type != t) { + state->flex_state_pop++; + return; /* Can't pop just yet! 
*/ +@@ -1202,6 +1209,10 @@ void scanner_pop_start_cond(void *scanner, enum startcond_type t) + state->flex_state_pop--; + state->startcond_type = yy_top_state(scanner); + yy_pop_state(scanner); ++ ++ t = state->startcond_type; ++ if (state->startcond_active[t]) ++ return; + } + + state->startcond_type = yy_top_state(scanner); +-- +2.41.0.rc1 + diff --git a/SOURCES/0010-intervals-fix-crash-when-trying-to-remove-element-in.patch b/SOURCES/0010-intervals-fix-crash-when-trying-to-remove-element-in.patch new file mode 100644 index 0000000..5ac725f --- /dev/null +++ b/SOURCES/0010-intervals-fix-crash-when-trying-to-remove-element-in.patch @@ -0,0 +1,67 @@ +From babfd73139d19750a7b1f94fdc1b5405f5affe61 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 10:25:59 +0100 +Subject: [PATCH] intervals: fix crash when trying to remove element in empty + set + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 5357cb7b5cb93 + +commit 5357cb7b5cb93fc9b20d4d95b093d6b9f86b7727 +Author: Pablo Neira Ayuso +Date: Thu Jun 23 14:20:17 2022 +0200 + + intervals: fix crash when trying to remove element in empty set + + The set deletion routine expects an initialized set, otherwise it crashes. + + Fixes: 3e8d934e4f72 ("intervals: support to partial deletion with automerge") + Signed-off-by: Pablo Neira Ayuso + +Signed-off-by: Phil Sutter +--- + src/intervals.c | 6 +++++- + tests/shell/testcases/sets/errors_0 | 14 ++++++++++++++ + 2 files changed, 19 insertions(+), 1 deletion(-) + create mode 100755 tests/shell/testcases/sets/errors_0 + +diff --git a/src/intervals.c b/src/intervals.c +index dcc06d1..c21b3ee 100644 +--- a/src/intervals.c ++++ b/src/intervals.c +@@ -475,7 +475,11 @@ int set_delete(struct list_head *msgs, struct cmd *cmd, struct set *set, + if (set->automerge) + automerge_delete(msgs, set, init, debug_mask); + +- set_to_range(existing_set->init); ++ if (existing_set->init) { ++ set_to_range(existing_set->init); ++ } else { ++ existing_set->init = set_expr_alloc(&internal_location, set); ++ } + + list_splice_init(&init->expressions, &del_list); + +diff --git a/tests/shell/testcases/sets/errors_0 b/tests/shell/testcases/sets/errors_0 +new file mode 100755 +index 0000000..2960b69 +--- /dev/null ++++ b/tests/shell/testcases/sets/errors_0 +@@ -0,0 +1,14 @@ ++#!/bin/bash ++ ++set -e ++ ++RULESET="table ip x { ++ set y { ++ type ipv4_addr ++ flags interval ++ } ++} ++ ++delete element ip x y { 2.3.4.5 }" ++ ++$NFT -f - <<< $RULESET || exit 0 +-- +2.41.0.rc1 + diff --git a/SOURCES/0010-netlink-Avoid-potential-NULL-pointer-deref-in-netlin.patch b/SOURCES/0010-netlink-Avoid-potential-NULL-pointer-deref-in-netlin.patch deleted file mode 100644 index b772afc..0000000 --- a/SOURCES/0010-netlink-Avoid-potential-NULL-pointer-deref-in-netlin.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 55c537734f476d04c18f67083642b96bbead6219 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 27 Jan 2020 16:11:41 +0100 -Subject: [PATCH] netlink: Avoid potential NULL-pointer deref in - netlink_gen_payload_stmt() - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1793030 -Upstream Status: nftables commit c9ddf0bff363f - -commit c9ddf0bff363fc9101b563b592db600bdf4d65c5 -Author: Phil Sutter -Date: Mon Jan 20 16:32:40 2020 +0100 - - netlink: Avoid potential NULL-pointer deref in netlink_gen_payload_stmt() - - With payload_needs_l4csum_update_pseudohdr() unconditionally - dereferencing passed 'desc' parameter and a previous check for it to be - non-NULL, make sure to call 
the function only if input is sane. - - Fixes: 68de70f2b3fc6 ("netlink_linearize: fix IPv6 layer 4 checksum mangling") - Signed-off-by: Phil Sutter - Acked-by: Pablo Neira Ayuso ---- - src/netlink_linearize.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/netlink_linearize.c b/src/netlink_linearize.c -index 498326d..cb1b7fe 100644 ---- a/src/netlink_linearize.c -+++ b/src/netlink_linearize.c -@@ -941,7 +941,7 @@ static void netlink_gen_payload_stmt(struct netlink_linearize_ctx *ctx, - nftnl_expr_set_u32(nle, NFTNL_EXPR_PAYLOAD_CSUM_OFFSET, - csum_off / BITS_PER_BYTE); - } -- if (expr->payload.base == PROTO_BASE_NETWORK_HDR && -+ if (expr->payload.base == PROTO_BASE_NETWORK_HDR && desc && - payload_needs_l4csum_update_pseudohdr(expr, desc)) - nftnl_expr_set_u32(nle, NFTNL_EXPR_PAYLOAD_FLAGS, - NFT_PAYLOAD_L4CSUM_PSEUDOHDR); --- -2.31.1 - diff --git a/SOURCES/0011-intervals-check-for-EXPR_F_REMOVE-in-case-of-element.patch b/SOURCES/0011-intervals-check-for-EXPR_F_REMOVE-in-case-of-element.patch new file mode 100644 index 0000000..fd1cbc3 --- /dev/null +++ b/SOURCES/0011-intervals-check-for-EXPR_F_REMOVE-in-case-of-element.patch @@ -0,0 +1,80 @@ +From 3ea1e90779e232776e72548e9a768df1771e0f2c Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 10:25:59 +0100 +Subject: [PATCH] intervals: check for EXPR_F_REMOVE in case of element + mismatch + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 6d1ee9267e7e5 + +commit 6d1ee9267e7e5e429a84d7bb8a8644f9eebddb22 +Author: Pablo Neira Ayuso +Date: Thu Jun 23 18:41:21 2022 +0200 + + intervals: check for EXPR_F_REMOVE in case of element mismatch + + If auto-merge is disable and element to be deleted finds no exact + matching, then bail out. + + Fixes: 3e8d934e4f72 ("intervals: support to partial deletion with automerge") + Signed-off-by: Pablo Neira Ayuso + +Signed-off-by: Phil Sutter +--- + src/intervals.c | 4 ++++ + tests/shell/testcases/sets/errors_0 | 20 ++++++++++++++++++-- + 2 files changed, 22 insertions(+), 2 deletions(-) + +diff --git a/src/intervals.c b/src/intervals.c +index c21b3ee..13009ca 100644 +--- a/src/intervals.c ++++ b/src/intervals.c +@@ -421,6 +421,10 @@ static int setelem_delete(struct list_head *msgs, struct set *set, + expr_error(msgs, i, "element does not exist"); + err = -1; + goto err; ++ } else if (i->flags & EXPR_F_REMOVE) { ++ expr_error(msgs, i, "element does not exist"); ++ err = -1; ++ goto err; + } + prev = NULL; + } +diff --git a/tests/shell/testcases/sets/errors_0 b/tests/shell/testcases/sets/errors_0 +index 2960b69..a676ac7 100755 +--- a/tests/shell/testcases/sets/errors_0 ++++ b/tests/shell/testcases/sets/errors_0 +@@ -1,7 +1,5 @@ + #!/bin/bash + +-set -e +- + RULESET="table ip x { + set y { + type ipv4_addr +@@ -11,4 +9,22 @@ RULESET="table ip x { + + delete element ip x y { 2.3.4.5 }" + ++$NFT -f - <<< $RULESET ++if [ $? 
-eq 0 ] ++then ++ exit 1 ++fi ++ ++RULESET="table ip x { ++ set y { ++ type ipv4_addr ++ flags interval ++ } ++} ++ ++add element x y { 1.1.1.1/24 } ++delete element x y { 1.1.1.1/24 } ++add element x y { 1.1.1.1/24 } ++delete element x y { 2.2.2.2/24 }" ++ + $NFT -f - <<< $RULESET || exit 0 +-- +2.41.0.rc1 + diff --git a/SOURCES/0011-tests-json_echo-Fix-for-Python3.patch b/SOURCES/0011-tests-json_echo-Fix-for-Python3.patch deleted file mode 100644 index be98168..0000000 --- a/SOURCES/0011-tests-json_echo-Fix-for-Python3.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 04d0d2e685063d422ce73b67eb01d4803100d379 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 12 Feb 2020 22:35:27 +0100 -Subject: [PATCH] tests: json_echo: Fix for Python3 - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1754047 -Upstream Status: nftables commit 582f142b1578b - -commit 582f142b1578b6036707242bfe874bcefc002ac2 -Author: Phil Sutter -Date: Thu Feb 6 01:21:30 2020 +0100 - - tests: json_echo: Fix for Python3 - - The keys() method returns an object which does not support indexing, so - convert it to a list prior to doing so. - - Fixes: a35e3a0cdc63a ("tests: json_echo: convert to py3") - Signed-off-by: Phil Sutter ---- - tests/json_echo/run-test.py | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tests/json_echo/run-test.py b/tests/json_echo/run-test.py -index a636d5f..fa7d69a 100755 ---- a/tests/json_echo/run-test.py -+++ b/tests/json_echo/run-test.py -@@ -119,7 +119,7 @@ def get_handle(output, search): - else: - data = item - -- k = search.keys()[0] -+ k = list(search.keys())[0] - - if not k in data: - continue --- -2.31.1 - diff --git a/SOURCES/0012-netlink_delinearize-allow-postprocessing-on-concaten.patch b/SOURCES/0012-netlink_delinearize-allow-postprocessing-on-concaten.patch new file mode 100644 index 0000000..f0e3f60 --- /dev/null +++ b/SOURCES/0012-netlink_delinearize-allow-postprocessing-on-concaten.patch @@ -0,0 +1,76 @@ +From 477a5632894a8bf6cba1f6e69a3f7d58d220820b Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 10:27:57 +0100 +Subject: [PATCH] netlink_delinearize: allow postprocessing on concatenated + elements + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 0542a431e8dcc + +commit 0542a431e8dccfa86fa5b1744f536e61a0b204f3 +Author: Florian Westphal +Date: Tue Jun 14 21:57:58 2022 +0200 + + netlink_delinearize: allow postprocessing on concatenated elements + + Currently there is no case where the individual expressions inside a + mapped concatenation need to be munged. + + However, to support proper delinearization for an input like + 'rule netdev nt nc set update ether saddr . vlan id timeout 5s @macset' + + we need to allow this. + + Right now, this gets listed as: + + update @macset { @ll,48,48 . @ll,112,16 & 0xfff timeout 5s } + + because the ethernet protocol is replaced by vlan beforehand, + so we fail to map @ll,48,48 to a vlan protocol. + + Likewise, we can't map the vlan info either because we cannot + cope with the 'and' operation properly, nor is it removed. + + Prepare for this by deleting and re-adding so that we do not + corrupt the linked list. + + After this, the list can be safely changed and a followup patch + can start to delete/reallocate expressions. 
+ + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + src/netlink_delinearize.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/src/netlink_delinearize.c b/src/netlink_delinearize.c +index 068c3bb..2f13990 100644 +--- a/src/netlink_delinearize.c ++++ b/src/netlink_delinearize.c +@@ -2538,16 +2538,21 @@ static void expr_postprocess(struct rule_pp_ctx *ctx, struct expr **exprp) + unsigned int type = expr->dtype->type, ntype = 0; + int off = expr->dtype->subtypes; + const struct datatype *dtype; ++ LIST_HEAD(tmp); ++ struct expr *n; + +- list_for_each_entry(i, &expr->expressions, list) { ++ list_for_each_entry_safe(i, n, &expr->expressions, list) { + if (type) { + dtype = concat_subtype_lookup(type, --off); + expr_set_type(i, dtype, dtype->byteorder); + } ++ list_del(&i->list); + expr_postprocess(ctx, &i); ++ list_add_tail(&i->list, &tmp); + + ntype = concat_subtype_add(ntype, i->dtype->type); + } ++ list_splice(&tmp, &expr->expressions); + datatype_set(expr, concat_type_alloc(ntype)); + break; + } +-- +2.41.0.rc1 + diff --git a/SOURCES/0012-tests-json_echo-Support-testing-host-binaries.patch b/SOURCES/0012-tests-json_echo-Support-testing-host-binaries.patch deleted file mode 100644 index 88cfa7f..0000000 --- a/SOURCES/0012-tests-json_echo-Support-testing-host-binaries.patch +++ /dev/null @@ -1,68 +0,0 @@ -From 0eb301a3f50fb70cb78d955692f3feea1ad8095e Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 12 Feb 2020 22:35:27 +0100 -Subject: [PATCH] tests: json_echo: Support testing host binaries - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1754047 -Upstream Status: nftables commit 106b1f2b93f82 - -commit 106b1f2b93f82784c18dd5e312bbf88e6c02a5b8 -Author: Phil Sutter -Date: Fri Jan 10 11:19:42 2020 +0100 - - tests: json_echo: Support testing host binaries - - Support -H/--host option to use host's libnftables.so.1. Alternatively - users may specify a custom library path via -l/--library option. - - Signed-off-by: Phil Sutter ---- - tests/json_echo/run-test.py | 23 +++++++++++++++++++---- - 1 file changed, 19 insertions(+), 4 deletions(-) - -diff --git a/tests/json_echo/run-test.py b/tests/json_echo/run-test.py -index fa7d69a..36a377a 100755 ---- a/tests/json_echo/run-test.py -+++ b/tests/json_echo/run-test.py -@@ -4,6 +4,7 @@ from __future__ import print_function - import sys - import os - import json -+import argparse - - TESTS_PATH = os.path.dirname(os.path.abspath(__file__)) - sys.path.insert(0, os.path.join(TESTS_PATH, '../../py/')) -@@ -13,12 +14,26 @@ from nftables import Nftables - # Change working directory to repository root - os.chdir(TESTS_PATH + "/../..") - --if not os.path.exists('src/.libs/libnftables.so'): -- print("The nftables library does not exist. " -- "You need to build the project.") -+parser = argparse.ArgumentParser(description='Run JSON echo tests') -+parser.add_argument('-H', '--host', action='store_true', -+ help='Run tests against installed libnftables.so.1') -+parser.add_argument('-l', '--library', default=None, -+ help='Path to libntables.so, overrides --host') -+args = parser.parse_args() -+ -+check_lib_path = True -+if args.library is None: -+ if args.host: -+ args.library = 'libnftables.so.1' -+ check_lib_path = False -+ else: -+ args.library = 'src/.libs/libnftables.so.1' -+ -+if check_lib_path and not os.path.exists(args.library): -+ print("Library not found at '%s'." 
% args.library) - sys.exit(1) - --nftables = Nftables(sofile = 'src/.libs/libnftables.so') -+nftables = Nftables(sofile = args.library) - nftables.set_echo_output(True) - - # various commands to work with --- -2.31.1 - diff --git a/SOURCES/0013-netlink_delinearize-postprocess-binary-ands-in-conca.patch b/SOURCES/0013-netlink_delinearize-postprocess-binary-ands-in-conca.patch new file mode 100644 index 0000000..7ef74f0 --- /dev/null +++ b/SOURCES/0013-netlink_delinearize-postprocess-binary-ands-in-conca.patch @@ -0,0 +1,159 @@ +From 120ec5410b0c9f8f84f2bfdf092228cc61899785 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 10:27:57 +0100 +Subject: [PATCH] netlink_delinearize: postprocess binary ands in + concatenations + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 89688c947efc3 + +commit 89688c947efc36d25c58c85650414fa3a491732e +Author: Florian Westphal +Date: Tue Jun 14 21:56:48 2022 +0200 + + netlink_delinearize: postprocess binary ands in concatenations + + Input: + update ether saddr . vlan id timeout 5s @macset + ether saddr . vlan id @macset + + Before this patch, gets rendered as: + update @macset { @ll,48,48 . @ll,112,16 & 0xfff timeout 5s } + @ll,48,48 . @ll,112,16 & 0xfff @macset + + After this, listing will show: + update @macset { @ll,48,48 . vlan id timeout 5s } + @ll,48,48 . vlan id @macset + + The @ll, ... is due to vlan description replacing the ethernet one, + so payload decode fails to take the concatenation apart (the ethernet + header payload info is matched vs. vlan template). + + This will be adjusted by a followup patch. + + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + include/netlink.h | 6 ++++++ + src/netlink_delinearize.c | 45 ++++++++++++++++++++++++++++++++++----- + 2 files changed, 46 insertions(+), 5 deletions(-) + +diff --git a/include/netlink.h b/include/netlink.h +index e8e0f68..71c888f 100644 +--- a/include/netlink.h ++++ b/include/netlink.h +@@ -42,10 +42,16 @@ struct netlink_parse_ctx { + struct netlink_ctx *nlctx; + }; + ++ ++#define RULE_PP_IN_CONCATENATION (1 << 0) ++ ++#define RULE_PP_REMOVE_OP_AND (RULE_PP_IN_CONCATENATION) ++ + struct rule_pp_ctx { + struct proto_ctx pctx; + struct payload_dep_ctx pdctx; + struct stmt *stmt; ++ unsigned int flags; + }; + + extern const struct input_descriptor indesc_netlink; +diff --git a/src/netlink_delinearize.c b/src/netlink_delinearize.c +index 2f13990..cba419d 100644 +--- a/src/netlink_delinearize.c ++++ b/src/netlink_delinearize.c +@@ -2259,12 +2259,13 @@ static void binop_adjust(const struct expr *binop, struct expr *right, + } + } + +-static void binop_postprocess(struct rule_pp_ctx *ctx, struct expr *expr, +- struct expr **expr_binop) ++static void __binop_postprocess(struct rule_pp_ctx *ctx, ++ struct expr *expr, ++ struct expr *left, ++ struct expr *mask, ++ struct expr **expr_binop) + { + struct expr *binop = *expr_binop; +- struct expr *left = binop->left; +- struct expr *mask = binop->right; + unsigned int shift; + + assert(binop->etype == EXPR_BINOP); +@@ -2300,15 +2301,26 @@ static void binop_postprocess(struct rule_pp_ctx *ctx, struct expr *expr, + + assert(binop->left == left); + *expr_binop = expr_get(left); +- expr_free(binop); + + if (left->etype == EXPR_PAYLOAD) + payload_match_postprocess(ctx, expr, left); + else if (left->etype == EXPR_EXTHDR && right) + expr_set_type(right, left->dtype, left->byteorder); ++ ++ expr_free(binop); + } + } + ++static void binop_postprocess(struct rule_pp_ctx *ctx, struct 
expr *expr, ++ struct expr **expr_binop) ++{ ++ struct expr *binop = *expr_binop; ++ struct expr *left = binop->left; ++ struct expr *mask = binop->right; ++ ++ __binop_postprocess(ctx, expr, left, mask, expr_binop); ++} ++ + static void map_binop_postprocess(struct rule_pp_ctx *ctx, struct expr *expr) + { + struct expr *binop = expr->map; +@@ -2541,6 +2553,7 @@ static void expr_postprocess(struct rule_pp_ctx *ctx, struct expr **exprp) + LIST_HEAD(tmp); + struct expr *n; + ++ ctx->flags |= RULE_PP_IN_CONCATENATION; + list_for_each_entry_safe(i, n, &expr->expressions, list) { + if (type) { + dtype = concat_subtype_lookup(type, --off); +@@ -2552,6 +2565,7 @@ static void expr_postprocess(struct rule_pp_ctx *ctx, struct expr **exprp) + + ntype = concat_subtype_add(ntype, i->dtype->type); + } ++ ctx->flags &= ~RULE_PP_IN_CONCATENATION; + list_splice(&tmp, &expr->expressions); + datatype_set(expr, concat_type_alloc(ntype)); + break; +@@ -2568,6 +2582,27 @@ static void expr_postprocess(struct rule_pp_ctx *ctx, struct expr **exprp) + expr_set_type(expr->right, &integer_type, + BYTEORDER_HOST_ENDIAN); + break; ++ case OP_AND: ++ expr_set_type(expr->right, expr->left->dtype, ++ expr->left->byteorder); ++ ++ /* Do not process OP_AND in ordinary rule context. ++ * ++ * Removal needs to be performed as part of the relational ++ * operation because the RHS constant might need to be adjusted ++ * (shifted). ++ * ++ * This is different in set element context or concatenations: ++ * There is no relational operation (eq, neq and so on), thus ++ * it needs to be processed right away. ++ */ ++ if ((ctx->flags & RULE_PP_REMOVE_OP_AND) && ++ expr->left->etype == EXPR_PAYLOAD && ++ expr->right->etype == EXPR_VALUE) { ++ __binop_postprocess(ctx, expr, expr->left, expr->right, exprp); ++ return; ++ } ++ break; + default: + expr_set_type(expr->right, expr->left->dtype, + expr->left->byteorder); +-- +2.41.0.rc1 + diff --git a/SOURCES/0013-tests-monitor-Support-running-individual-test-cases.patch b/SOURCES/0013-tests-monitor-Support-running-individual-test-cases.patch deleted file mode 100644 index deef550..0000000 --- a/SOURCES/0013-tests-monitor-Support-running-individual-test-cases.patch +++ /dev/null @@ -1,64 +0,0 @@ -From 67f168ebfbeb26a8d7e4f1b9284cc32f13ceff9b Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 12 Feb 2020 22:35:27 +0100 -Subject: [PATCH] tests: monitor: Support running individual test cases - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1754047 -Upstream Status: nftables commit eb5034108cdc6 - -commit eb5034108cdc60341b2d61599077db935b6bbc4f -Author: Phil Sutter -Date: Fri Jan 10 11:15:45 2020 +0100 - - tests: monitor: Support running individual test cases - - Recognize testcase paths on command line and limit testing on those - only. 
- - Signed-off-by: Phil Sutter - Acked-by: Pablo Neira Ayuso ---- - tests/monitor/run-tests.sh | 9 +++++++-- - 1 file changed, 7 insertions(+), 2 deletions(-) - -diff --git a/tests/monitor/run-tests.sh b/tests/monitor/run-tests.sh -index 0478cf6..efacdaa 100755 ---- a/tests/monitor/run-tests.sh -+++ b/tests/monitor/run-tests.sh -@@ -108,6 +108,7 @@ echo_run_test() { - touch $output_file - } - -+testcases="" - while [ -n "$1" ]; do - case "$1" in - -d|--debug) -@@ -118,11 +119,15 @@ while [ -n "$1" ]; do - test_json=true - shift - ;; -+ testcases/*.t) -+ testcases+=" $1" -+ shift -+ ;; - *) - echo "unknown option '$1'" - ;& - -h|--help) -- echo "Usage: $(basename $0) [-j|--json] [-d|--debug]" -+ echo "Usage: $(basename $0) [-j|--json] [-d|--debug] [testcase ...]" - exit 1 - ;; - esac -@@ -138,7 +143,7 @@ for variant in $variants; do - run_test=${variant}_run_test - output_append=${variant}_output_append - -- for testcase in testcases/*.t; do -+ for testcase in ${testcases:-testcases/*.t}; do - echo "$variant: running tests from file $(basename $testcase)" - # files are like this: - # --- -2.31.1 - diff --git a/SOURCES/0014-proto-track-full-stack-of-seen-l2-protocols-not-just.patch b/SOURCES/0014-proto-track-full-stack-of-seen-l2-protocols-not-just.patch new file mode 100644 index 0000000..8ce0ca7 --- /dev/null +++ b/SOURCES/0014-proto-track-full-stack-of-seen-l2-protocols-not-just.patch @@ -0,0 +1,287 @@ +From 5246e288a724e7b9641c94f228096dc1529bb2ea Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 10:27:57 +0100 +Subject: [PATCH] proto: track full stack of seen l2 protocols, not just + cumulative offset + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 0d9daa0407212 + +commit 0d9daa0407212c8cc89b3ea8aee031ddf0109b08 +Author: Florian Westphal +Date: Mon Jul 25 14:32:13 2022 +0200 + + proto: track full stack of seen l2 protocols, not just cumulative offset + + For input, a cumulative size counter of all pushed l2 headers is enough, + because we have the full expression tree available to us. + + For delinearization we need to track all seen l2 headers, else we lose + information that we might need at a later time. + + Consider: + + rule netdev nt nc set update ether saddr . vlan id + + during delinearization, the vlan proto_desc replaces the ethernet one, + and by the time we try to split the concatenation apart we will search + the ether saddr offset vs. the templates for proto_vlan. + + This replaces the offset with an array that stores the protocol + descriptions seen. + + Then, if the payload offset is larger than our description, search the + l2 stack and adjust the offset until we're within the expected offset + boundary. 
+ + Reported-by: Eric Garver + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + include/proto.h | 3 +- + src/evaluate.c | 15 +++++++-- + src/netlink_delinearize.c | 5 --- + src/payload.c | 67 ++++++++++++++++++++++++++++++++------- + src/proto.c | 2 -- + 5 files changed, 71 insertions(+), 21 deletions(-) + +diff --git a/include/proto.h b/include/proto.h +index a04240a..35e760c 100644 +--- a/include/proto.h ++++ b/include/proto.h +@@ -193,13 +193,14 @@ struct proto_ctx { + struct { + struct location location; + const struct proto_desc *desc; +- unsigned int offset; + struct { + struct location location; + const struct proto_desc *desc; + } protos[PROTO_CTX_NUM_PROTOS]; + unsigned int num_protos; + } protocol[PROTO_BASE_MAX + 1]; ++ const struct proto_desc *stacked_ll[PROTO_CTX_NUM_PROTOS]; ++ uint8_t stacked_ll_count; + }; + + extern void proto_ctx_init(struct proto_ctx *ctx, unsigned int family, +diff --git a/src/evaluate.c b/src/evaluate.c +index 82bf131..9246064 100644 +--- a/src/evaluate.c ++++ b/src/evaluate.c +@@ -678,7 +678,13 @@ static int resolve_protocol_conflict(struct eval_ctx *ctx, + conflict_resolution_gen_dependency(ctx, link, payload, &nstmt) < 0) + return 1; + +- payload->payload.offset += ctx->pctx.protocol[base].offset; ++ if (base == PROTO_BASE_LL_HDR) { ++ unsigned int i; ++ ++ for (i = 0; i < ctx->pctx.stacked_ll_count; i++) ++ payload->payload.offset += ctx->pctx.stacked_ll[i]->length; ++ } ++ + rule_stmt_insert_at(ctx->rule, nstmt, ctx->stmt); + + return 0; +@@ -727,7 +733,12 @@ static int __expr_evaluate_payload(struct eval_ctx *ctx, struct expr *expr) + if (desc == payload->payload.desc) { + const struct proto_hdr_template *tmpl; + +- payload->payload.offset += ctx->pctx.protocol[base].offset; ++ if (desc->base == PROTO_BASE_LL_HDR) { ++ unsigned int i; ++ ++ for (i = 0; i < ctx->pctx.stacked_ll_count; i++) ++ payload->payload.offset += ctx->pctx.stacked_ll[i]->length; ++ } + check_icmp: + if (desc != &proto_icmp && desc != &proto_icmp6) + return 0; +diff --git a/src/netlink_delinearize.c b/src/netlink_delinearize.c +index cba419d..0b5519d 100644 +--- a/src/netlink_delinearize.c ++++ b/src/netlink_delinearize.c +@@ -1976,11 +1976,6 @@ static void payload_match_postprocess(struct rule_pp_ctx *ctx, + struct expr *expr, + struct expr *payload) + { +- enum proto_bases base = payload->payload.base; +- +- assert(payload->payload.offset >= ctx->pctx.protocol[base].offset); +- payload->payload.offset -= ctx->pctx.protocol[base].offset; +- + switch (expr->op) { + case OP_EQ: + case OP_NEQ: +diff --git a/src/payload.c b/src/payload.c +index 66418cd..2c0d0ac 100644 +--- a/src/payload.c ++++ b/src/payload.c +@@ -116,8 +116,13 @@ static void payload_expr_pctx_update(struct proto_ctx *ctx, + if (desc->base == base->base) { + assert(base->length > 0); + +- if (!left->payload.is_raw) +- ctx->protocol[base->base].offset += base->length; ++ if (!left->payload.is_raw) { ++ if (desc->base == PROTO_BASE_LL_HDR && ++ ctx->stacked_ll_count < PROTO_CTX_NUM_PROTOS) { ++ ctx->stacked_ll[ctx->stacked_ll_count] = base; ++ ctx->stacked_ll_count++; ++ } ++ } + } + proto_ctx_update(ctx, desc->base, loc, desc); + } +@@ -869,6 +874,38 @@ void exthdr_dependency_kill(struct payload_dep_ctx *ctx, struct expr *expr, + } + } + ++static const struct proto_desc *get_stacked_desc(const struct proto_ctx *ctx, ++ const struct proto_desc *top, ++ const struct expr *e, ++ unsigned int *skip) ++{ ++ unsigned int i, total, payload_offset = e->payload.offset; ++ ++ assert(e->etype == 
EXPR_PAYLOAD); ++ ++ if (e->payload.base != PROTO_BASE_LL_HDR || ++ payload_offset < top->length) { ++ *skip = 0; ++ return top; ++ } ++ ++ for (i = 0, total = 0; i < ctx->stacked_ll_count; i++) { ++ const struct proto_desc *stacked; ++ ++ stacked = ctx->stacked_ll[i]; ++ if (payload_offset < stacked->length) { ++ *skip = total; ++ return stacked; ++ } ++ ++ payload_offset -= stacked->length; ++ total += stacked->length; ++ } ++ ++ *skip = total; ++ return top; ++} ++ + /** + * payload_expr_complete - fill in type information of a raw payload expr + * +@@ -880,9 +917,10 @@ void exthdr_dependency_kill(struct payload_dep_ctx *ctx, struct expr *expr, + */ + void payload_expr_complete(struct expr *expr, const struct proto_ctx *ctx) + { ++ unsigned int payload_offset = expr->payload.offset; + const struct proto_desc *desc; + const struct proto_hdr_template *tmpl; +- unsigned int i; ++ unsigned int i, total; + + assert(expr->etype == EXPR_PAYLOAD); + +@@ -891,9 +929,12 @@ void payload_expr_complete(struct expr *expr, const struct proto_ctx *ctx) + return; + assert(desc->base == expr->payload.base); + ++ desc = get_stacked_desc(ctx, desc, expr, &total); ++ payload_offset -= total; ++ + for (i = 0; i < array_size(desc->templates); i++) { + tmpl = &desc->templates[i]; +- if (tmpl->offset != expr->payload.offset || ++ if (tmpl->offset != payload_offset || + tmpl->len != expr->len) + continue; + +@@ -950,6 +991,7 @@ bool payload_expr_trim(struct expr *expr, struct expr *mask, + unsigned int payload_len = expr->len; + const struct proto_desc *desc; + unsigned int off, i, len = 0; ++ unsigned int total; + + assert(expr->etype == EXPR_PAYLOAD); + +@@ -959,10 +1001,8 @@ bool payload_expr_trim(struct expr *expr, struct expr *mask, + + assert(desc->base == expr->payload.base); + +- if (ctx->protocol[expr->payload.base].offset) { +- assert(payload_offset >= ctx->protocol[expr->payload.base].offset); +- payload_offset -= ctx->protocol[expr->payload.base].offset; +- } ++ desc = get_stacked_desc(ctx, desc, expr, &total); ++ payload_offset -= total; + + off = round_up(mask->len, BITS_PER_BYTE) - mask_len; + payload_offset += off; +@@ -1009,10 +1049,11 @@ bool payload_expr_trim(struct expr *expr, struct expr *mask, + void payload_expr_expand(struct list_head *list, struct expr *expr, + const struct proto_ctx *ctx) + { ++ unsigned int payload_offset = expr->payload.offset; + const struct proto_hdr_template *tmpl; + const struct proto_desc *desc; ++ unsigned int i, total; + struct expr *new; +- unsigned int i; + + assert(expr->etype == EXPR_PAYLOAD); + +@@ -1021,13 +1062,16 @@ void payload_expr_expand(struct list_head *list, struct expr *expr, + goto raw; + assert(desc->base == expr->payload.base); + ++ desc = get_stacked_desc(ctx, desc, expr, &total); ++ payload_offset -= total; ++ + for (i = 1; i < array_size(desc->templates); i++) { + tmpl = &desc->templates[i]; + + if (tmpl->len == 0) + break; + +- if (tmpl->offset != expr->payload.offset) ++ if (tmpl->offset != payload_offset) + continue; + + if (tmpl->icmp_dep && ctx->th_dep.icmp.type && +@@ -1039,6 +1083,7 @@ void payload_expr_expand(struct list_head *list, struct expr *expr, + list_add_tail(&new->list, list); + expr->len -= tmpl->len; + expr->payload.offset += tmpl->len; ++ payload_offset += tmpl->len; + if (expr->len == 0) + return; + } else if (expr->len > 0) { +@@ -1051,7 +1096,7 @@ void payload_expr_expand(struct list_head *list, struct expr *expr, + } + raw: + new = payload_expr_alloc(&expr->location, NULL, 0); +- payload_init_raw(new, 
expr->payload.base, expr->payload.offset, ++ payload_init_raw(new, expr->payload.base, payload_offset, + expr->len); + list_add_tail(&new->list, list); + } +diff --git a/src/proto.c b/src/proto.c +index a013a00..2663f21 100644 +--- a/src/proto.c ++++ b/src/proto.c +@@ -160,8 +160,6 @@ static void proto_ctx_debug(const struct proto_ctx *ctx, enum proto_bases base, + proto_base_names[i], + ctx->protocol[i].desc ? ctx->protocol[i].desc->name : + "none"); +- if (ctx->protocol[i].offset) +- pr_debug(" (offset: %u)", ctx->protocol[i].offset); + if (i == base) + pr_debug(" <-"); + pr_debug("\n"); +-- +2.41.0.rc1 + diff --git a/SOURCES/0014-tests-monitor-Support-testing-host-s-nft-binary.patch b/SOURCES/0014-tests-monitor-Support-testing-host-s-nft-binary.patch deleted file mode 100644 index 8ab1067..0000000 --- a/SOURCES/0014-tests-monitor-Support-testing-host-s-nft-binary.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 18e1b545cbd2d055b16ec3bf5f481d8032dc5dbe Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 12 Feb 2020 22:35:27 +0100 -Subject: [PATCH] tests: monitor: Support testing host's nft binary - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1754047 -Upstream Status: nftables commit 15ede6857c8c5 - -commit 15ede6857c8c578ec6211c8b68424183ba1baf1a -Author: Phil Sutter -Date: Wed Feb 5 19:48:53 2020 +0100 - - tests: monitor: Support testing host's nft binary - - Add support for -H/--host flag to use 'nft' tool from $PATH instead of - the local one. - - Signed-off-by: Phil Sutter ---- - tests/monitor/run-tests.sh | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/tests/monitor/run-tests.sh b/tests/monitor/run-tests.sh -index efacdaa..ffb833a 100755 ---- a/tests/monitor/run-tests.sh -+++ b/tests/monitor/run-tests.sh -@@ -119,6 +119,10 @@ while [ -n "$1" ]; do - test_json=true - shift - ;; -+ -H|--host) -+ nft=nft -+ shift -+ ;; - testcases/*.t) - testcases+=" $1" - shift --- -2.31.1 - diff --git a/SOURCES/0015-debug-dump-the-l2-protocol-stack.patch b/SOURCES/0015-debug-dump-the-l2-protocol-stack.patch new file mode 100644 index 0000000..dd296ec --- /dev/null +++ b/SOURCES/0015-debug-dump-the-l2-protocol-stack.patch @@ -0,0 +1,44 @@ +From 33df569ad87c851596c02663fb4941bc0783d08c Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 10:27:57 +0100 +Subject: [PATCH] debug: dump the l2 protocol stack + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit dbd5f348c71de + +commit dbd5f348c71decf0baa8fb592c576f63fa232f50 +Author: Florian Westphal +Date: Mon Jul 25 16:42:23 2022 +0200 + + debug: dump the l2 protocol stack + + Previously we used to print the cumulative size of the headers, + update this to print the tracked l2 stack. 
+ + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + src/proto.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/src/proto.c b/src/proto.c +index 2663f21..c496482 100644 +--- a/src/proto.c ++++ b/src/proto.c +@@ -154,6 +154,12 @@ static void proto_ctx_debug(const struct proto_ctx *ctx, enum proto_bases base, + if (!(debug_mask & NFT_DEBUG_PROTO_CTX)) + return; + ++ if (base == PROTO_BASE_LL_HDR && ctx->stacked_ll_count) { ++ pr_debug(" saved ll headers:"); ++ for (i = 0; i < ctx->stacked_ll_count; i++) ++ pr_debug(" %s", ctx->stacked_ll[i]->name); ++ } ++ + pr_debug("update %s protocol context:\n", proto_base_names[base]); + for (i = PROTO_BASE_LL_HDR; i <= PROTO_BASE_MAX; i++) { + pr_debug(" %-20s: %s", +-- +2.41.0.rc1 + diff --git a/SOURCES/0015-tests-py-Support-testing-host-binaries.patch b/SOURCES/0015-tests-py-Support-testing-host-binaries.patch deleted file mode 100644 index 8e0cf3d..0000000 --- a/SOURCES/0015-tests-py-Support-testing-host-binaries.patch +++ /dev/null @@ -1,76 +0,0 @@ -From 74575c409bad2940470f31946c97430043c3195e Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 12 Feb 2020 22:35:27 +0100 -Subject: [PATCH] tests: py: Support testing host binaries - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1754047 -Upstream Status: nftables commit 5f2746205e50c - -commit 5f2746205e50c77295d0f84f8178ee3a1ce15407 -Author: Phil Sutter -Date: Thu Feb 6 01:36:01 2020 +0100 - - tests: py: Support testing host binaries - - Support -H/--host option to use host's libnftables.so.1. Alternatively - users may specify a custom library path via -l/--library option. - - Signed-off-by: Phil Sutter ---- - tests/py/nft-test.py | 22 ++++++++++++++++++---- - 1 file changed, 18 insertions(+), 4 deletions(-) - -diff --git a/tests/py/nft-test.py b/tests/py/nft-test.py -index 6edca3c..01ee6c9 100755 ---- a/tests/py/nft-test.py -+++ b/tests/py/nft-test.py -@@ -1357,10 +1357,16 @@ def main(): - dest='force_all_family', - help='keep testing all families on error') - -+ parser.add_argument('-H', '--host', action='store_true', -+ help='run tests against installed libnftables.so.1') -+ - parser.add_argument('-j', '--enable-json', action='store_true', - dest='enable_json', - help='test JSON functionality as well') - -+ parser.add_argument('-l', '--library', default=None, -+ help='path to libntables.so.1, overrides --host') -+ - parser.add_argument('-s', '--schema', action='store_true', - dest='enable_schema', - help='verify json input/output against schema') -@@ -1388,9 +1394,17 @@ def main(): - # Change working directory to repository root - os.chdir(TESTS_PATH + "/../..") - -- if not os.path.exists('src/.libs/libnftables.so'): -- print("The nftables library does not exist. " -- "You need to build the project.") -+ check_lib_path = True -+ if args.library is None: -+ if args.host: -+ args.library = 'libnftables.so.1' -+ check_lib_path = False -+ else: -+ args.library = 'src/.libs/libnftables.so.1' -+ -+ if check_lib_path and not os.path.exists(args.library): -+ print("The nftables library at '%s' does not exist. " -+ "You need to build the project." 
% args.library) - return - - if args.enable_schema and not args.enable_json: -@@ -1398,7 +1412,7 @@ def main(): - return - - global nftables -- nftables = Nftables(sofile = 'src/.libs/libnftables.so') -+ nftables = Nftables(sofile = args.library) - - test_files = files_ok = run_total = 0 - tests = passed = warnings = errors = 0 --- -2.31.1 - diff --git a/SOURCES/0016-doc-nft.8-Mention-wildcard-interface-matching.patch b/SOURCES/0016-doc-nft.8-Mention-wildcard-interface-matching.patch deleted file mode 100644 index c4bc399..0000000 --- a/SOURCES/0016-doc-nft.8-Mention-wildcard-interface-matching.patch +++ /dev/null @@ -1,43 +0,0 @@ -From d58192a8d2810271d5c6525dc66ba1e1ec3fd2b7 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 12 Feb 2020 22:39:44 +0100 -Subject: [PATCH] doc: nft.8: Mention wildcard interface matching - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1763652 -Upstream Status: nftables commit 03d45ad330a25 - -commit 03d45ad330a25323610648bb05f550e0fb9d65b2 -Author: Phil Sutter -Date: Thu Feb 6 12:24:51 2020 +0100 - - doc: nft.8: Mention wildcard interface matching - - Special meaning of asterisk in interface names wasn't described - anywhere. - - Signed-off-by: Phil Sutter ---- - doc/primary-expression.txt | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/doc/primary-expression.txt b/doc/primary-expression.txt -index 5473d59..a5cab9d 100644 ---- a/doc/primary-expression.txt -+++ b/doc/primary-expression.txt -@@ -36,6 +36,13 @@ add such a rule, it will stop matching if the interface gets renamed and it - will match again in case interface gets deleted and later a new interface - with the same name is created. - -+Like with iptables, wildcard matching on interface name prefixes is available for -+*iifname* and *oifname* matches by appending an asterisk (*) character. Note -+however that unlike iptables, nftables does not accept interface names -+consisting of the wildcard character only - users are supposed to just skip -+those always matching expressions. In order to match on literal asterisk -+character, one may escape it using backslash (\). -+ - .Meta expression types - [options="header"] - |================== --- -2.31.1 - diff --git a/SOURCES/0016-tests-add-a-test-case-for-ether-and-vlan-listing.patch b/SOURCES/0016-tests-add-a-test-case-for-ether-and-vlan-listing.patch new file mode 100644 index 0000000..6e16638 --- /dev/null +++ b/SOURCES/0016-tests-add-a-test-case-for-ether-and-vlan-listing.patch @@ -0,0 +1,65 @@ +From 1773e6c1975ee4a6b00c24a99bf57b4597af295d Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 10:27:57 +0100 +Subject: [PATCH] tests: add a test case for ether and vlan listing + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit f680055cd4377 + +commit f680055cd4377f2f531f5f77b3aaa7550988665d +Author: Florian Westphal +Date: Mon Jul 25 19:31:22 2022 +0200 + + tests: add a test case for ether and vlan listing + + before this patch series, test fails dump validation: + - update @macset { ether saddr . vlan id timeout 5s } counter packets 0 bytes 0 + - ether saddr . vlan id @macset + + update @macset { @ll,48,48 . @ll,112,16 & 0xfff timeout 5s } counter packets 0 bytes 0 + + @ll,48,48 . 
@ll,112,16 & 0xfff @macset + + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + tests/shell/testcases/sets/0070stacked_l2_headers | 6 ++++++ + .../sets/dumps/0070stacked_l2_headers.nft | 14 ++++++++++++++ + 2 files changed, 20 insertions(+) + create mode 100755 tests/shell/testcases/sets/0070stacked_l2_headers + create mode 100644 tests/shell/testcases/sets/dumps/0070stacked_l2_headers.nft + +diff --git a/tests/shell/testcases/sets/0070stacked_l2_headers b/tests/shell/testcases/sets/0070stacked_l2_headers +new file mode 100755 +index 0000000..07820b7 +--- /dev/null ++++ b/tests/shell/testcases/sets/0070stacked_l2_headers +@@ -0,0 +1,6 @@ ++#!/bin/bash ++ ++set -e ++dumpfile=$(dirname $0)/dumps/$(basename $0).nft ++ ++$NFT -f "$dumpfile" +diff --git a/tests/shell/testcases/sets/dumps/0070stacked_l2_headers.nft b/tests/shell/testcases/sets/dumps/0070stacked_l2_headers.nft +new file mode 100644 +index 0000000..ef254b9 +--- /dev/null ++++ b/tests/shell/testcases/sets/dumps/0070stacked_l2_headers.nft +@@ -0,0 +1,14 @@ ++table netdev nt { ++ set macset { ++ typeof ether saddr . vlan id ++ size 1024 ++ flags dynamic,timeout ++ } ++ ++ chain nc { ++ update @macset { ether saddr . vlan id timeout 5s } counter packets 0 bytes 0 ++ ether saddr . vlan id @macset ++ vlan pcp 1 ++ ether saddr 0a:0b:0c:0d:0e:0f vlan id 42 ++ } ++} +-- +2.41.0.rc1 + diff --git a/SOURCES/0017-netlink_delinearize-also-postprocess-OP_AND-in-set-e.patch b/SOURCES/0017-netlink_delinearize-also-postprocess-OP_AND-in-set-e.patch new file mode 100644 index 0000000..7c89f29 --- /dev/null +++ b/SOURCES/0017-netlink_delinearize-also-postprocess-OP_AND-in-set-e.patch @@ -0,0 +1,99 @@ +From bba1a2086ec7bcc0cfa8df9e12c6cc1375180011 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 10:27:57 +0100 +Subject: [PATCH] netlink_delinearize: also postprocess OP_AND in set element + context + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit b1e3ed0335d13 + +commit b1e3ed0335d13d206a2a2698a1ba189fa396dbf3 +Author: Florian Westphal +Date: Mon Aug 1 13:03:18 2022 +0200 + + netlink_delinearize: also postprocess OP_AND in set element context + + Pablo reports: + add rule netdev nt y update @macset { vlan id timeout 5s } + + listing still shows the raw expression: + update @macset { @ll,112,16 & 0xfff timeout 5s } + + so also cover the 'set element' case. 
+ + Reported-by: Pablo Neira Ayuso + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + include/netlink.h | 4 +++- + src/netlink_delinearize.c | 2 ++ + .../sets/dumps/0070stacked_l2_headers.nft | 14 ++++++++++++++ + 3 files changed, 19 insertions(+), 1 deletion(-) + +diff --git a/include/netlink.h b/include/netlink.h +index 71c888f..63d07ed 100644 +--- a/include/netlink.h ++++ b/include/netlink.h +@@ -44,8 +44,10 @@ struct netlink_parse_ctx { + + + #define RULE_PP_IN_CONCATENATION (1 << 0) ++#define RULE_PP_IN_SET_ELEM (1 << 1) + +-#define RULE_PP_REMOVE_OP_AND (RULE_PP_IN_CONCATENATION) ++#define RULE_PP_REMOVE_OP_AND (RULE_PP_IN_CONCATENATION | \ ++ RULE_PP_IN_SET_ELEM) + + struct rule_pp_ctx { + struct proto_ctx pctx; +diff --git a/src/netlink_delinearize.c b/src/netlink_delinearize.c +index 0b5519d..c6ad84d 100644 +--- a/src/netlink_delinearize.c ++++ b/src/netlink_delinearize.c +@@ -2660,7 +2660,9 @@ static void expr_postprocess(struct rule_pp_ctx *ctx, struct expr **exprp) + expr_postprocess(ctx, &expr->prefix); + break; + case EXPR_SET_ELEM: ++ ctx->flags |= RULE_PP_IN_SET_ELEM; + expr_postprocess(ctx, &expr->key); ++ ctx->flags &= ~RULE_PP_IN_SET_ELEM; + break; + case EXPR_EXTHDR: + exthdr_dependency_kill(&ctx->pdctx, expr, ctx->pctx.family); +diff --git a/tests/shell/testcases/sets/dumps/0070stacked_l2_headers.nft b/tests/shell/testcases/sets/dumps/0070stacked_l2_headers.nft +index ef254b9..0057e9c 100644 +--- a/tests/shell/testcases/sets/dumps/0070stacked_l2_headers.nft ++++ b/tests/shell/testcases/sets/dumps/0070stacked_l2_headers.nft +@@ -1,14 +1,28 @@ + table netdev nt { ++ set vlanidset { ++ typeof vlan id ++ size 1024 ++ flags dynamic,timeout ++ } ++ + set macset { + typeof ether saddr . vlan id + size 1024 + flags dynamic,timeout + } + ++ set ipset { ++ typeof vlan id . ip saddr ++ size 1024 ++ flags dynamic,timeout ++ } ++ + chain nc { + update @macset { ether saddr . vlan id timeout 5s } counter packets 0 bytes 0 + ether saddr . vlan id @macset + vlan pcp 1 + ether saddr 0a:0b:0c:0d:0e:0f vlan id 42 ++ update @vlanidset { vlan id timeout 5s } counter packets 0 bytes 0 ++ update @ipset { vlan id . ip saddr timeout 5s } counter packets 0 bytes 0 + } + } +-- +2.41.0.rc1 + diff --git a/SOURCES/0017-scanner-Extend-asteriskstring-definition.patch b/SOURCES/0017-scanner-Extend-asteriskstring-definition.patch deleted file mode 100644 index 6468662..0000000 --- a/SOURCES/0017-scanner-Extend-asteriskstring-definition.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 34ba60c0c2b6057e8b56a77e47899bbeccd88bfd Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 12 Feb 2020 22:39:44 +0100 -Subject: [PATCH] scanner: Extend asteriskstring definition - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1763652 -Upstream Status: nftables commit 556c5a94b8067 - -commit 556c5a94b8067f33ef0a42836753dae0736b7524 -Author: Phil Sutter -Date: Thu Feb 6 12:31:56 2020 +0100 - - scanner: Extend asteriskstring definition - - Accept escaped asterisks also mid-string and as only character. - Especially the latter will help when translating from iptables where - asterisk has no special meaning. 
- - Signed-off-by: Phil Sutter ---- - src/scanner.l | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/scanner.l b/src/scanner.l -index d32adf4..7daf5c1 100644 ---- a/src/scanner.l -+++ b/src/scanner.l -@@ -120,7 +120,7 @@ numberstring ({decstring}|{hexstring}) - letter [a-zA-Z] - string ({letter}|[_.])({letter}|{digit}|[/\-_\.])* - quotedstring \"[^"]*\" --asteriskstring ({string}\*|{string}\\\*) -+asteriskstring ({string}\*|{string}\\\*|\\\*|{string}\\\*{string}) - comment #.*$ - slash \/ - --- -2.31.1 - diff --git a/SOURCES/0018-evaluate-search-stacked-header-list-for-matching-pay.patch b/SOURCES/0018-evaluate-search-stacked-header-list-for-matching-pay.patch new file mode 100644 index 0000000..287058c --- /dev/null +++ b/SOURCES/0018-evaluate-search-stacked-header-list-for-matching-pay.patch @@ -0,0 +1,198 @@ +From da9367286d4589a3371d547cd8e6dd6d985cc69a Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 10:27:58 +0100 +Subject: [PATCH] evaluate: search stacked header list for matching payload dep + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 87c3041bfd244 + +commit 87c3041bfd244aaf39e644d33c0df4fe04079e1c +Author: Florian Westphal +Date: Mon Jul 25 20:02:28 2022 +0200 + + evaluate: search stacked header list for matching payload dep + + "ether saddr 0:1:2:3:4:6 vlan id 2" works, but reverse fails: + + "vlan id 2 ether saddr 0:1:2:3:4:6" will give + Error: conflicting protocols specified: vlan vs. ether + + After "proto: track full stack of seen l2 protocols, not just cumulative offset", + we have a list of all l2 headers, so search those to see if we had this + proto base in the past before rejecting this. + + Reported-by: Eric Garver + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + src/evaluate.c | 21 +++++++--- + tests/py/bridge/vlan.t | 3 ++ + tests/py/bridge/vlan.t.json | 56 +++++++++++++++++++++++++++ + tests/py/bridge/vlan.t.payload | 16 ++++++++ + tests/py/bridge/vlan.t.payload.netdev | 20 ++++++++++ + 5 files changed, 110 insertions(+), 6 deletions(-) + +diff --git a/src/evaluate.c b/src/evaluate.c +index 9246064..d67f915 100644 +--- a/src/evaluate.c ++++ b/src/evaluate.c +@@ -659,13 +659,22 @@ static int resolve_protocol_conflict(struct eval_ctx *ctx, + struct stmt *nstmt = NULL; + int link, err; + +- if (payload->payload.base == PROTO_BASE_LL_HDR && +- proto_is_dummy(desc)) { +- err = meta_iiftype_gen_dependency(ctx, payload, &nstmt); +- if (err < 0) +- return err; ++ if (payload->payload.base == PROTO_BASE_LL_HDR) { ++ if (proto_is_dummy(desc)) { ++ err = meta_iiftype_gen_dependency(ctx, payload, &nstmt); ++ if (err < 0) ++ return err; + +- rule_stmt_insert_at(ctx->rule, nstmt, ctx->stmt); ++ rule_stmt_insert_at(ctx->rule, nstmt, ctx->stmt); ++ } else { ++ unsigned int i; ++ ++ /* payload desc stored in the L2 header stack? No conflict. 
*/ ++ for (i = 0; i < ctx->pctx.stacked_ll_count; i++) { ++ if (ctx->pctx.stacked_ll[i] == payload->payload.desc) ++ return 0; ++ } ++ } + } + + assert(base <= PROTO_BASE_MAX); +diff --git a/tests/py/bridge/vlan.t b/tests/py/bridge/vlan.t +index 924ed4e..4920601 100644 +--- a/tests/py/bridge/vlan.t ++++ b/tests/py/bridge/vlan.t +@@ -47,3 +47,6 @@ ether type ip vlan id 1 ip saddr 10.0.0.1;fail + + # mangling + vlan id 1 vlan id set 2;ok ++ ++ether saddr 00:01:02:03:04:05 vlan id 1;ok ++vlan id 2 ether saddr 0:1:2:3:4:6;ok;ether saddr 00:01:02:03:04:06 vlan id 2 +diff --git a/tests/py/bridge/vlan.t.json b/tests/py/bridge/vlan.t.json +index e7640f9..58d4a40 100644 +--- a/tests/py/bridge/vlan.t.json ++++ b/tests/py/bridge/vlan.t.json +@@ -761,3 +761,59 @@ + } + } + ] ++ ++# ether saddr 00:01:02:03:04:05 vlan id 1 ++[ ++ { ++ "match": { ++ "left": { ++ "payload": { ++ "field": "saddr", ++ "protocol": "ether" ++ } ++ }, ++ "op": "==", ++ "right": "00:01:02:03:04:05" ++ } ++ }, ++ { ++ "match": { ++ "left": { ++ "payload": { ++ "field": "id", ++ "protocol": "vlan" ++ } ++ }, ++ "op": "==", ++ "right": 1 ++ } ++ } ++] ++ ++# vlan id 2 ether saddr 0:1:2:3:4:6 ++[ ++ { ++ "match": { ++ "left": { ++ "payload": { ++ "field": "saddr", ++ "protocol": "ether" ++ } ++ }, ++ "op": "==", ++ "right": "00:01:02:03:04:06" ++ } ++ }, ++ { ++ "match": { ++ "left": { ++ "payload": { ++ "field": "id", ++ "protocol": "vlan" ++ } ++ }, ++ "op": "==", ++ "right": 2 ++ } ++ } ++] +diff --git a/tests/py/bridge/vlan.t.payload b/tests/py/bridge/vlan.t.payload +index 6c8d595..713670e 100644 +--- a/tests/py/bridge/vlan.t.payload ++++ b/tests/py/bridge/vlan.t.payload +@@ -276,3 +276,19 @@ bridge + [ payload load 2b @ link header + 14 => reg 1 ] + [ bitwise reg 1 = ( reg 1 & 0x000000f0 ) ^ 0x00000200 ] + [ payload write reg 1 => 2b @ link header + 14 csum_type 0 csum_off 0 csum_flags 0x0 ] ++ ++# ether saddr 00:01:02:03:04:05 vlan id 1 ++bridge test-bridge input ++ [ payload load 8b @ link header + 6 => reg 1 ] ++ [ cmp eq reg 1 0x03020100 0x00810504 ] ++ [ payload load 2b @ link header + 14 => reg 1 ] ++ [ bitwise reg 1 = ( reg 1 & 0x0000ff0f ) ^ 0x00000000 ] ++ [ cmp eq reg 1 0x00000100 ] ++ ++# vlan id 2 ether saddr 0:1:2:3:4:6 ++bridge test-bridge input ++ [ payload load 8b @ link header + 6 => reg 1 ] ++ [ cmp eq reg 1 0x03020100 0x00810604 ] ++ [ payload load 2b @ link header + 14 => reg 1 ] ++ [ bitwise reg 1 = ( reg 1 & 0x0000ff0f ) ^ 0x00000000 ] ++ [ cmp eq reg 1 0x00000200 ] +diff --git a/tests/py/bridge/vlan.t.payload.netdev b/tests/py/bridge/vlan.t.payload.netdev +index d2c7d74..98a2a2b 100644 +--- a/tests/py/bridge/vlan.t.payload.netdev ++++ b/tests/py/bridge/vlan.t.payload.netdev +@@ -322,3 +322,23 @@ netdev + [ payload load 2b @ link header + 14 => reg 1 ] + [ bitwise reg 1 = ( reg 1 & 0x000000f0 ) ^ 0x00000200 ] + [ payload write reg 1 => 2b @ link header + 14 csum_type 0 csum_off 0 csum_flags 0x0 ] ++ ++# vlan id 2 ether saddr 0:1:2:3:4:6 ++netdev test-netdev ingress ++ [ meta load iiftype => reg 1 ] ++ [ cmp eq reg 1 0x00000001 ] ++ [ payload load 8b @ link header + 6 => reg 1 ] ++ [ cmp eq reg 1 0x03020100 0x00810604 ] ++ [ payload load 2b @ link header + 14 => reg 1 ] ++ [ bitwise reg 1 = ( reg 1 & 0x0000ff0f ) ^ 0x00000000 ] ++ [ cmp eq reg 1 0x00000200 ] ++ ++# ether saddr 00:01:02:03:04:05 vlan id 1 ++netdev test-netdev ingress ++ [ meta load iiftype => reg 1 ] ++ [ cmp eq reg 1 0x00000001 ] ++ [ payload load 8b @ link header + 6 => reg 1 ] ++ [ cmp eq reg 1 0x03020100 0x00810504 ] ++ [ payload load 
2b @ link header + 14 => reg 1 ] ++ [ bitwise reg 1 = ( reg 1 & 0x0000ff0f ) ^ 0x00000000 ] ++ [ cmp eq reg 1 0x00000100 ] +-- +2.41.0.rc1 + diff --git a/SOURCES/0018-parser-add-a-helper-for-concat-expression-handling.patch b/SOURCES/0018-parser-add-a-helper-for-concat-expression-handling.patch deleted file mode 100644 index d973cdf..0000000 --- a/SOURCES/0018-parser-add-a-helper-for-concat-expression-handling.patch +++ /dev/null @@ -1,162 +0,0 @@ -From 160d84fb761c54a5f757aff907fc197d259196bd Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 17 Feb 2020 15:26:42 +0100 -Subject: [PATCH] parser: add a helper for concat expression handling - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1795224 -Upstream Status: nftables commit 10f114806ccd9 - -commit 10f114806ccd9d64f9d72eaa813babb04d719688 -Author: Florian Westphal -Date: Wed Dec 11 14:31:44 2019 +0100 - - parser: add a helper for concat expression handling - - Cull the repeated copy&paste snippets and add/use a helper for this. - - Signed-off-by: Florian Westphal ---- - src/parser_bison.y | 99 ++++++++++++++++++++-------------------------- - 1 file changed, 43 insertions(+), 56 deletions(-) - -diff --git a/src/parser_bison.y b/src/parser_bison.y -index 707f467..0fd9b94 100644 ---- a/src/parser_bison.y -+++ b/src/parser_bison.y -@@ -102,6 +102,25 @@ static void location_update(struct location *loc, struct location *rhs, int n) - } - } - -+static struct expr *handle_concat_expr(const struct location *loc, -+ struct expr *expr, -+ struct expr *expr_l, struct expr *expr_r, -+ struct location loc_rhs[3]) -+{ -+ if (expr->etype != EXPR_CONCAT) { -+ expr = concat_expr_alloc(loc); -+ compound_expr_add(expr, expr_l); -+ } else { -+ location_update(&expr_r->location, loc_rhs, 2); -+ -+ expr = expr_l; -+ expr->location = *loc; -+ } -+ -+ compound_expr_add(expr, expr_r); -+ return expr; -+} -+ - #define YYLLOC_DEFAULT(Current, Rhs, N) location_update(&Current, Rhs, N) - - #define symbol_value(loc, str) \ -@@ -1878,20 +1897,12 @@ data_type_atom_expr : type_identifier - data_type_expr : data_type_atom_expr - | data_type_expr DOT data_type_atom_expr - { -- if ($1->etype != EXPR_CONCAT) { -- $$ = concat_expr_alloc(&@$); -- compound_expr_add($$, $1); -- } else { -- struct location rhs[] = { -- [1] = @2, -- [2] = @3, -- }; -- location_update(&$3->location, rhs, 2); -- -- $$ = $1; -- $$->location = @$; -- } -- compound_expr_add($$, $3); -+ struct location rhs[] = { -+ [1] = @2, -+ [2] = @3, -+ }; -+ -+ $$ = handle_concat_expr(&@$, $$, $1, $3, rhs); - } - ; - -@@ -2992,20 +3003,12 @@ basic_stmt_expr : inclusive_or_stmt_expr - concat_stmt_expr : basic_stmt_expr - | concat_stmt_expr DOT primary_stmt_expr - { -- if ($$->etype != EXPR_CONCAT) { -- $$ = concat_expr_alloc(&@$); -- compound_expr_add($$, $1); -- } else { -- struct location rhs[] = { -- [1] = @2, -- [2] = @3, -- }; -- location_update(&$3->location, rhs, 2); -- -- $$ = $1; -- $$->location = @$; -- } -- compound_expr_add($$, $3); -+ struct location rhs[] = { -+ [1] = @2, -+ [2] = @3, -+ }; -+ -+ $$ = handle_concat_expr(&@$, $$, $1, $3, rhs); - } - ; - -@@ -3525,20 +3528,12 @@ basic_expr : inclusive_or_expr - concat_expr : basic_expr - | concat_expr DOT basic_expr - { -- if ($$->etype != EXPR_CONCAT) { -- $$ = concat_expr_alloc(&@$); -- compound_expr_add($$, $1); -- } else { -- struct location rhs[] = { -- [1] = @2, -- [2] = @3, -- }; -- location_update(&$3->location, rhs, 2); -- -- $$ = $1; -- $$->location = @$; -- } -- compound_expr_add($$, $3); -+ struct location rhs[] = { -+ [1] = @2, 
-+ [2] = @3, -+ }; -+ -+ $$ = handle_concat_expr(&@$, $$, $1, $3, rhs); - } - ; - -@@ -3946,20 +3941,12 @@ basic_rhs_expr : inclusive_or_rhs_expr - concat_rhs_expr : basic_rhs_expr - | concat_rhs_expr DOT basic_rhs_expr - { -- if ($$->etype != EXPR_CONCAT) { -- $$ = concat_expr_alloc(&@$); -- compound_expr_add($$, $1); -- } else { -- struct location rhs[] = { -- [1] = @2, -- [2] = @3, -- }; -- location_update(&$3->location, rhs, 2); -- -- $$ = $1; -- $$->location = @$; -- } -- compound_expr_add($$, $3); -+ struct location rhs[] = { -+ [1] = @2, -+ [2] = @3, -+ }; -+ -+ $$ = handle_concat_expr(&@$, $$, $1, $3, rhs); - } - ; - --- -2.31.1 - diff --git a/SOURCES/0019-include-resync-nf_tables.h-cache-copy.patch b/SOURCES/0019-include-resync-nf_tables.h-cache-copy.patch deleted file mode 100644 index af7fa1b..0000000 --- a/SOURCES/0019-include-resync-nf_tables.h-cache-copy.patch +++ /dev/null @@ -1,83 +0,0 @@ -From e872d169c189f363ebbdc39105510c1809b58276 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Thu, 13 Feb 2020 17:48:18 +0100 -Subject: [PATCH] include: resync nf_tables.h cache copy - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1795224 -Upstream Status: nftables commit 9b94127950f98 - -commit 9b94127950f9848bc5a1505ae65ca3045ff68a16 -Author: Stefano Brivio -Date: Thu Jan 30 01:16:55 2020 +0100 - - include: resync nf_tables.h cache copy - - Get this header in sync with nf-next as of merge commit - b3a608222336 (5.6-rc1-ish). - - Signed-off-by: Stefano Brivio - Signed-off-by: Pablo Neira Ayuso ---- - include/linux/netfilter/nf_tables.h | 17 +++++++++++++++++ - 1 file changed, 17 insertions(+) - -diff --git a/include/linux/netfilter/nf_tables.h b/include/linux/netfilter/nf_tables.h -index ed8881a..1a99df3 100644 ---- a/include/linux/netfilter/nf_tables.h -+++ b/include/linux/netfilter/nf_tables.h -@@ -48,6 +48,7 @@ enum nft_registers { - - #define NFT_REG_SIZE 16 - #define NFT_REG32_SIZE 4 -+#define NFT_REG32_COUNT (NFT_REG32_15 - NFT_REG32_00 + 1) - - /** - * enum nft_verdicts - nf_tables internal verdicts -@@ -299,14 +300,28 @@ enum nft_set_policies { - * enum nft_set_desc_attributes - set element description - * - * @NFTA_SET_DESC_SIZE: number of elements in set (NLA_U32) -+ * @NFTA_SET_DESC_CONCAT: description of field concatenation (NLA_NESTED) - */ - enum nft_set_desc_attributes { - NFTA_SET_DESC_UNSPEC, - NFTA_SET_DESC_SIZE, -+ NFTA_SET_DESC_CONCAT, - __NFTA_SET_DESC_MAX - }; - #define NFTA_SET_DESC_MAX (__NFTA_SET_DESC_MAX - 1) - -+/** -+ * enum nft_set_field_attributes - attributes of concatenated fields -+ * -+ * @NFTA_SET_FIELD_LEN: length of single field, in bits (NLA_U32) -+ */ -+enum nft_set_field_attributes { -+ NFTA_SET_FIELD_UNSPEC, -+ NFTA_SET_FIELD_LEN, -+ __NFTA_SET_FIELD_MAX -+}; -+#define NFTA_SET_FIELD_MAX (__NFTA_SET_FIELD_MAX - 1) -+ - /** - * enum nft_set_attributes - nf_tables set netlink attributes - * -@@ -368,6 +383,7 @@ enum nft_set_elem_flags { - * @NFTA_SET_ELEM_USERDATA: user data (NLA_BINARY) - * @NFTA_SET_ELEM_EXPR: expression (NLA_NESTED: nft_expr_attributes) - * @NFTA_SET_ELEM_OBJREF: stateful object reference (NLA_STRING) -+ * @NFTA_SET_ELEM_KEY_END: closing key value (NLA_NESTED: nft_data) - */ - enum nft_set_elem_attributes { - NFTA_SET_ELEM_UNSPEC, -@@ -380,6 +396,7 @@ enum nft_set_elem_attributes { - NFTA_SET_ELEM_EXPR, - NFTA_SET_ELEM_PAD, - NFTA_SET_ELEM_OBJREF, -+ NFTA_SET_ELEM_KEY_END, - __NFTA_SET_ELEM_MAX - }; - #define NFTA_SET_ELEM_MAX (__NFTA_SET_ELEM_MAX - 1) --- -2.31.1 - diff --git 
a/SOURCES/0019-src-allow-anon-set-concatenation-with-ether-and-vlan.patch b/SOURCES/0019-src-allow-anon-set-concatenation-with-ether-and-vlan.patch new file mode 100644 index 0000000..6bde45c --- /dev/null +++ b/SOURCES/0019-src-allow-anon-set-concatenation-with-ether-and-vlan.patch @@ -0,0 +1,223 @@ +From f2988bad7c73e30ea4a80f348f7adf8078e6ef57 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 10:27:58 +0100 +Subject: [PATCH] src: allow anon set concatenation with ether and vlan + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit c1c223f1b5818 + +commit c1c223f1b58188542222ee2d9a4a8cc133d1dc3b +Author: Florian Westphal +Date: Mon Jul 25 21:34:52 2022 +0200 + + src: allow anon set concatenation with ether and vlan + + vlan id uses integer type (which has a length of 0). + + Using it was possible, but listing would assert: + python: mergesort.c:24: concat_expr_msort_value: Assertion `ilen > 0' failed. + + There are two reasons for this. + First reason is that the udata/typeof information lacks the 'vlan id' + part, because internally this is 'payload . binop(payload AND mask)'. + + binop lacks an udata store. It makes little sense to store it, + 'typeof' keyword expects normal match syntax. + + So, when storing udata, store the left hand side of the binary + operation, i.e. the load of the 2-byte key. + + With that resolved, delinerization could work, but concat_elem_expr() + would splice 12 bits off the elements value, but it should be 16 (on + a byte boundary). + + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + src/expression.c | 17 +++++++++-- + src/netlink.c | 10 +++++-- + tests/py/bridge/vlan.t | 2 ++ + tests/py/bridge/vlan.t.json | 41 +++++++++++++++++++++++++++ + tests/py/bridge/vlan.t.payload | 12 ++++++++ + tests/py/bridge/vlan.t.payload.netdev | 14 +++++++++ + 6 files changed, 91 insertions(+), 5 deletions(-) + +diff --git a/src/expression.c b/src/expression.c +index deb649e..7390089 100644 +--- a/src/expression.c ++++ b/src/expression.c +@@ -879,17 +879,30 @@ static void concat_expr_print(const struct expr *expr, struct output_ctx *octx) + #define NFTNL_UDATA_SET_KEY_CONCAT_SUB_DATA 1 + #define NFTNL_UDATA_SET_KEY_CONCAT_SUB_MAX 2 + ++static struct expr *expr_build_udata_recurse(struct expr *e) ++{ ++ switch (e->etype) { ++ case EXPR_BINOP: ++ return e->left; ++ default: ++ break; ++ } ++ ++ return e; ++} ++ + static int concat_expr_build_udata(struct nftnl_udata_buf *udbuf, + const struct expr *concat_expr) + { + struct nftnl_udata *nest; ++ struct expr *expr, *tmp; + unsigned int i = 0; +- struct expr *expr; + +- list_for_each_entry(expr, &concat_expr->expressions, list) { ++ list_for_each_entry_safe(expr, tmp, &concat_expr->expressions, list) { + struct nftnl_udata *nest_expr; + int err; + ++ expr = expr_build_udata_recurse(expr); + if (!expr_ops(expr)->build_udata || i >= NFT_REG32_SIZE) + return -1; + +diff --git a/src/netlink.c b/src/netlink.c +index 89d864e..799cf9b 100644 +--- a/src/netlink.c ++++ b/src/netlink.c +@@ -1114,17 +1114,21 @@ static struct expr *concat_elem_expr(struct expr *key, + struct expr *data, int *off) + { + const struct datatype *subtype; ++ unsigned int sub_length; + struct expr *expr; + + if (key) { + (*off)--; +- expr = constant_expr_splice(data, key->len); ++ sub_length = round_up(key->len, BITS_PER_BYTE); ++ ++ expr = constant_expr_splice(data, sub_length); + expr->dtype = datatype_get(key->dtype); + expr->byteorder = key->byteorder; + expr->len = key->len; + } 
else { + subtype = concat_subtype_lookup(dtype->type, --(*off)); +- expr = constant_expr_splice(data, subtype->size); ++ sub_length = round_up(subtype->size, BITS_PER_BYTE); ++ expr = constant_expr_splice(data, sub_length); + expr->dtype = subtype; + expr->byteorder = subtype->byteorder; + } +@@ -1136,7 +1140,7 @@ static struct expr *concat_elem_expr(struct expr *key, + expr->dtype->basetype->type == TYPE_BITMASK) + expr = bitmask_expr_to_binops(expr); + +- data->len -= netlink_padding_len(expr->len); ++ data->len -= netlink_padding_len(sub_length); + + return expr; + } +diff --git a/tests/py/bridge/vlan.t b/tests/py/bridge/vlan.t +index 4920601..95bdff4 100644 +--- a/tests/py/bridge/vlan.t ++++ b/tests/py/bridge/vlan.t +@@ -50,3 +50,5 @@ vlan id 1 vlan id set 2;ok + + ether saddr 00:01:02:03:04:05 vlan id 1;ok + vlan id 2 ether saddr 0:1:2:3:4:6;ok;ether saddr 00:01:02:03:04:06 vlan id 2 ++ ++ether saddr . vlan id { 0a:0b:0c:0d:0e:0f . 42, 0a:0b:0c:0d:0e:0f . 4095 };ok +diff --git a/tests/py/bridge/vlan.t.json b/tests/py/bridge/vlan.t.json +index 58d4a40..f77756f 100644 +--- a/tests/py/bridge/vlan.t.json ++++ b/tests/py/bridge/vlan.t.json +@@ -817,3 +817,44 @@ + } + } + ] ++ ++# ether saddr . vlan id { 0a:0b:0c:0d:0e:0f . 42, 0a:0b:0c:0d:0e:0f . 4095 } ++[ ++ { ++ "match": { ++ "left": { ++ "concat": [ ++ { ++ "payload": { ++ "field": "saddr", ++ "protocol": "ether" ++ } ++ }, ++ { ++ "payload": { ++ "field": "id", ++ "protocol": "vlan" ++ } ++ } ++ ] ++ }, ++ "op": "==", ++ "right": { ++ "set": [ ++ { ++ "concat": [ ++ "0a:0b:0c:0d:0e:0f", ++ 42 ++ ] ++ }, ++ { ++ "concat": [ ++ "0a:0b:0c:0d:0e:0f", ++ 4095 ++ ] ++ } ++ ] ++ } ++ } ++ } ++] +diff --git a/tests/py/bridge/vlan.t.payload b/tests/py/bridge/vlan.t.payload +index 713670e..62e4b89 100644 +--- a/tests/py/bridge/vlan.t.payload ++++ b/tests/py/bridge/vlan.t.payload +@@ -292,3 +292,15 @@ bridge test-bridge input + [ payload load 2b @ link header + 14 => reg 1 ] + [ bitwise reg 1 = ( reg 1 & 0x0000ff0f ) ^ 0x00000000 ] + [ cmp eq reg 1 0x00000200 ] ++ ++# ether saddr . vlan id { 0a:0b:0c:0d:0e:0f . 42, 0a:0b:0c:0d:0e:0f . 4095 } ++__set%d test-bridge 3 size 2 ++__set%d test-bridge 0 ++ element 0d0c0b0a 00000f0e 00002a00 : 0 [end] element 0d0c0b0a 00000f0e 0000ff0f : 0 [end] ++bridge test-bridge input ++ [ payload load 2b @ link header + 12 => reg 1 ] ++ [ cmp eq reg 1 0x00000081 ] ++ [ payload load 6b @ link header + 6 => reg 1 ] ++ [ payload load 2b @ link header + 14 => reg 10 ] ++ [ bitwise reg 10 = ( reg 10 & 0x0000ff0f ) ^ 0x00000000 ] ++ [ lookup reg 1 set __set%d ] +diff --git a/tests/py/bridge/vlan.t.payload.netdev b/tests/py/bridge/vlan.t.payload.netdev +index 98a2a2b..1018d4c 100644 +--- a/tests/py/bridge/vlan.t.payload.netdev ++++ b/tests/py/bridge/vlan.t.payload.netdev +@@ -342,3 +342,17 @@ netdev test-netdev ingress + [ payload load 2b @ link header + 14 => reg 1 ] + [ bitwise reg 1 = ( reg 1 & 0x0000ff0f ) ^ 0x00000000 ] + [ cmp eq reg 1 0x00000100 ] ++ ++# ether saddr . vlan id { 0a:0b:0c:0d:0e:0f . 42, 0a:0b:0c:0d:0e:0f . 
4095 } ++__set%d test-netdev 3 size 2 ++__set%d test-netdev 0 ++ element 0d0c0b0a 00000f0e 00002a00 : 0 [end] element 0d0c0b0a 00000f0e 0000ff0f : 0 [end] ++netdev test-netdev ingress ++ [ meta load iiftype => reg 1 ] ++ [ cmp eq reg 1 0x00000001 ] ++ [ payload load 2b @ link header + 12 => reg 1 ] ++ [ cmp eq reg 1 0x00000081 ] ++ [ payload load 6b @ link header + 6 => reg 1 ] ++ [ payload load 2b @ link header + 14 => reg 10 ] ++ [ bitwise reg 10 = ( reg 10 & 0x0000ff0f ) ^ 0x00000000 ] ++ [ lookup reg 1 set __set%d ] +-- +2.41.0.rc1 + diff --git a/SOURCES/0020-evaluate-set-eval-ctx-for-add-update-statements-with.patch b/SOURCES/0020-evaluate-set-eval-ctx-for-add-update-statements-with.patch new file mode 100644 index 0000000..251fee2 --- /dev/null +++ b/SOURCES/0020-evaluate-set-eval-ctx-for-add-update-statements-with.patch @@ -0,0 +1,200 @@ +From baea5b0f3199d21a8089ab792aee86621f67202c Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 12:45:30 +0100 +Subject: [PATCH] evaluate: set eval ctx for add/update statements with integer + constants + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 4cc6b20d31498 + +commit 4cc6b20d31498d90e90ff574ce8b70276afcee8f +Author: Florian Westphal +Date: Mon Jan 23 19:03:28 2023 +0100 + + evaluate: set eval ctx for add/update statements with integer constants + + Eric reports that nft asserts when using integer basetype constants with + 'typeof' sets. Example: + table netdev t { + set s { + typeof ether saddr . vlan id + flags dynamic,timeout + } + + chain c { } + } + + loads fine. But adding a rule with add/update statement fails: + nft 'add rule netdev t c set update ether saddr . 0 @s' + nft: netlink_linearize.c:867: netlink_gen_expr: Assertion `dreg < ctx->reg_low' failed. + + When the 'ether saddr . 0' concat expression is processed, there is + no set definition available anymore to deduce the required size of the + integer constant. + + nft eval step then derives the required length using the data types. + '0' has integer basetype, so the deduced length is 0. + + The assertion triggers because serialization step finds that it + needs one more register. + + 2 are needed to store the ethernet address, another register is + needed for the vlan id. + + Update eval step to make the expression context store the set key + information when processing the preceeding set reference, then + let stmt_evaluate_set() preserve the existing context instead of + zeroing it again via stmt_evaluate_arg(). + + This makes concat expression evaluation compute the total size + needed based on the sets key definition. 
+ + Reported-by: Eric Garver + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + src/evaluate.c | 32 +++++++++++++++++-- + .../maps/dumps/typeof_maps_concat.nft | 11 +++++++ + tests/shell/testcases/maps/typeof_maps_concat | 6 ++++ + .../sets/dumps/typeof_sets_concat.nft | 12 +++++++ + tests/shell/testcases/sets/typeof_sets_concat | 6 ++++ + 5 files changed, 65 insertions(+), 2 deletions(-) + create mode 100644 tests/shell/testcases/maps/dumps/typeof_maps_concat.nft + create mode 100755 tests/shell/testcases/maps/typeof_maps_concat + create mode 100644 tests/shell/testcases/sets/dumps/typeof_sets_concat.nft + create mode 100755 tests/shell/testcases/sets/typeof_sets_concat + +diff --git a/src/evaluate.c b/src/evaluate.c +index d67f915..7f81411 100644 +--- a/src/evaluate.c ++++ b/src/evaluate.c +@@ -1526,6 +1526,14 @@ static int interval_set_eval(struct eval_ctx *ctx, struct set *set, + return ret; + } + ++static void expr_evaluate_set_ref(struct eval_ctx *ctx, struct expr *expr) ++{ ++ struct set *set = expr->set; ++ ++ expr_set_context(&ctx->ectx, set->key->dtype, set->key->len); ++ ctx->ectx.key = set->key; ++} ++ + static int expr_evaluate_set(struct eval_ctx *ctx, struct expr **expr) + { + struct expr *set = *expr, *i, *next; +@@ -2388,6 +2396,7 @@ static int expr_evaluate(struct eval_ctx *ctx, struct expr **expr) + case EXPR_VARIABLE: + return expr_evaluate_variable(ctx, expr); + case EXPR_SET_REF: ++ expr_evaluate_set_ref(ctx, *expr); + return 0; + case EXPR_VALUE: + return expr_evaluate_value(ctx, expr); +@@ -2550,6 +2559,25 @@ static int stmt_evaluate_arg(struct eval_ctx *ctx, struct stmt *stmt, + return __stmt_evaluate_arg(ctx, stmt, dtype, len, byteorder, expr); + } + ++/* like stmt_evaluate_arg, but keep existing context created ++ * by previous expr_evaluate(). ++ * ++ * This is needed for add/update statements: ++ * ctx->ectx.key has the set key, which may be needed for 'typeof' ++ * sets: the 'add/update' expression might contain integer data types. ++ * ++ * Without the key we cannot derive the element size. 
++ */ ++static int stmt_evaluate_key(struct eval_ctx *ctx, struct stmt *stmt, ++ const struct datatype *dtype, unsigned int len, ++ enum byteorder byteorder, struct expr **expr) ++{ ++ if (expr_evaluate(ctx, expr) < 0) ++ return -1; ++ ++ return __stmt_evaluate_arg(ctx, stmt, dtype, len, byteorder, expr); ++} ++ + static int stmt_evaluate_verdict(struct eval_ctx *ctx, struct stmt *stmt) + { + if (stmt_evaluate_arg(ctx, stmt, &verdict_type, 0, 0, &stmt->expr) < 0) +@@ -3762,7 +3790,7 @@ static int stmt_evaluate_set(struct eval_ctx *ctx, struct stmt *stmt) + return expr_error(ctx->msgs, stmt->set.set, + "Expression does not refer to a set"); + +- if (stmt_evaluate_arg(ctx, stmt, ++ if (stmt_evaluate_key(ctx, stmt, + stmt->set.set->set->key->dtype, + stmt->set.set->set->key->len, + stmt->set.set->set->key->byteorder, +@@ -3805,7 +3833,7 @@ static int stmt_evaluate_map(struct eval_ctx *ctx, struct stmt *stmt) + return expr_error(ctx->msgs, stmt->map.set, + "Expression does not refer to a set"); + +- if (stmt_evaluate_arg(ctx, stmt, ++ if (stmt_evaluate_key(ctx, stmt, + stmt->map.set->set->key->dtype, + stmt->map.set->set->key->len, + stmt->map.set->set->key->byteorder, +diff --git a/tests/shell/testcases/maps/dumps/typeof_maps_concat.nft b/tests/shell/testcases/maps/dumps/typeof_maps_concat.nft +new file mode 100644 +index 0000000..1ca98d8 +--- /dev/null ++++ b/tests/shell/testcases/maps/dumps/typeof_maps_concat.nft +@@ -0,0 +1,11 @@ ++table netdev t { ++ map m { ++ typeof ether saddr . vlan id : meta mark ++ size 1234 ++ flags dynamic,timeout ++ } ++ ++ chain c { ++ ether type != 8021q update @m { ether daddr . 123 timeout 1m : 0x0000002a } counter packets 0 bytes 0 return ++ } ++} +diff --git a/tests/shell/testcases/maps/typeof_maps_concat b/tests/shell/testcases/maps/typeof_maps_concat +new file mode 100755 +index 0000000..07820b7 +--- /dev/null ++++ b/tests/shell/testcases/maps/typeof_maps_concat +@@ -0,0 +1,6 @@ ++#!/bin/bash ++ ++set -e ++dumpfile=$(dirname $0)/dumps/$(basename $0).nft ++ ++$NFT -f "$dumpfile" +diff --git a/tests/shell/testcases/sets/dumps/typeof_sets_concat.nft b/tests/shell/testcases/sets/dumps/typeof_sets_concat.nft +new file mode 100644 +index 0000000..dbaf7cd +--- /dev/null ++++ b/tests/shell/testcases/sets/dumps/typeof_sets_concat.nft +@@ -0,0 +1,12 @@ ++table netdev t { ++ set s { ++ typeof ether saddr . vlan id ++ size 2048 ++ flags dynamic,timeout ++ } ++ ++ chain c { ++ ether type != 8021q add @s { ether saddr . 0 timeout 5s } counter packets 0 bytes 0 return ++ ether type != 8021q update @s { ether daddr . 
123 timeout 1m } counter packets 0 bytes 0 return ++ } ++} +diff --git a/tests/shell/testcases/sets/typeof_sets_concat b/tests/shell/testcases/sets/typeof_sets_concat +new file mode 100755 +index 0000000..07820b7 +--- /dev/null ++++ b/tests/shell/testcases/sets/typeof_sets_concat +@@ -0,0 +1,6 @@ ++#!/bin/bash ++ ++set -e ++dumpfile=$(dirname $0)/dumps/$(basename $0).nft ++ ++$NFT -f "$dumpfile" +-- +2.41.0.rc1 + diff --git a/SOURCES/0020-src-Add-support-for-NFTNL_SET_DESC_CONCAT.patch b/SOURCES/0020-src-Add-support-for-NFTNL_SET_DESC_CONCAT.patch deleted file mode 100644 index 01d4785..0000000 --- a/SOURCES/0020-src-Add-support-for-NFTNL_SET_DESC_CONCAT.patch +++ /dev/null @@ -1,181 +0,0 @@ -From c8a5da2f527c85ab7c392cd293ff37d02a3f93a7 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Thu, 13 Feb 2020 17:48:18 +0100 -Subject: [PATCH] src: Add support for NFTNL_SET_DESC_CONCAT - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1795224 -Upstream Status: nftables commit 6156ba34018dd -Conflicts: Context change in src/mnl.c due to missing commit - 6e48df5329eab ("src: add "typeof" build/parse/print support") - -commit 6156ba34018dddd59cb6737cfd5a69a0cbc5eaa4 -Author: Stefano Brivio -Date: Thu Jan 30 01:16:56 2020 +0100 - - src: Add support for NFTNL_SET_DESC_CONCAT - - To support arbitrary range concatenations, the kernel needs to know - how long each field in the concatenation is. The new libnftnl - NFTNL_SET_DESC_CONCAT set attribute describes this as an array of - lengths, in bytes, of concatenated fields. - - While evaluating concatenated expressions, export the datatype size - into the new field_len array, and hand the data over via libnftnl. - - Similarly, when data is passed back from libnftnl, parse it into - the set description. - - When set data is cloned, we now need to copy the additional fields - in set_clone(), too. 
- - This change depends on the libnftnl patch with title: - set: Add support for NFTA_SET_DESC_CONCAT attributes - - v4: No changes - v3: Rework to use set description data instead of a stand-alone - attribute - v2: No changes - - Signed-off-by: Stefano Brivio - Signed-off-by: Pablo Neira Ayuso ---- - include/expression.h | 2 ++ - include/rule.h | 6 +++++- - src/evaluate.c | 14 +++++++++++--- - src/mnl.c | 7 +++++++ - src/netlink.c | 11 +++++++++++ - src/rule.c | 2 +- - 6 files changed, 37 insertions(+), 5 deletions(-) - -diff --git a/include/expression.h b/include/expression.h -index 717b675..ee726aa 100644 ---- a/include/expression.h -+++ b/include/expression.h -@@ -256,6 +256,8 @@ struct expr { - struct list_head expressions; - unsigned int size; - uint32_t set_flags; -+ uint8_t field_len[NFT_REG32_COUNT]; -+ uint8_t field_count; - }; - struct { - /* EXPR_SET_REF */ -diff --git a/include/rule.h b/include/rule.h -index 47eb29f..c03b0b8 100644 ---- a/include/rule.h -+++ b/include/rule.h -@@ -290,7 +290,9 @@ extern struct rule *rule_lookup_by_index(const struct chain *chain, - * @rg_cache: cached range element (left) - * @policy: set mechanism policy - * @automerge: merge adjacents and overlapping elements, if possible -- * @desc: set mechanism desc -+ * @desc.size: count of set elements -+ * @desc.field_len: length of single concatenated fields, bytes -+ * @desc.field_count: count of concatenated fields - */ - struct set { - struct list_head list; -@@ -310,6 +312,8 @@ struct set { - bool automerge; - struct { - uint32_t size; -+ uint8_t field_len[NFT_REG32_COUNT]; -+ uint8_t field_count; - } desc; - }; - -diff --git a/src/evaluate.c b/src/evaluate.c -index a865902..58f458d 100644 ---- a/src/evaluate.c -+++ b/src/evaluate.c -@@ -1216,6 +1216,8 @@ static int expr_evaluate_concat(struct eval_ctx *ctx, struct expr **expr, - struct expr *i, *next; - - list_for_each_entry_safe(i, next, &(*expr)->expressions, list) { -+ unsigned dsize_bytes; -+ - if (expr_is_constant(*expr) && dtype && off == 0) - return expr_binary_error(ctx->msgs, i, *expr, - "unexpected concat component, " -@@ -1240,6 +1242,9 @@ static int expr_evaluate_concat(struct eval_ctx *ctx, struct expr **expr, - i->dtype->name); - - ntype = concat_subtype_add(ntype, i->dtype->type); -+ -+ dsize_bytes = div_round_up(i->dtype->size, BITS_PER_BYTE); -+ (*expr)->field_len[(*expr)->field_count++] = dsize_bytes; - } - - (*expr)->flags |= flags; -@@ -3321,9 +3326,12 @@ static int set_evaluate(struct eval_ctx *ctx, struct set *set) - "specified in %s definition", - set->key->dtype->name, type); - } -- if (set->flags & NFT_SET_INTERVAL && -- set->key->etype == EXPR_CONCAT) -- return set_error(ctx, set, "concatenated types not supported in interval sets"); -+ -+ if (set->flags & NFT_SET_INTERVAL && set->key->etype == EXPR_CONCAT) { -+ memcpy(&set->desc.field_len, &set->key->field_len, -+ sizeof(set->desc.field_len)); -+ set->desc.field_count = set->key->field_count; -+ } - - if (set_is_datamap(set->flags)) { - if (set->datatype == NULL) -diff --git a/src/mnl.c b/src/mnl.c -index aa5b0b4..221ee05 100644 ---- a/src/mnl.c -+++ b/src/mnl.c -@@ -881,6 +881,13 @@ int mnl_nft_set_add(struct netlink_ctx *ctx, const struct cmd *cmd, - set->automerge)) - memory_allocation_error(); - -+ if (set->desc.field_len[0]) { -+ nftnl_set_set_data(nls, NFTNL_SET_DESC_CONCAT, -+ set->desc.field_len, -+ set->desc.field_count * -+ sizeof(set->desc.field_len[0])); -+ } -+ - nftnl_set_set_data(nls, NFTNL_SET_USERDATA, nftnl_udata_buf_data(udbuf), - 
nftnl_udata_buf_len(udbuf)); - nftnl_udata_buf_free(udbuf); -diff --git a/src/netlink.c b/src/netlink.c -index 486e124..83d863c 100644 ---- a/src/netlink.c -+++ b/src/netlink.c -@@ -672,6 +672,17 @@ struct set *netlink_delinearize_set(struct netlink_ctx *ctx, - if (nftnl_set_is_set(nls, NFTNL_SET_DESC_SIZE)) - set->desc.size = nftnl_set_get_u32(nls, NFTNL_SET_DESC_SIZE); - -+ if (nftnl_set_is_set(nls, NFTNL_SET_DESC_CONCAT)) { -+ uint32_t len = NFT_REG32_COUNT; -+ const uint8_t *data; -+ -+ data = nftnl_set_get_data(nls, NFTNL_SET_DESC_CONCAT, &len); -+ if (data) { -+ memcpy(set->desc.field_len, data, len); -+ set->desc.field_count = len; -+ } -+ } -+ - return set; - } - -diff --git a/src/rule.c b/src/rule.c -index 3ca1805..4669577 100644 ---- a/src/rule.c -+++ b/src/rule.c -@@ -337,7 +337,7 @@ struct set *set_clone(const struct set *set) - new_set->objtype = set->objtype; - new_set->policy = set->policy; - new_set->automerge = set->automerge; -- new_set->desc.size = set->desc.size; -+ new_set->desc = set->desc; - - return new_set; - } --- -2.31.1 - diff --git a/SOURCES/0021-monitor-Sanitize-startup-race-condition.patch b/SOURCES/0021-monitor-Sanitize-startup-race-condition.patch new file mode 100644 index 0000000..b58c6a4 --- /dev/null +++ b/SOURCES/0021-monitor-Sanitize-startup-race-condition.patch @@ -0,0 +1,107 @@ +From 6e522a03cfda57267224ecdd653dcfda9c4efe62 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 9 Feb 2023 15:25:37 +0100 +Subject: [PATCH] monitor: Sanitize startup race condition + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 545edb7a8ef0a + +commit 545edb7a8ef0a8acf991b1b7857fddc24d7b151a +Author: Phil Sutter +Date: Wed Sep 28 23:26:42 2022 +0200 + + monitor: Sanitize startup race condition + + During startup, 'nft monitor' first fetches the current ruleset and then + keeps this cache up to date based on received events. This is racey, as + any ruleset changes in between the initial fetch and the socket opening + are not recognized. + + This script demonstrates the problem: + + | #!/bin/bash + | + | while true; do + | nft flush ruleset + | iptables-nft -A FORWARD + | done & + | maniploop=$! + | + | trap "kill $maniploop; kill \$!; wait" EXIT + | + | while true; do + | nft monitor rules >/dev/null & + | sleep 0.2 + | kill $! + | done + + If the table add event is missed, the rule add event callback fails to + deserialize the rule and calls abort(). + + Avoid the inconvenient program exit by returning NULL from + netlink_delinearize_rule() instead of aborting and make callers check + the return value. 
+ + Signed-off-by: Phil Sutter + +Signed-off-by: Phil Sutter +--- + src/cache.c | 1 + + src/monitor.c | 5 +++++ + src/netlink_delinearize.c | 5 ++++- + 3 files changed, 10 insertions(+), 1 deletion(-) + +diff --git a/src/cache.c b/src/cache.c +index fd8df88..701aec6 100644 +--- a/src/cache.c ++++ b/src/cache.c +@@ -490,6 +490,7 @@ static int list_rule_cb(struct nftnl_rule *nlr, void *data) + + netlink_dump_rule(nlr, ctx); + rule = netlink_delinearize_rule(ctx, nlr); ++ assert(rule); + list_add_tail(&rule->list, &ctx->list); + + return 0; +diff --git a/src/monitor.c b/src/monitor.c +index 7fa92eb..a6b30a1 100644 +--- a/src/monitor.c ++++ b/src/monitor.c +@@ -551,6 +551,10 @@ static int netlink_events_rule_cb(const struct nlmsghdr *nlh, int type, + + nlr = netlink_rule_alloc(nlh); + r = netlink_delinearize_rule(monh->ctx, nlr); ++ if (!r) { ++ fprintf(stderr, "W: Received event for an unknown table.\n"); ++ goto out_free_nlr; ++ } + nlr_for_each_set(nlr, rule_map_decompose_cb, NULL, + &monh->ctx->nft->cache); + cmd = netlink_msg2cmd(type, nlh->nlmsg_flags); +@@ -587,6 +591,7 @@ static int netlink_events_rule_cb(const struct nlmsghdr *nlh, int type, + break; + } + rule_free(r); ++out_free_nlr: + nftnl_rule_free(nlr); + return MNL_CB_OK; + } +diff --git a/src/netlink_delinearize.c b/src/netlink_delinearize.c +index c6ad84d..1d47c74 100644 +--- a/src/netlink_delinearize.c ++++ b/src/netlink_delinearize.c +@@ -3194,7 +3194,10 @@ struct rule *netlink_delinearize_rule(struct netlink_ctx *ctx, + pctx->rule = rule_alloc(&netlink_location, &h); + pctx->table = table_cache_find(&ctx->nft->cache.table_cache, + h.table.name, h.family); +- assert(pctx->table != NULL); ++ if (!pctx->table) { ++ errno = ENOENT; ++ return NULL; ++ } + + pctx->rule->comment = nftnl_rule_get_comment(nlr); + +-- +2.41.0.rc1 + diff --git a/SOURCES/0021-src-Add-support-for-concatenated-set-ranges.patch b/SOURCES/0021-src-Add-support-for-concatenated-set-ranges.patch deleted file mode 100644 index 5d9101b..0000000 --- a/SOURCES/0021-src-Add-support-for-concatenated-set-ranges.patch +++ /dev/null @@ -1,577 +0,0 @@ -From 7b1f98e90a32865faca9a97f4348f20c753cd2f3 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Fri, 14 Feb 2020 14:51:33 +0100 -Subject: [PATCH] src: Add support for concatenated set ranges - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1795224 -Upstream Status: nftables commit 8ac2f3b2fca38 - -commit 8ac2f3b2fca38b6533043b0678730c10ba4dc5ef -Author: Stefano Brivio -Date: Thu Jan 30 01:16:57 2020 +0100 - - src: Add support for concatenated set ranges - - After exporting field lengths via NFTNL_SET_DESC_CONCAT attributes, - we now need to adjust parsing of user input and generation of - netlink key data to complete support for concatenation of set - ranges. - - Instead of using separate elements for start and end of a range, - denoting the end element by the NFT_SET_ELEM_INTERVAL_END flag, - as it's currently done for ranges without concatenation, we'll use - the new attribute NFTNL_SET_ELEM_KEY_END as suggested by Pablo. It - behaves in the same way as NFTNL_SET_ELEM_KEY, but it indicates - that the included key represents the upper bound of a range. - - For example, "packets with an IPv4 address between 192.0.2.0 and - 192.0.2.42, with destination port between 22 and 25", needs to be - expressed as a single element with two keys: - - NFTA_SET_ELEM_KEY: 192.0.2.0 . 22 - NFTA_SET_ELEM_KEY_END: 192.0.2.42 . 
25 - - To achieve this, we need to: - - - adjust the lexer rules to allow multiton expressions as elements - of a concatenation. As wildcards are not allowed (semantics would - be ambiguous), exclude wildcards expressions from the set of - possible multiton expressions, and allow them directly where - needed. Concatenations now admit prefixes and ranges - - - generate, for each element in a range concatenation, a second key - attribute, that includes the upper bound for the range - - - also expand prefixes and non-ranged values in the concatenation - to ranges: given a set with interval and concatenation support, - the kernel has no way to tell which elements are ranged, so they - all need to be. For example, 192.0.2.0 . 192.0.2.9 : 1024 is - sent as: - - NFTA_SET_ELEM_KEY: 192.0.2.0 . 1024 - NFTA_SET_ELEM_KEY_END: 192.0.2.9 . 1024 - - - aggregate ranges when elements received by the kernel represent - concatenated ranges, see concat_range_aggregate() - - - perform a few minor adjustments where interval expressions - are already handled: we have intervals in these sets, but - the set specification isn't just an interval, so we can't - just aggregate and deaggregate interval ranges linearly - - v4: No changes - v3: - - rework to use a separate key for closing element of range instead of - a separate element with EXPR_F_INTERVAL_END set (Pablo Neira Ayuso) - v2: - - reworked netlink_gen_concat_data(), moved loop body to a new function, - netlink_gen_concat_data_expr() (Phil Sutter) - - dropped repeated pattern in bison file, replaced by a new helper, - compound_expr_alloc_or_add() (Phil Sutter) - - added set_is_nonconcat_range() helper (Phil Sutter) - - in expr_evaluate_set(), we need to set NFT_SET_SUBKEY also on empty - sets where the set in the context already has the flag - - dropped additional 'end' parameter from netlink_gen_data(), - temporarily set EXPR_F_INTERVAL_END on expressions and use that from - netlink_gen_concat_data() to figure out we need to add the 'end' - element (Phil Sutter) - - replace range_mask_len() by a simplified version, as we don't need - to actually store the composing masks of a range (Phil Sutter) - - Signed-off-by: Stefano Brivio - Signed-off-by: Pablo Neira Ayuso ---- - include/expression.h | 1 + - include/rule.h | 5 ++ - src/evaluate.c | 5 ++ - src/netlink.c | 109 +++++++++++++++++++++++++++++----------- - src/parser_bison.y | 17 +++++-- - src/rule.c | 13 ++--- - src/segtree.c | 117 +++++++++++++++++++++++++++++++++++++++++++ - 7 files changed, 229 insertions(+), 38 deletions(-) - -diff --git a/include/expression.h b/include/expression.h -index ee726aa..2e41aa0 100644 ---- a/include/expression.h -+++ b/include/expression.h -@@ -460,6 +460,7 @@ extern int set_to_intervals(struct list_head *msgs, struct set *set, - struct expr *init, bool add, - unsigned int debug_mask, bool merge, - struct output_ctx *octx); -+extern void concat_range_aggregate(struct expr *set); - extern void interval_map_decompose(struct expr *set); - - extern struct expr *get_set_intervals(const struct set *set, -diff --git a/include/rule.h b/include/rule.h -index c03b0b8..626973e 100644 ---- a/include/rule.h -+++ b/include/rule.h -@@ -372,6 +372,11 @@ static inline bool set_is_interval(uint32_t set_flags) - return set_flags & NFT_SET_INTERVAL; - } - -+static inline bool set_is_non_concat_range(struct set *s) -+{ -+ return (s->flags & NFT_SET_INTERVAL) && s->desc.field_count <= 1; -+} -+ - #include - - struct counter { -diff --git a/src/evaluate.c b/src/evaluate.c -index 
58f458d..0c84816 100644 ---- a/src/evaluate.c -+++ b/src/evaluate.c -@@ -136,6 +136,11 @@ static int byteorder_conversion(struct eval_ctx *ctx, struct expr **expr, - - if ((*expr)->byteorder == byteorder) - return 0; -+ -+ /* Conversion for EXPR_CONCAT is handled for single composing ranges */ -+ if ((*expr)->etype == EXPR_CONCAT) -+ return 0; -+ - if (expr_basetype(*expr)->type != TYPE_INTEGER) - return expr_error(ctx->msgs, *expr, - "Byteorder mismatch: expected %s, got %s", -diff --git a/src/netlink.c b/src/netlink.c -index 83d863c..e0ba903 100644 ---- a/src/netlink.c -+++ b/src/netlink.c -@@ -98,10 +98,11 @@ struct nftnl_expr *alloc_nft_expr(const char *name) - static struct nftnl_set_elem *alloc_nftnl_setelem(const struct expr *set, - const struct expr *expr) - { -- const struct expr *elem, *key, *data; -+ const struct expr *elem, *data; - struct nftnl_set_elem *nlse; - struct nft_data_linearize nld; - struct nftnl_udata_buf *udbuf = NULL; -+ struct expr *key; - - nlse = nftnl_set_elem_alloc(); - if (nlse == NULL) -@@ -119,6 +120,16 @@ static struct nftnl_set_elem *alloc_nftnl_setelem(const struct expr *set, - - netlink_gen_data(key, &nld); - nftnl_set_elem_set(nlse, NFTNL_SET_ELEM_KEY, &nld.value, nld.len); -+ -+ if (set->set_flags & NFT_SET_INTERVAL && expr->key->field_count > 1) { -+ key->flags |= EXPR_F_INTERVAL_END; -+ netlink_gen_data(key, &nld); -+ key->flags &= ~EXPR_F_INTERVAL_END; -+ -+ nftnl_set_elem_set(nlse, NFTNL_SET_ELEM_KEY_END, &nld.value, -+ nld.len); -+ } -+ - if (elem->timeout) - nftnl_set_elem_set_u64(nlse, NFTNL_SET_ELEM_TIMEOUT, - elem->timeout); -@@ -186,28 +197,58 @@ void netlink_gen_raw_data(const mpz_t value, enum byteorder byteorder, - data->len = len; - } - -+static int netlink_export_pad(unsigned char *data, const mpz_t v, -+ const struct expr *i) -+{ -+ mpz_export_data(data, v, i->byteorder, -+ div_round_up(i->len, BITS_PER_BYTE)); -+ -+ return netlink_padded_len(i->len) / BITS_PER_BYTE; -+} -+ -+static int netlink_gen_concat_data_expr(int end, const struct expr *i, -+ unsigned char *data) -+{ -+ switch (i->etype) { -+ case EXPR_RANGE: -+ i = end ? 
i->right : i->left; -+ break; -+ case EXPR_PREFIX: -+ if (end) { -+ int count; -+ mpz_t v; -+ -+ mpz_init_bitmask(v, i->len - i->prefix_len); -+ mpz_add(v, i->prefix->value, v); -+ count = netlink_export_pad(data, v, i); -+ mpz_clear(v); -+ return count; -+ } -+ return netlink_export_pad(data, i->prefix->value, i); -+ case EXPR_VALUE: -+ break; -+ default: -+ BUG("invalid expression type '%s' in set", expr_ops(i)->name); -+ } -+ -+ return netlink_export_pad(data, i->value, i); -+} -+ - static void netlink_gen_concat_data(const struct expr *expr, - struct nft_data_linearize *nld) - { -+ unsigned int len = expr->len / BITS_PER_BYTE, offset = 0; -+ int end = expr->flags & EXPR_F_INTERVAL_END; -+ unsigned char data[len]; - const struct expr *i; -- unsigned int len, offset; -- -- len = expr->len / BITS_PER_BYTE; -- if (1) { -- unsigned char data[len]; -- -- memset(data, 0, sizeof(data)); -- offset = 0; -- list_for_each_entry(i, &expr->expressions, list) { -- assert(i->etype == EXPR_VALUE); -- mpz_export_data(data + offset, i->value, i->byteorder, -- div_round_up(i->len, BITS_PER_BYTE)); -- offset += netlink_padded_len(i->len) / BITS_PER_BYTE; -- } - -- memcpy(nld->value, data, len); -- nld->len = len; -- } -+ memset(data, 0, len); -+ -+ list_for_each_entry(i, &expr->expressions, list) -+ offset += netlink_gen_concat_data_expr(end, i, data + offset); -+ -+ memcpy(nld->value, data, len); -+ nld->len = len; - } - - static void netlink_gen_constant_data(const struct expr *expr, -@@ -812,6 +853,7 @@ int netlink_delinearize_setelem(struct nftnl_set_elem *nlse, - if (nftnl_set_elem_is_set(nlse, NFTNL_SET_ELEM_FLAGS)) - flags = nftnl_set_elem_get_u32(nlse, NFTNL_SET_ELEM_FLAGS); - -+key_end: - key = netlink_alloc_value(&netlink_location, &nld); - datatype_set(key, set->key->dtype); - key->byteorder = set->key->byteorder; -@@ -880,6 +922,15 @@ int netlink_delinearize_setelem(struct nftnl_set_elem *nlse, - } - out: - compound_expr_add(set->init, expr); -+ -+ if (!(flags & NFT_SET_ELEM_INTERVAL_END) && -+ nftnl_set_elem_is_set(nlse, NFTNL_SET_ELEM_KEY_END)) { -+ flags |= NFT_SET_ELEM_INTERVAL_END; -+ nld.value = nftnl_set_elem_get(nlse, NFTNL_SET_ELEM_KEY_END, -+ &nld.len); -+ goto key_end; -+ } -+ - return 0; - } - -@@ -918,15 +969,16 @@ int netlink_list_setelems(struct netlink_ctx *ctx, const struct handle *h, - set->init = set_expr_alloc(&internal_location, set); - nftnl_set_elem_foreach(nls, list_setelem_cb, ctx); - -- if (!(set->flags & NFT_SET_INTERVAL)) -+ if (set->flags & NFT_SET_INTERVAL && set->desc.field_count > 1) -+ concat_range_aggregate(set->init); -+ else if (set->flags & NFT_SET_INTERVAL) -+ interval_map_decompose(set->init); -+ else - list_expr_sort(&ctx->set->init->expressions); - - nftnl_set_free(nls); - ctx->set = NULL; - -- if (set->flags & NFT_SET_INTERVAL) -- interval_map_decompose(set->init); -- - return 0; - } - -@@ -935,6 +987,7 @@ int netlink_get_setelem(struct netlink_ctx *ctx, const struct handle *h, - struct set *set, struct expr *init) - { - struct nftnl_set *nls, *nls_out = NULL; -+ int err = 0; - - nls = nftnl_set_alloc(); - if (nls == NULL) -@@ -958,18 +1011,18 @@ int netlink_get_setelem(struct netlink_ctx *ctx, const struct handle *h, - set->init = set_expr_alloc(loc, set); - nftnl_set_elem_foreach(nls_out, list_setelem_cb, ctx); - -- if (!(set->flags & NFT_SET_INTERVAL)) -+ if (set->flags & NFT_SET_INTERVAL && set->desc.field_count > 1) -+ concat_range_aggregate(set->init); -+ else if (set->flags & NFT_SET_INTERVAL) -+ err = get_set_decompose(table, set); -+ else - 
list_expr_sort(&ctx->set->init->expressions); - - nftnl_set_free(nls); - nftnl_set_free(nls_out); - ctx->set = NULL; - -- if (set->flags & NFT_SET_INTERVAL && -- get_set_decompose(table, set) < 0) -- return -1; -- -- return 0; -+ return err; - } - - void netlink_dump_obj(struct nftnl_obj *nln, struct netlink_ctx *ctx) -diff --git a/src/parser_bison.y b/src/parser_bison.y -index 0fd9b94..ea83f52 100644 ---- a/src/parser_bison.y -+++ b/src/parser_bison.y -@@ -3551,7 +3551,6 @@ range_rhs_expr : basic_rhs_expr DASH basic_rhs_expr - - multiton_rhs_expr : prefix_rhs_expr - | range_rhs_expr -- | wildcard_expr - ; - - map_expr : concat_expr MAP rhs_expr -@@ -3645,7 +3644,7 @@ set_elem_option : TIMEOUT time_spec - ; - - set_lhs_expr : concat_rhs_expr -- | multiton_rhs_expr -+ | wildcard_expr - ; - - set_rhs_expr : concat_rhs_expr -@@ -3898,7 +3897,7 @@ list_rhs_expr : basic_rhs_expr COMMA basic_rhs_expr - ; - - rhs_expr : concat_rhs_expr { $$ = $1; } -- | multiton_rhs_expr { $$ = $1; } -+ | wildcard_expr { $$ = $1; } - | set_expr { $$ = $1; } - | set_ref_symbol_expr { $$ = $1; } - ; -@@ -3939,7 +3938,17 @@ basic_rhs_expr : inclusive_or_rhs_expr - ; - - concat_rhs_expr : basic_rhs_expr -- | concat_rhs_expr DOT basic_rhs_expr -+ | multiton_rhs_expr -+ | concat_rhs_expr DOT multiton_rhs_expr -+ { -+ struct location rhs[] = { -+ [1] = @2, -+ [2] = @3, -+ }; -+ -+ $$ = handle_concat_expr(&@$, $$, $1, $3, rhs); -+ } -+ | concat_rhs_expr DOT basic_rhs_expr - { - struct location rhs[] = { - [1] = @2, -diff --git a/src/rule.c b/src/rule.c -index 4669577..e18237b 100644 ---- a/src/rule.c -+++ b/src/rule.c -@@ -1512,7 +1512,8 @@ static int __do_add_setelems(struct netlink_ctx *ctx, struct set *set, - return -1; - - if (set->init != NULL && -- set->flags & NFT_SET_INTERVAL) { -+ set->flags & NFT_SET_INTERVAL && -+ set->desc.field_count <= 1) { - interval_map_decompose(expr); - list_splice_tail_init(&expr->expressions, &set->init->expressions); - set->init->size += expr->size; -@@ -1533,7 +1534,7 @@ static int do_add_setelems(struct netlink_ctx *ctx, struct cmd *cmd, - table = table_lookup(h, &ctx->nft->cache); - set = set_lookup(table, h->set.name); - -- if (set->flags & NFT_SET_INTERVAL && -+ if (set_is_non_concat_range(set) && - set_to_intervals(ctx->msgs, set, init, true, - ctx->nft->debug_mask, set->automerge, - &ctx->nft->output) < 0) -@@ -1548,7 +1549,7 @@ static int do_add_set(struct netlink_ctx *ctx, const struct cmd *cmd, - struct set *set = cmd->set; - - if (set->init != NULL) { -- if (set->flags & NFT_SET_INTERVAL && -+ if (set_is_non_concat_range(set) && - set_to_intervals(ctx->msgs, set, set->init, true, - ctx->nft->debug_mask, set->automerge, - &ctx->nft->output) < 0) -@@ -1634,7 +1635,7 @@ static int do_delete_setelems(struct netlink_ctx *ctx, struct cmd *cmd) - table = table_lookup(h, &ctx->nft->cache); - set = set_lookup(table, h->set.name); - -- if (set->flags & NFT_SET_INTERVAL && -+ if (set_is_non_concat_range(set) && - set_to_intervals(ctx->msgs, set, expr, false, - ctx->nft->debug_mask, set->automerge, - &ctx->nft->output) < 0) -@@ -2488,7 +2489,7 @@ static int do_get_setelems(struct netlink_ctx *ctx, struct cmd *cmd, - set = set_lookup(table, cmd->handle.set.name); - - /* Create a list of elements based of what we got from command line. 
*/ -- if (set->flags & NFT_SET_INTERVAL) -+ if (set_is_non_concat_range(set)) - init = get_set_intervals(set, cmd->expr); - else - init = cmd->expr; -@@ -2501,7 +2502,7 @@ static int do_get_setelems(struct netlink_ctx *ctx, struct cmd *cmd, - if (err >= 0) - __do_list_set(ctx, cmd, table, new_set); - -- if (set->flags & NFT_SET_INTERVAL) -+ if (set_is_non_concat_range(set)) - expr_free(init); - - set_free(new_set); -diff --git a/src/segtree.c b/src/segtree.c -index 7217dbc..e859f84 100644 ---- a/src/segtree.c -+++ b/src/segtree.c -@@ -652,6 +652,11 @@ struct expr *get_set_intervals(const struct set *set, const struct expr *init) - set_elem_add(set, new_init, i->key->value, - i->flags, i->byteorder); - break; -+ case EXPR_CONCAT: -+ compound_expr_add(new_init, expr_clone(i)); -+ i->flags |= EXPR_F_INTERVAL_END; -+ compound_expr_add(new_init, expr_clone(i)); -+ break; - default: - range_expr_value_low(low, i); - set_elem_add(set, new_init, low, 0, i->byteorder); -@@ -823,6 +828,9 @@ static int expr_value_cmp(const void *p1, const void *p2) - struct expr *e2 = *(void * const *)p2; - int ret; - -+ if (expr_value(e1)->etype == EXPR_CONCAT) -+ return -1; -+ - ret = mpz_cmp(expr_value(e1)->value, expr_value(e2)->value); - if (ret == 0) { - if (e1->flags & EXPR_F_INTERVAL_END) -@@ -834,6 +842,115 @@ static int expr_value_cmp(const void *p1, const void *p2) - return ret; - } - -+/* Given start and end elements of a range, check if it can be represented as -+ * a single netmask, and if so, how long, by returning zero or a positive value. -+ */ -+static int range_mask_len(const mpz_t start, const mpz_t end, unsigned int len) -+{ -+ mpz_t tmp_start, tmp_end; -+ int ret; -+ -+ mpz_init_set_ui(tmp_start, mpz_get_ui(start)); -+ mpz_init_set_ui(tmp_end, mpz_get_ui(end)); -+ -+ while (mpz_cmp(tmp_start, tmp_end) <= 0 && -+ !mpz_tstbit(tmp_start, 0) && mpz_tstbit(tmp_end, 0) && -+ len--) { -+ mpz_fdiv_q_2exp(tmp_start, tmp_start, 1); -+ mpz_fdiv_q_2exp(tmp_end, tmp_end, 1); -+ } -+ -+ ret = !mpz_cmp(tmp_start, tmp_end) ? (int)len : -1; -+ -+ mpz_clear(tmp_start); -+ mpz_clear(tmp_end); -+ -+ return ret; -+} -+ -+/* Given a set with two elements (start and end), transform them into a -+ * concatenation of ranges. That is, from a list of start expressions and a list -+ * of end expressions, form a list of start - end expressions. -+ */ -+void concat_range_aggregate(struct expr *set) -+{ -+ struct expr *i, *start = NULL, *end, *r1, *r2, *next, *r1_next, *tmp; -+ struct list_head *r2_next; -+ int prefix_len, free_r1; -+ mpz_t range, p; -+ -+ list_for_each_entry_safe(i, next, &set->expressions, list) { -+ if (!start) { -+ start = i; -+ continue; -+ } -+ end = i; -+ -+ /* Walk over r1 (start expression) and r2 (end) in parallel, -+ * form ranges between corresponding r1 and r2 expressions, -+ * store them by replacing r2 expressions, and free r1 -+ * expressions. -+ */ -+ r2 = list_first_entry(&expr_value(end)->expressions, -+ struct expr, list); -+ list_for_each_entry_safe(r1, r1_next, -+ &expr_value(start)->expressions, -+ list) { -+ mpz_init(range); -+ mpz_init(p); -+ -+ r2_next = r2->list.next; -+ free_r1 = 0; -+ -+ if (!mpz_cmp(r1->value, r2->value)) { -+ free_r1 = 1; -+ goto next; -+ } -+ -+ mpz_sub(range, r2->value, r1->value); -+ mpz_sub_ui(range, range, 1); -+ mpz_and(p, r1->value, range); -+ -+ /* Check if we are forced, or if it's anyway preferable, -+ * to express the range as two points instead of a -+ * netmask. 
-+ */ -+ prefix_len = range_mask_len(r1->value, r2->value, -+ r1->len); -+ if (prefix_len < 0 || -+ !(r1->dtype->flags & DTYPE_F_PREFIX)) { -+ tmp = range_expr_alloc(&r1->location, r1, -+ r2); -+ -+ list_replace(&r2->list, &tmp->list); -+ r2_next = tmp->list.next; -+ } else { -+ tmp = prefix_expr_alloc(&r1->location, r1, -+ prefix_len); -+ tmp->len = r2->len; -+ -+ list_replace(&r2->list, &tmp->list); -+ r2_next = tmp->list.next; -+ expr_free(r2); -+ } -+ -+next: -+ mpz_clear(p); -+ mpz_clear(range); -+ -+ r2 = list_entry(r2_next, typeof(*r2), list); -+ compound_expr_remove(start, r1); -+ -+ if (free_r1) -+ expr_free(r1); -+ } -+ -+ compound_expr_remove(set, start); -+ expr_free(start); -+ start = NULL; -+ } -+} -+ - void interval_map_decompose(struct expr *set) - { - struct expr **elements, **ranges; --- -2.31.1 - diff --git a/SOURCES/0022-netlink_delinearize-fix-decoding-of-concat-data-elem.patch b/SOURCES/0022-netlink_delinearize-fix-decoding-of-concat-data-elem.patch new file mode 100644 index 0000000..de7d6e7 --- /dev/null +++ b/SOURCES/0022-netlink_delinearize-fix-decoding-of-concat-data-elem.patch @@ -0,0 +1,53 @@ +From 9126153259c891ef55571f358d1e56b3f2274fc4 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Fri, 17 Feb 2023 17:52:16 +0100 +Subject: [PATCH] netlink_delinearize: fix decoding of concat data element + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit db59a5c1204c9 + +commit db59a5c1204c9246a82a115a8761f15809578479 +Author: Florian Westphal +Date: Mon Dec 12 11:04:34 2022 +0100 + + netlink_delinearize: fix decoding of concat data element + + Its possible to use update as follows: + + meta l4proto tcp update @pinned { ip saddr . ct original proto-src : ip daddr . ct original proto-dst } + + ... but when listing, only the first element of the concatenation is + shown. + + Check if the element size is too small and parse subsequent registers as + well. 
+ + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + src/netlink_delinearize.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/src/netlink_delinearize.c b/src/netlink_delinearize.c +index 1d47c74..e9e0845 100644 +--- a/src/netlink_delinearize.c ++++ b/src/netlink_delinearize.c +@@ -1659,6 +1659,14 @@ static void netlink_parse_dynset(struct netlink_parse_ctx *ctx, + if (nftnl_expr_is_set(nle, NFTNL_EXPR_DYNSET_SREG_DATA)) { + sreg_data = netlink_parse_register(nle, NFTNL_EXPR_DYNSET_SREG_DATA); + expr_data = netlink_get_register(ctx, loc, sreg_data); ++ ++ if (expr_data->len < set->data->len) { ++ expr_free(expr_data); ++ expr_data = netlink_parse_concat_expr(ctx, loc, sreg_data, set->data->len); ++ if (expr_data == NULL) ++ netlink_error(ctx, loc, ++ "Could not parse dynset map data expressions"); ++ } + } + + if (expr_data != NULL) { +-- +2.41.0.rc1 + diff --git a/SOURCES/0022-parser_json-Support-ranges-in-concat-expressions.patch b/SOURCES/0022-parser_json-Support-ranges-in-concat-expressions.patch deleted file mode 100644 index 665aa6b..0000000 --- a/SOURCES/0022-parser_json-Support-ranges-in-concat-expressions.patch +++ /dev/null @@ -1,119 +0,0 @@ -From 68392da523f43b9ae09f824fa68b04b20c9c88f5 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 20 May 2020 11:12:37 +0200 -Subject: [PATCH] parser_json: Support ranges in concat expressions - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1805798 -Upstream Status: nftables commit 9475ca305a993 - -commit 9475ca305a993751b05cf26ef8e785a00de98b94 -Author: Phil Sutter -Date: Fri Mar 6 16:15:48 2020 +0100 - - parser_json: Support ranges in concat expressions - - Duplicate commit 8ac2f3b2fca38's changes to bison parser into JSON - parser by introducing a new context flag signalling we're parsing - concatenated expressions. 
- - Fixes: 8ac2f3b2fca38 ("src: Add support for concatenated set ranges") - Signed-off-by: Phil Sutter - Acked-by: Eric Garver ---- - src/parser_json.c | 51 +++++++++++++++++++++++++++-------------------- - 1 file changed, 29 insertions(+), 22 deletions(-) - -diff --git a/src/parser_json.c b/src/parser_json.c -index 031930e..c48faa8 100644 ---- a/src/parser_json.c -+++ b/src/parser_json.c -@@ -40,6 +40,7 @@ - #define CTX_F_MANGLE (1 << 5) - #define CTX_F_SES (1 << 6) /* set_elem_expr_stmt */ - #define CTX_F_MAP (1 << 7) /* LHS of map_expr */ -+#define CTX_F_CONCAT (1 << 8) /* inside concat_expr */ - - struct json_ctx { - struct input_descriptor indesc; -@@ -99,6 +100,7 @@ static struct expr *json_parse_primary_expr(struct json_ctx *ctx, json_t *root); - static struct expr *json_parse_set_rhs_expr(struct json_ctx *ctx, json_t *root); - static struct expr *json_parse_set_elem_expr_stmt(struct json_ctx *ctx, json_t *root); - static struct expr *json_parse_map_lhs_expr(struct json_ctx *ctx, json_t *root); -+static struct expr *json_parse_concat_elem_expr(struct json_ctx *ctx, json_t *root); - static struct stmt *json_parse_stmt(struct json_ctx *ctx, json_t *root); - - /* parsing helpers */ -@@ -1058,7 +1060,7 @@ static struct expr *json_parse_concat_expr(struct json_ctx *ctx, - } - - json_array_foreach(root, index, value) { -- tmp = json_parse_primary_expr(ctx, value); -+ tmp = json_parse_concat_elem_expr(ctx, value); - if (!tmp) { - json_error(ctx, "Parsing expr at index %zd failed.", index); - expr_free(expr); -@@ -1354,28 +1356,28 @@ static struct expr *json_parse_expr(struct json_ctx *ctx, json_t *root) - { "set", json_parse_set_expr, CTX_F_RHS | CTX_F_STMT }, /* allow this as stmt expr because that allows set references */ - { "map", json_parse_map_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS }, - /* below three are multiton_rhs_expr */ -- { "prefix", json_parse_prefix_expr, CTX_F_RHS | CTX_F_STMT }, -- { "range", json_parse_range_expr, CTX_F_RHS | CTX_F_STMT }, -- { "payload", json_parse_payload_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_MANGLE | CTX_F_SES | CTX_F_MAP }, -- { "exthdr", json_parse_exthdr_expr, CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP }, -- { "tcp option", json_parse_tcp_option_expr, CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_MANGLE | CTX_F_SES }, -- { "ip option", json_parse_ip_option_expr, CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_MANGLE | CTX_F_SES }, -- { "meta", json_parse_meta_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_MANGLE | CTX_F_SES | CTX_F_MAP }, -- { "osf", json_parse_osf_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_MAP }, -- { "ipsec", json_parse_xfrm_expr, CTX_F_PRIMARY | CTX_F_MAP }, -- { "socket", json_parse_socket_expr, CTX_F_PRIMARY }, -- { "rt", json_parse_rt_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP }, -- { "ct", json_parse_ct_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_MANGLE | CTX_F_SES | CTX_F_MAP }, -- { "numgen", json_parse_numgen_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP }, -+ { "prefix", json_parse_prefix_expr, CTX_F_RHS | CTX_F_STMT | CTX_F_CONCAT }, -+ { "range", json_parse_range_expr, CTX_F_RHS | CTX_F_STMT | CTX_F_CONCAT }, -+ { "payload", json_parse_payload_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_MANGLE | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, -+ { "exthdr", json_parse_exthdr_expr, CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, -+ { "tcp option", json_parse_tcp_option_expr, CTX_F_PRIMARY | 
CTX_F_SET_RHS | CTX_F_MANGLE | CTX_F_SES | CTX_F_CONCAT }, -+ { "ip option", json_parse_ip_option_expr, CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_MANGLE | CTX_F_SES | CTX_F_CONCAT }, -+ { "meta", json_parse_meta_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_MANGLE | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, -+ { "osf", json_parse_osf_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_MAP | CTX_F_CONCAT }, -+ { "ipsec", json_parse_xfrm_expr, CTX_F_PRIMARY | CTX_F_MAP | CTX_F_CONCAT }, -+ { "socket", json_parse_socket_expr, CTX_F_PRIMARY | CTX_F_CONCAT }, -+ { "rt", json_parse_rt_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, -+ { "ct", json_parse_ct_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_MANGLE | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, -+ { "numgen", json_parse_numgen_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, - /* below two are hash expr */ -- { "jhash", json_parse_hash_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP }, -- { "symhash", json_parse_hash_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP }, -- { "fib", json_parse_fib_expr, CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP }, -- { "|", json_parse_binop_expr, CTX_F_RHS | CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP }, -- { "^", json_parse_binop_expr, CTX_F_RHS | CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP }, -- { "&", json_parse_binop_expr, CTX_F_RHS | CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP }, -- { ">>", json_parse_binop_expr, CTX_F_RHS | CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP }, -- { "<<", json_parse_binop_expr, CTX_F_RHS | CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP }, -+ { "jhash", json_parse_hash_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, -+ { "symhash", json_parse_hash_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, -+ { "fib", json_parse_fib_expr, CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, -+ { "|", json_parse_binop_expr, CTX_F_RHS | CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, -+ { "^", json_parse_binop_expr, CTX_F_RHS | CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, -+ { "&", json_parse_binop_expr, CTX_F_RHS | CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, -+ { ">>", json_parse_binop_expr, CTX_F_RHS | CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, -+ { "<<", json_parse_binop_expr, CTX_F_RHS | CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, - { "accept", json_parse_verdict_expr, CTX_F_RHS | CTX_F_SET_RHS }, - { "drop", json_parse_verdict_expr, CTX_F_RHS | CTX_F_SET_RHS }, - { "continue", json_parse_verdict_expr, CTX_F_RHS | CTX_F_SET_RHS }, -@@ -1500,6 +1502,11 @@ static struct expr *json_parse_map_lhs_expr(struct json_ctx *ctx, json_t *root) - return json_parse_flagged_expr(ctx, CTX_F_MAP, root); - } - -+static struct expr *json_parse_concat_elem_expr(struct json_ctx *ctx, json_t *root) -+{ -+ return json_parse_flagged_expr(ctx, CTX_F_CONCAT, root); -+} -+ - static struct expr *json_parse_dtype_expr(struct json_ctx *ctx, json_t *root) - { - if (json_is_string(root)) { --- -2.31.1 - diff --git 
a/SOURCES/0023-doc-Document-notrack-statement.patch b/SOURCES/0023-doc-Document-notrack-statement.patch deleted file mode 100644 index d0aa129..0000000 --- a/SOURCES/0023-doc-Document-notrack-statement.patch +++ /dev/null @@ -1,51 +0,0 @@ -From f7a31d5c3277b29f104fd8ff48df24c8bc790f19 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 24 Jun 2020 18:46:39 +0200 -Subject: [PATCH] doc: Document notrack statement - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1841292 -Upstream Status: nftables commit f16fbe76f62dc - -commit f16fbe76f62dcb9f7395d1837ad2d056463ba55f -Author: Phil Sutter -Date: Mon Jun 22 15:07:40 2020 +0200 - - doc: Document notrack statement - - Merely a stub, but better to mention it explicitly instead of having it - appear in synproxy examples and letting users guess as to what it does. - - Signed-off-by: Phil Sutter - Reviewed-by: Florian Westphal ---- - doc/statements.txt | 14 ++++++++++++++ - 1 file changed, 14 insertions(+) - -diff --git a/doc/statements.txt b/doc/statements.txt -index 3b82436..749533a 100644 ---- a/doc/statements.txt -+++ b/doc/statements.txt -@@ -262,6 +262,20 @@ table inet raw { - ct event set new,related,destroy - -------------------------------------- - -+NOTRACK STATEMENT -+~~~~~~~~~~~~~~~~~ -+The notrack statement allows to disable connection tracking for certain -+packets. -+ -+[verse] -+*notrack* -+ -+Note that for this statement to be effective, it has to be applied to packets -+before a conntrack lookup happens. Therefore, it needs to sit in a chain with -+either prerouting or output hook and a hook priority of -300 or less. -+ -+See SYNPROXY STATEMENT for an example usage. -+ - META STATEMENT - ~~~~~~~~~~~~~~ - A meta statement sets the value of a meta expression. The existing meta fields --- -2.31.1 - diff --git a/SOURCES/0023-netlink_linearize-fix-timeout-with-map-updates.patch b/SOURCES/0023-netlink_linearize-fix-timeout-with-map-updates.patch new file mode 100644 index 0000000..28725cc --- /dev/null +++ b/SOURCES/0023-netlink_linearize-fix-timeout-with-map-updates.patch @@ -0,0 +1,66 @@ +From d6e25e9fb09649963852ba79a249efeb067c6db4 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Fri, 17 Feb 2023 17:52:16 +0100 +Subject: [PATCH] netlink_linearize: fix timeout with map updates + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 284c038ef4c69 + +commit 284c038ef4c69d042ef91272d90c143019ecea1f +Author: Florian Westphal +Date: Mon Dec 12 11:04:35 2022 +0100 + + netlink_linearize: fix timeout with map updates + + Map updates can use timeouts, just like with sets, but the + linearization step did not pass this info to the kernel. + + meta l4proto tcp update @pinned { ip saddr . ct original proto-src timeout 90s : ip daddr . tcp dport + + Listing this won't show the "timeout 90s" because kernel never saw it to + begin with. + + Also update evaluation step to reject a timeout that was set on + the data part: Timeouts are only allowed for the key-value pair + as a whole. 
+ + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + src/evaluate.c | 3 +++ + src/netlink_linearize.c | 4 ++++ + 2 files changed, 7 insertions(+) + +diff --git a/src/evaluate.c b/src/evaluate.c +index 7f81411..6d0a0f5 100644 +--- a/src/evaluate.c ++++ b/src/evaluate.c +@@ -3858,6 +3858,9 @@ static int stmt_evaluate_map(struct eval_ctx *ctx, struct stmt *stmt) + if (stmt->map.data->comment != NULL) + return expr_error(ctx->msgs, stmt->map.data, + "Data expression comments are not supported"); ++ if (stmt->map.data->timeout > 0) ++ return expr_error(ctx->msgs, stmt->map.data, ++ "Data expression timeouts are not supported"); + + list_for_each_entry(this, &stmt->map.stmt_list, list) { + if (stmt_evaluate(ctx, this) < 0) +diff --git a/src/netlink_linearize.c b/src/netlink_linearize.c +index c8bbcb7..6de0a96 100644 +--- a/src/netlink_linearize.c ++++ b/src/netlink_linearize.c +@@ -1520,6 +1520,10 @@ static void netlink_gen_map_stmt(struct netlink_linearize_ctx *ctx, + nftnl_expr_set_u32(nle, NFTNL_EXPR_DYNSET_SET_ID, set->handle.set_id); + nft_rule_add_expr(ctx, nle, &stmt->location); + ++ if (stmt->map.key->timeout > 0) ++ nftnl_expr_set_u64(nle, NFTNL_EXPR_DYNSET_TIMEOUT, ++ stmt->map.key->timeout); ++ + list_for_each_entry(this, &stmt->map.stmt_list, list) + num_stmts++; + +-- +2.41.0.rc1 + diff --git a/SOURCES/0024-JSON-Improve-performance-of-json_events_cb.patch b/SOURCES/0024-JSON-Improve-performance-of-json_events_cb.patch deleted file mode 100644 index baa1dca..0000000 --- a/SOURCES/0024-JSON-Improve-performance-of-json_events_cb.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 58d8baa70172bb9862276ac5f542248c88d3faf4 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 24 Jun 2020 18:48:14 +0200 -Subject: [PATCH] JSON: Improve performance of json_events_cb() - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1835300 -Upstream Status: nftables commit c96c7da272e33 - -commit c96c7da272e33a34770c4de4e3e50f7ed264672e -Author: Phil Sutter -Date: Wed May 13 16:29:51 2020 +0200 - - JSON: Improve performance of json_events_cb() - - The function tries to insert handles into JSON input for echo option. - Yet there may be nothing to do if the given netlink message doesn't - contain a handle, e.g. if it is an 'add element' command. Calling - seqnum_to_json() is pointless overhead in that case, and if input is - large this overhead is significant. Better wait with that call until - after checking if the message is relevant at all. 
- - Signed-off-by: Phil Sutter - Acked-by: Eric Garver ---- - src/parser_json.c | 9 ++++++--- - 1 file changed, 6 insertions(+), 3 deletions(-) - -diff --git a/src/parser_json.c b/src/parser_json.c -index c48faa8..ce8e566 100644 ---- a/src/parser_json.c -+++ b/src/parser_json.c -@@ -3845,12 +3845,15 @@ static uint64_t handle_from_nlmsg(const struct nlmsghdr *nlh) - } - int json_events_cb(const struct nlmsghdr *nlh, struct netlink_mon_handler *monh) - { -- json_t *tmp, *json = seqnum_to_json(nlh->nlmsg_seq); - uint64_t handle = handle_from_nlmsg(nlh); -+ json_t *tmp, *json; - void *iter; - -- /* might be anonymous set, ignore message */ -- if (!json || !handle) -+ if (!handle) -+ return MNL_CB_OK; -+ -+ json = seqnum_to_json(nlh->nlmsg_seq); -+ if (!json) - return MNL_CB_OK; - - tmp = json_object_get(json, "add"); --- -2.31.1 - diff --git a/SOURCES/0024-tests-add-a-test-case-for-map-update-from-packet-pat.patch b/SOURCES/0024-tests-add-a-test-case-for-map-update-from-packet-pat.patch new file mode 100644 index 0000000..75ffdd2 --- /dev/null +++ b/SOURCES/0024-tests-add-a-test-case-for-map-update-from-packet-pat.patch @@ -0,0 +1,73 @@ +From 254a7ef45c890e297d9390a6f20b9132ad17c5d1 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Fri, 17 Feb 2023 17:52:16 +0100 +Subject: [PATCH] tests: add a test case for map update from packet path with + concat + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit b8e1940aa1907 + +commit b8e1940aa190773b23b3ee9349beb20c31f42bdb +Author: Florian Westphal +Date: Mon Dec 12 11:04:36 2022 +0100 + + tests: add a test case for map update from packet path with concat + + add a second test case for map updates, this time with both + a timeout and a data element that consists of a concatenation. + + Signed-off-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + .../maps/dumps/typeof_maps_concat_update_0.nft | 12 ++++++++++++ + .../testcases/maps/typeof_maps_concat_update_0 | 18 ++++++++++++++++++ + 2 files changed, 30 insertions(+) + create mode 100644 tests/shell/testcases/maps/dumps/typeof_maps_concat_update_0.nft + create mode 100755 tests/shell/testcases/maps/typeof_maps_concat_update_0 + +diff --git a/tests/shell/testcases/maps/dumps/typeof_maps_concat_update_0.nft b/tests/shell/testcases/maps/dumps/typeof_maps_concat_update_0.nft +new file mode 100644 +index 0000000..d91b795 +--- /dev/null ++++ b/tests/shell/testcases/maps/dumps/typeof_maps_concat_update_0.nft +@@ -0,0 +1,12 @@ ++table ip foo { ++ map pinned { ++ typeof ip daddr . tcp dport : ip daddr . tcp dport ++ size 65535 ++ flags dynamic,timeout ++ timeout 6m ++ } ++ ++ chain pr { ++ update @pinned { ip saddr . ct original proto-dst timeout 1m30s : ip daddr . tcp dport } ++ } ++} +diff --git a/tests/shell/testcases/maps/typeof_maps_concat_update_0 b/tests/shell/testcases/maps/typeof_maps_concat_update_0 +new file mode 100755 +index 0000000..645ae14 +--- /dev/null ++++ b/tests/shell/testcases/maps/typeof_maps_concat_update_0 +@@ -0,0 +1,18 @@ ++#!/bin/bash ++ ++# check update statement does print both concatentations (key and data). ++ ++EXPECTED="table ip foo { ++ map pinned { ++ typeof ip daddr . tcp dport : ip daddr . tcp dport ++ size 65535 ++ flags dynamic,timeout ++ timeout 6m ++ } ++ chain pr { ++ meta l4proto tcp update @pinned { ip saddr . ct original proto-dst timeout 1m30s : ip daddr . 
tcp dport } ++ } ++}" ++ ++set -e ++$NFT -f - <<< $EXPECTED +-- +2.41.0.rc1 + diff --git a/SOURCES/0025-owner-Fix-potential-array-out-of-bounds-access.patch b/SOURCES/0025-owner-Fix-potential-array-out-of-bounds-access.patch new file mode 100644 index 0000000..5b86c4e --- /dev/null +++ b/SOURCES/0025-owner-Fix-potential-array-out-of-bounds-access.patch @@ -0,0 +1,44 @@ +From dbb1bcfbe480866f06977b2648b0a1595091b2b9 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Tue, 21 Feb 2023 19:50:40 +0100 +Subject: [PATCH] owner: Fix potential array out of bounds access + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 9967911e3dabb + +commit 9967911e3dabb32901617e81e56602af3b37287f +Author: Pablo Neira Ayuso +Date: Wed Dec 21 17:37:46 2022 +0100 + + owner: Fix potential array out of bounds access + + If the link target length exceeds 'sizeof(tmp)' bytes, readlink() will + return 'sizeof(tmp)'. Using this value as index is illegal. + + Original update from Phil, for the conntrack-tools tree, which also has + a copy of this function. + + Fixes: 6d085b22a8b5 ("table: support for the table owner flag") + Signed-off-by: Pablo Neira Ayuso + +Signed-off-by: Phil Sutter +--- + src/owner.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/owner.c b/src/owner.c +index 2d98a2e..20bed38 100644 +--- a/src/owner.c ++++ b/src/owner.c +@@ -66,7 +66,7 @@ static char *portid2name(pid_t pid, uint32_t portid, unsigned long inode) + continue; + + rl = readlink(procname, tmp, sizeof(tmp)); +- if (rl <= 0 || rl > (ssize_t)sizeof(tmp)) ++ if (rl <= 0 || rl >= (ssize_t)sizeof(tmp)) + continue; + + tmp[rl] = 0; +-- +2.41.0.rc1 + diff --git a/SOURCES/0025-segtree-Fix-missing-expires-value-in-prefixes.patch b/SOURCES/0025-segtree-Fix-missing-expires-value-in-prefixes.patch deleted file mode 100644 index 06b95e6..0000000 --- a/SOURCES/0025-segtree-Fix-missing-expires-value-in-prefixes.patch +++ /dev/null @@ -1,42 +0,0 @@ -From ab62f33df5ef33f6eff8d88d9475a01822a2f625 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Tue, 30 Jun 2020 16:20:22 +0200 -Subject: [PATCH] segtree: Fix missing expires value in prefixes - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1832235 -Upstream Status: nftables commit 60ba9c22fecc0 - -commit 60ba9c22fecc0ca9bb2a61f6ad39bceed1aee38f -Author: Phil Sutter -Date: Tue Apr 28 20:54:03 2020 +0200 - - segtree: Fix missing expires value in prefixes - - This probable copy'n'paste bug prevented 'expiration' field from being - populated when turning a range into a prefix in - interval_map_decompose(). Consequently, interval sets with timeout did - print expiry value for ranges (such as 10.0.0.1-10.0.0.5) but not - prefixes (10.0.0.0/8, for instance). 
- - Fixes: bb0e6d8a2851b ("segtree: incorrect handling of comments and timeouts with mapping") - Signed-off-by: Phil Sutter ---- - src/segtree.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/segtree.c b/src/segtree.c -index e859f84..1ba4363 100644 ---- a/src/segtree.c -+++ b/src/segtree.c -@@ -1086,7 +1086,7 @@ void interval_map_decompose(struct expr *set) - prefix->comment = xstrdup(low->comment); - if (low->timeout) - prefix->timeout = low->timeout; -- if (low->left->expiration) -+ if (low->expiration) - prefix->expiration = low->expiration; - } - --- -2.31.1 - diff --git a/SOURCES/0026-mnl-dump_nf_hooks-leaks-memory-in-error-path.patch b/SOURCES/0026-mnl-dump_nf_hooks-leaks-memory-in-error-path.patch new file mode 100644 index 0000000..2d7936d --- /dev/null +++ b/SOURCES/0026-mnl-dump_nf_hooks-leaks-memory-in-error-path.patch @@ -0,0 +1,57 @@ +From b5fd150a3fbad94381276bedc816d4a6fdecfaf9 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Tue, 21 Feb 2023 19:50:41 +0100 +Subject: [PATCH] mnl: dump_nf_hooks() leaks memory in error path + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit ef66f321e49b3 + +commit ef66f321e49b337c7e678bb90d6acb94f331dfc4 +Author: Phil Sutter +Date: Wed Jan 11 12:28:15 2023 +0100 + + mnl: dump_nf_hooks() leaks memory in error path + + Have to free the basehook object before returning to caller. + + Fixes: 4694f7230195b ("src: add support for base hook dumping") + Signed-off-by: Phil Sutter + +Signed-off-by: Phil Sutter +--- + src/mnl.c | 11 +++++++++-- + 1 file changed, 9 insertions(+), 2 deletions(-) + +diff --git a/src/mnl.c b/src/mnl.c +index 7dd77be..269d3f1 100644 +--- a/src/mnl.c ++++ b/src/mnl.c +@@ -2211,16 +2211,23 @@ static int dump_nf_hooks(const struct nlmsghdr *nlh, void *_data) + struct nlattr *nested[NFNLA_HOOK_INFO_MAX + 1] = {}; + uint32_t type; + +- if (mnl_attr_parse_nested(tb[NFNLA_HOOK_CHAIN_INFO], dump_nf_chain_info_cb, nested) < 0) ++ if (mnl_attr_parse_nested(tb[NFNLA_HOOK_CHAIN_INFO], ++ dump_nf_chain_info_cb, nested) < 0) { ++ basehook_free(hook); + return -1; ++ } + + type = ntohl(mnl_attr_get_u32(nested[NFNLA_HOOK_INFO_TYPE])); + if (type == NFNL_HOOK_TYPE_NFTABLES) { + struct nlattr *info[NFNLA_CHAIN_MAX + 1] = {}; + const char *tablename, *chainname; + +- if (mnl_attr_parse_nested(nested[NFNLA_HOOK_INFO_DESC], dump_nf_attr_chain_cb, info) < 0) ++ if (mnl_attr_parse_nested(nested[NFNLA_HOOK_INFO_DESC], ++ dump_nf_attr_chain_cb, ++ info) < 0) { ++ basehook_free(hook); + return -1; ++ } + + tablename = mnl_attr_get_str(info[NFNLA_CHAIN_TABLE]); + chainname = mnl_attr_get_str(info[NFNLA_CHAIN_NAME]); +-- +2.41.0.rc1 + diff --git a/SOURCES/0026-segtree-Use-expr_clone-in-get_set_interval_.patch b/SOURCES/0026-segtree-Use-expr_clone-in-get_set_interval_.patch deleted file mode 100644 index f54752a..0000000 --- a/SOURCES/0026-segtree-Use-expr_clone-in-get_set_interval_.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 119fbcbd8c37aac314d6ffa6225ab24ee4b0e31e Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Tue, 30 Jun 2020 16:20:23 +0200 -Subject: [PATCH] segtree: Use expr_clone in get_set_interval_*() - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1832235 -Upstream Status: nftables commit a2eedcc89d2ed - -commit a2eedcc89d2ed40411c26d53579300c4f1ccb83d -Author: Phil Sutter -Date: Thu Apr 30 13:45:40 2020 +0200 - - segtree: Use expr_clone in get_set_interval_*() - - Both functions perform interval set lookups with either start and end or - only start values 
as input. Interestingly, in practice they either see - values which are not contained or which match an existing range exactly. - - Make use of the above and just return a clone of the matching entry - instead of creating a new one based on input data. - - Signed-off-by: Phil Sutter ---- - src/segtree.c | 8 ++------ - 1 file changed, 2 insertions(+), 6 deletions(-) - -diff --git a/src/segtree.c b/src/segtree.c -index 1ba4363..dc4db6b 100644 ---- a/src/segtree.c -+++ b/src/segtree.c -@@ -695,9 +695,7 @@ static struct expr *get_set_interval_find(const struct table *table, - range_expr_value_high(high, i); - if (mpz_cmp(left->key->value, low) >= 0 && - mpz_cmp(right->key->value, high) <= 0) { -- range = range_expr_alloc(&internal_location, -- expr_clone(left->key), -- expr_clone(right->key)); -+ range = expr_clone(i->key); - goto out; - } - break; -@@ -729,9 +727,7 @@ static struct expr *get_set_interval_end(const struct table *table, - case EXPR_RANGE: - range_expr_value_low(low, i); - if (mpz_cmp(low, left->key->value) == 0) { -- range = range_expr_alloc(&internal_location, -- expr_clone(left->key), -- expr_clone(i->key->right)); -+ range = expr_clone(i->key); - goto out; - } - break; --- -2.31.1 - diff --git a/SOURCES/0027-meta-parse_iso_date-returns-boolean.patch b/SOURCES/0027-meta-parse_iso_date-returns-boolean.patch new file mode 100644 index 0000000..32fee0e --- /dev/null +++ b/SOURCES/0027-meta-parse_iso_date-returns-boolean.patch @@ -0,0 +1,41 @@ +From f5f1b17763264d88593eba175438818cf6533471 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Tue, 21 Feb 2023 19:50:41 +0100 +Subject: [PATCH] meta: parse_iso_date() returns boolean + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit db6e97bd667bf + +commit db6e97bd667bf205cee22049f9d0fd6550cb43a7 +Author: Phil Sutter +Date: Wed Jan 11 11:26:41 2023 +0100 + + meta: parse_iso_date() returns boolean + + Returning ts if 'ts == (time_t) -1' signals success to caller despite + failure. + + Fixes: 4460b839b945a ("meta: fix compiler warning in date_type_parse()") + Signed-off-by: Phil Sutter + +Signed-off-by: Phil Sutter +--- + src/meta.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/meta.c b/src/meta.c +index 80ace25..73bd1c4 100644 +--- a/src/meta.c ++++ b/src/meta.c +@@ -433,7 +433,7 @@ success: + cur_tm = localtime(&ts); + + if (ts == (time_t) -1 || cur_tm == NULL) +- return ts; ++ return false; + + /* Substract tm_gmtoff to get the current time */ + *tstamp = ts - cur_tm->tm_gmtoff; +-- +2.41.0.rc1 + diff --git a/SOURCES/0027-segtree-Merge-get_set_interval_find-and-get_set_inte.patch b/SOURCES/0027-segtree-Merge-get_set_interval_find-and-get_set_inte.patch deleted file mode 100644 index 2506813..0000000 --- a/SOURCES/0027-segtree-Merge-get_set_interval_find-and-get_set_inte.patch +++ /dev/null @@ -1,131 +0,0 @@ -From 40cdcccf0fc6f4d0d4c2248d4bd9bf3193a922e9 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Tue, 30 Jun 2020 16:20:23 +0200 -Subject: [PATCH] segtree: Merge get_set_interval_find() and - get_set_interval_end() - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1832235 -Upstream Status: nftables commit f21e73d6700b8 - -commit f21e73d6700b873eb1a295f43bbad9caaca577e2 -Author: Phil Sutter -Date: Thu Apr 30 13:57:35 2020 +0200 - - segtree: Merge get_set_interval_find() and get_set_interval_end() - - Both functions were very similar already. 
Under the assumption that they - will always either see a range (or start of) that matches exactly or not - at all, reduce complexity and make get_set_interval_find() accept NULL - (left or) right values. This way it becomes a full replacement for - get_set_interval_end(). - - Signed-off-by: Phil Sutter ---- - src/segtree.c | 63 +++++++++++++-------------------------------------- - 1 file changed, 16 insertions(+), 47 deletions(-) - -diff --git a/src/segtree.c b/src/segtree.c -index dc4db6b..6e1f696 100644 ---- a/src/segtree.c -+++ b/src/segtree.c -@@ -681,63 +681,31 @@ static struct expr *get_set_interval_find(const struct table *table, - { - struct expr *range = NULL; - struct set *set; -- mpz_t low, high; - struct expr *i; -+ mpz_t val; - - set = set_lookup(table, set_name); -- mpz_init2(low, set->key->len); -- mpz_init2(high, set->key->len); -+ mpz_init2(val, set->key->len); - - list_for_each_entry(i, &set->init->expressions, list) { - switch (i->key->etype) { - case EXPR_RANGE: -- range_expr_value_low(low, i); -- range_expr_value_high(high, i); -- if (mpz_cmp(left->key->value, low) >= 0 && -- mpz_cmp(right->key->value, high) <= 0) { -- range = expr_clone(i->key); -- goto out; -- } -- break; -- default: -- break; -- } -- } --out: -- mpz_clear(low); -- mpz_clear(high); -- -- return range; --} -- --static struct expr *get_set_interval_end(const struct table *table, -- const char *set_name, -- struct expr *left) --{ -- struct expr *i, *range = NULL; -- struct set *set; -- mpz_t low, high; -+ range_expr_value_low(val, i); -+ if (left && mpz_cmp(left->key->value, val)) -+ break; - -- set = set_lookup(table, set_name); -- mpz_init2(low, set->key->len); -- mpz_init2(high, set->key->len); -+ range_expr_value_high(val, i); -+ if (right && mpz_cmp(right->key->value, val)) -+ break; - -- list_for_each_entry(i, &set->init->expressions, list) { -- switch (i->key->etype) { -- case EXPR_RANGE: -- range_expr_value_low(low, i); -- if (mpz_cmp(low, left->key->value) == 0) { -- range = expr_clone(i->key); -- goto out; -- } -- break; -+ range = expr_clone(i->key); -+ goto out; - default: - break; - } - } - out: -- mpz_clear(low); -- mpz_clear(high); -+ mpz_clear(val); - - return range; - } -@@ -767,9 +735,9 @@ int get_set_decompose(struct table *table, struct set *set) - left = NULL; - } else { - if (left) { -- range = get_set_interval_end(table, -- set->handle.set.name, -- left); -+ range = get_set_interval_find(table, -+ set->handle.set.name, -+ left, NULL); - if (range) - compound_expr_add(new_init, range); - else -@@ -780,7 +748,8 @@ int get_set_decompose(struct table *table, struct set *set) - } - } - if (left) { -- range = get_set_interval_end(table, set->handle.set.name, left); -+ range = get_set_interval_find(table, set->handle.set.name, -+ left, NULL); - if (range) - compound_expr_add(new_init, range); - else --- -2.31.1 - diff --git a/SOURCES/0028-netlink-Fix-for-potential-NULL-pointer-deref.patch b/SOURCES/0028-netlink-Fix-for-potential-NULL-pointer-deref.patch new file mode 100644 index 0000000..480aa54 --- /dev/null +++ b/SOURCES/0028-netlink-Fix-for-potential-NULL-pointer-deref.patch @@ -0,0 +1,44 @@ +From 3fbbb074303ec3dafd97fcdeaa0a292068c23140 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Tue, 21 Feb 2023 19:50:41 +0100 +Subject: [PATCH] netlink: Fix for potential NULL-pointer deref + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 927d5674e7bf6 + +commit 927d5674e7bf656428f97c54c9171006e8c3c75e +Author: Phil Sutter +Date: Tue Jan 10 
22:36:58 2023 +0100 + + netlink: Fix for potential NULL-pointer deref + + If memory allocation fails, calloc() returns NULL which was not checked + for. The code seems to expect zero array size though, so simply + replacing this call by one of the x*calloc() ones won't work. So guard + the call also by a check for 'len'. + + Fixes: db0697ce7f602 ("src: support for flowtable listing") + Signed-off-by: Phil Sutter + +Signed-off-by: Phil Sutter +--- + src/netlink.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/src/netlink.c b/src/netlink.c +index 799cf9b..dee1732 100644 +--- a/src/netlink.c ++++ b/src/netlink.c +@@ -1700,7 +1700,8 @@ netlink_delinearize_flowtable(struct netlink_ctx *ctx, + while (dev_array[len]) + len++; + +- flowtable->dev_array = calloc(1, len * sizeof(char *)); ++ if (len) ++ flowtable->dev_array = xmalloc(len * sizeof(char *)); + for (i = 0; i < len; i++) + flowtable->dev_array[i] = xstrdup(dev_array[i]); + +-- +2.41.0.rc1 + diff --git a/SOURCES/0028-tests-0034get_element_0-do-not-discard-stderr.patch b/SOURCES/0028-tests-0034get_element_0-do-not-discard-stderr.patch deleted file mode 100644 index b8615d6..0000000 --- a/SOURCES/0028-tests-0034get_element_0-do-not-discard-stderr.patch +++ /dev/null @@ -1,41 +0,0 @@ -From 4337d4eafe66b594b56b43261c8742d6b65d5ee8 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Tue, 30 Jun 2020 16:20:23 +0200 -Subject: [PATCH] tests: 0034get_element_0: do not discard stderr - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1832235 -Upstream Status: nftables commit ff29e6c09aed9 - -commit ff29e6c09aed922a42e0e0551c34dd5d87067512 -Author: Florian Westphal -Date: Sat Feb 22 00:02:25 2020 +0100 - - tests: 0034get_element_0: do not discard stderr - - run_tests.sh alreadty discards stderr by default, but will show it in - case the test script is run directly (passed as argument). - - Discarding stderr also in the script prevents one from seeing - BUG() assertions and the like. - - Signed-off-by: Florian Westphal ---- - tests/shell/testcases/sets/0034get_element_0 | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tests/shell/testcases/sets/0034get_element_0 b/tests/shell/testcases/sets/0034get_element_0 -index c7e7298..e23dbda 100755 ---- a/tests/shell/testcases/sets/0034get_element_0 -+++ b/tests/shell/testcases/sets/0034get_element_0 -@@ -3,7 +3,7 @@ - RC=0 - - check() { # (elems, expected) -- out=$($NFT get element ip t s "{ $1 }" 2>/dev/null) -+ out=$($NFT get element ip t s "{ $1 }") - out=$(grep "elements =" <<< "$out") - out="${out#* \{ }" - out="${out% \}}" --- -2.31.1 - diff --git a/SOURCES/0029-optimize-Do-not-return-garbage-from-stack.patch b/SOURCES/0029-optimize-Do-not-return-garbage-from-stack.patch new file mode 100644 index 0000000..bd97af1 --- /dev/null +++ b/SOURCES/0029-optimize-Do-not-return-garbage-from-stack.patch @@ -0,0 +1,42 @@ +From 8bdba078567b879054880ec957a78842c5a18848 Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Tue, 21 Feb 2023 19:50:41 +0100 +Subject: [PATCH] optimize: Do not return garbage from stack + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit d4d47e5bdf943 + +commit d4d47e5bdf943be494aeb5d5a29b8f5212acbddf +Author: Phil Sutter +Date: Fri Jan 13 17:09:53 2023 +0100 + + optimize: Do not return garbage from stack + + If input does not contain a single 'add' command (unusual, but + possible), 'ret' value was not initialized by nft_optimize() before + returning its value. 
+ + Fixes: fb298877ece27 ("src: add ruleset optimization infrastructure") + Signed-off-by: Phil Sutter + +Signed-off-by: Phil Sutter +--- + src/optimize.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/optimize.c b/src/optimize.c +index 3a3049d..6514cbb 100644 +--- a/src/optimize.c ++++ b/src/optimize.c +@@ -1017,7 +1017,7 @@ static int cmd_optimize(struct nft_ctx *nft, struct cmd *cmd) + int nft_optimize(struct nft_ctx *nft, struct list_head *cmds) + { + struct cmd *cmd; +- int ret; ++ int ret = 0; + + list_for_each_entry(cmd, cmds, list) { + switch (cmd->op) { +-- +2.41.0.rc1 + diff --git a/SOURCES/0029-segtree-Fix-get-element-command-with-prefixes.patch b/SOURCES/0029-segtree-Fix-get-element-command-with-prefixes.patch deleted file mode 100644 index 7d699a6..0000000 --- a/SOURCES/0029-segtree-Fix-get-element-command-with-prefixes.patch +++ /dev/null @@ -1,135 +0,0 @@ -From 3a2016f539e46183965bada40946e259c33158d9 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Tue, 30 Jun 2020 16:20:23 +0200 -Subject: [PATCH] segtree: Fix get element command with prefixes - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1832235 -Upstream Status: nftables commit 506fb113f7ca4 - -commit 506fb113f7ca4fbb3d6da09ef6f9dc2b31f54a1f -Author: Phil Sutter -Date: Thu Apr 30 14:02:44 2020 +0200 - - segtree: Fix get element command with prefixes - - Code wasn't aware of prefix elements in interval sets. With previous - changes in place, they merely need to be accepted in - get_set_interval_find() - value comparison and expression duplication is - identical to ranges. - - Extend sets/0034get_element_0 test to cover prefixes as well. While - being at it, also cover concatenated ranges. - - Signed-off-by: Phil Sutter ---- - src/segtree.c | 1 + - tests/shell/testcases/sets/0034get_element_0 | 62 ++++++++++++++------ - 2 files changed, 45 insertions(+), 18 deletions(-) - -diff --git a/src/segtree.c b/src/segtree.c -index 6e1f696..073c6ec 100644 ---- a/src/segtree.c -+++ b/src/segtree.c -@@ -689,6 +689,7 @@ static struct expr *get_set_interval_find(const struct table *table, - - list_for_each_entry(i, &set->init->expressions, list) { - switch (i->key->etype) { -+ case EXPR_PREFIX: - case EXPR_RANGE: - range_expr_value_low(val, i); - if (left && mpz_cmp(left->key->value, val)) -diff --git a/tests/shell/testcases/sets/0034get_element_0 b/tests/shell/testcases/sets/0034get_element_0 -index e23dbda..3343529 100755 ---- a/tests/shell/testcases/sets/0034get_element_0 -+++ b/tests/shell/testcases/sets/0034get_element_0 -@@ -2,43 +2,69 @@ - - RC=0 - --check() { # (elems, expected) -- out=$($NFT get element ip t s "{ $1 }") -+check() { # (set, elems, expected) -+ out=$($NFT get element ip t $1 "{ $2 }") - out=$(grep "elements =" <<< "$out") - out="${out#* \{ }" - out="${out% \}}" -- [[ "$out" == "$2" ]] && return -- echo "ERROR: asked for '$1', expecting '$2' but got '$out'" -+ [[ "$out" == "$3" ]] && return -+ echo "ERROR: asked for '$2' in set $1, expecting '$3' but got '$out'" - ((RC++)) - } - - RULESET="add table ip t - add set ip t s { type inet_service; flags interval; } - add element ip t s { 10, 20-30, 40, 50-60 } -+add set ip t ips { type ipv4_addr; flags interval; } -+add element ip t ips { 10.0.0.1, 10.0.0.5-10.0.0.8 } -+add element ip t ips { 10.0.0.128/25, 10.0.1.0/24, 10.0.2.3-10.0.2.12 } -+add set ip t cs { type ipv4_addr . inet_service; flags interval; } -+add element ip t cs { 10.0.0.1 . 22, 10.1.0.0/16 . 1-1024 } -+add element ip t cs { 10.2.0.1-10.2.0.8 . 
1024-65535 } - " - - $NFT -f - <<< "$RULESET" - - # simple cases, (non-)existing values and ranges --check 10 10 --check 11 "" --check 20-30 20-30 --check 15-18 "" -+check s 10 10 -+check s 11 "" -+check s 20-30 20-30 -+check s 15-18 "" - - # multiple single elements, ranges smaller than present --check "10, 40" "10, 40" --check "22-24, 26-28" "20-30, 20-30" --check 21-29 20-30 -+check s "10, 40" "10, 40" -+check s "22-24, 26-28" "20-30, 20-30" -+check s 21-29 20-30 - - # mixed single elements and ranges --check "10, 20" "10, 20-30" --check "10, 22" "10, 20-30" --check "10, 22-24" "10, 20-30" -+check s "10, 20" "10, 20-30" -+check s "10, 22" "10, 20-30" -+check s "10, 22-24" "10, 20-30" - - # non-existing ranges matching elements --check 10-40 "" --check 10-20 "" --check 10-25 "" --check 25-55 "" -+check s 10-40 "" -+check s 10-20 "" -+check s 10-25 "" -+check s 25-55 "" -+ -+# playing with IPs, ranges and prefixes -+check ips 10.0.0.1 10.0.0.1 -+check ips 10.0.0.2 "" -+check ips 10.0.1.0/24 10.0.1.0/24 -+check ips 10.0.1.2/31 10.0.1.0/24 -+check ips 10.0.1.0 10.0.1.0/24 -+check ips 10.0.1.3 10.0.1.0/24 -+check ips 10.0.1.255 10.0.1.0/24 -+check ips 10.0.2.3-10.0.2.12 10.0.2.3-10.0.2.12 -+check ips 10.0.2.10 10.0.2.3-10.0.2.12 -+check ips 10.0.2.12 10.0.2.3-10.0.2.12 -+ -+# test concatenated ranges, i.e. Pi, Pa and Po -+check cs "10.0.0.1 . 22" "10.0.0.1 . 22" -+check cs "10.0.0.1 . 23" "" -+check cs "10.0.0.2 . 22" "" -+check cs "10.1.0.1 . 42" "10.1.0.0/16 . 1-1024" -+check cs "10.1.1.0/24 . 10-20" "10.1.0.0/16 . 1-1024" -+check cs "10.2.0.3 . 20000" "10.2.0.1-10.2.0.8 . 1024-65535" - - exit $RC --- -2.31.1 - diff --git a/SOURCES/0030-include-Resync-nf_tables.h-cache-copy.patch b/SOURCES/0030-include-Resync-nf_tables.h-cache-copy.patch deleted file mode 100644 index 12fcf75..0000000 --- a/SOURCES/0030-include-Resync-nf_tables.h-cache-copy.patch +++ /dev/null @@ -1,45 +0,0 @@ -From 77a93baa622f8aa33fa6182d72b380d980e39574 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Sat, 8 Aug 2020 00:09:06 +0200 -Subject: [PATCH] include: Resync nf_tables.h cache copy - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1820684 -Upstream Status: nftables commit f1e5a0499c077 - -commit f1e5a0499c0773f18bc592dd0da0340120daa482 -Author: Stefano Brivio -Date: Mon Apr 13 21:48:02 2020 +0200 - - include: Resync nf_tables.h cache copy - - Get this header in sync with nf.git as of commit ef516e8625dd. 
- - Signed-off-by: Stefano Brivio - Signed-off-by: Pablo Neira Ayuso ---- - include/linux/netfilter/nf_tables.h | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/include/linux/netfilter/nf_tables.h b/include/linux/netfilter/nf_tables.h -index 1a99df3..9b54a86 100644 ---- a/include/linux/netfilter/nf_tables.h -+++ b/include/linux/netfilter/nf_tables.h -@@ -274,6 +274,7 @@ enum nft_rule_compat_attributes { - * @NFT_SET_TIMEOUT: set uses timeouts - * @NFT_SET_EVAL: set can be updated from the evaluation path - * @NFT_SET_OBJECT: set contains stateful objects -+ * @NFT_SET_CONCAT: set contains a concatenation - */ - enum nft_set_flags { - NFT_SET_ANONYMOUS = 0x1, -@@ -283,6 +284,7 @@ enum nft_set_flags { - NFT_SET_TIMEOUT = 0x10, - NFT_SET_EVAL = 0x20, - NFT_SET_OBJECT = 0x40, -+ NFT_SET_CONCAT = 0x80, - }; - - /** --- -2.31.1 - diff --git a/SOURCES/0030-optimize-Clarify-chain_optimize-array-allocations.patch b/SOURCES/0030-optimize-Clarify-chain_optimize-array-allocations.patch new file mode 100644 index 0000000..362d0f5 --- /dev/null +++ b/SOURCES/0030-optimize-Clarify-chain_optimize-array-allocations.patch @@ -0,0 +1,51 @@ +From 2438c7dafba336236e2e5dc1a6c57b6e157327cf Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Tue, 21 Feb 2023 19:50:41 +0100 +Subject: [PATCH] optimize: Clarify chain_optimize() array allocations + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit b83a0416cdc88 + +commit b83a0416cdc881c6ac35739cd858e4fe5fb2e04f +Author: Phil Sutter +Date: Tue Jan 10 22:13:44 2023 +0100 + + optimize: Clarify chain_optimize() array allocations + + Arguments passed to sizeof() where deemed suspicious by covscan due to + the different type. Consistently specify size of an array 'a' using + 'sizeof(*a) * nmemb'. + + For the statement arrays in stmt_matrix, even use xzalloc_array() since + the item count is fixed and therefore can't be zero. 
+ + Fixes: fb298877ece27 ("src: add ruleset optimization infrastructure") + Signed-off-by: Phil Sutter + +Signed-off-by: Phil Sutter +--- + src/optimize.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +diff --git a/src/optimize.c b/src/optimize.c +index 6514cbb..baa6abc 100644 +--- a/src/optimize.c ++++ b/src/optimize.c +@@ -918,10 +918,11 @@ static int chain_optimize(struct nft_ctx *nft, struct list_head *rules) + ctx->num_rules++; + } + +- ctx->rule = xzalloc(sizeof(ctx->rule) * ctx->num_rules); +- ctx->stmt_matrix = xzalloc(sizeof(struct stmt *) * ctx->num_rules); ++ ctx->rule = xzalloc(sizeof(*ctx->rule) * ctx->num_rules); ++ ctx->stmt_matrix = xzalloc(sizeof(*ctx->stmt_matrix) * ctx->num_rules); + for (i = 0; i < ctx->num_rules; i++) +- ctx->stmt_matrix[i] = xzalloc(sizeof(struct stmt *) * MAX_STMTS); ++ ctx->stmt_matrix[i] = xzalloc_array(MAX_STMTS, ++ sizeof(**ctx->stmt_matrix)); + + merge = xzalloc(sizeof(*merge) * ctx->num_rules); + +-- +2.41.0.rc1 + diff --git a/SOURCES/0031-netlink_delinearize-Sanitize-concat-data-element-dec.patch b/SOURCES/0031-netlink_delinearize-Sanitize-concat-data-element-dec.patch new file mode 100644 index 0000000..633f671 --- /dev/null +++ b/SOURCES/0031-netlink_delinearize-Sanitize-concat-data-element-dec.patch @@ -0,0 +1,42 @@ +From 21d7fa6f6a40d56c5c23eedd6ddb6a411fb8e62b Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Tue, 21 Feb 2023 19:50:41 +0100 +Subject: [PATCH] netlink_delinearize: Sanitize concat data element decoding + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit 1344d9e53ba4d + +commit 1344d9e53ba4d67cedd13a2c76a970fc7ce65683 +Author: Phil Sutter +Date: Tue Feb 21 18:36:01 2023 +0100 + + netlink_delinearize: Sanitize concat data element decoding + + The call to netlink_get_register() might return NULL, catch this before + dereferencing the pointer. 
+ + Fixes: db59a5c1204c9 ("netlink_delinearize: fix decoding of concat data element") + Signed-off-by: Phil Sutter + Acked-by: Florian Westphal + +Signed-off-by: Phil Sutter +--- + src/netlink_delinearize.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/netlink_delinearize.c b/src/netlink_delinearize.c +index e9e0845..cadb8ec 100644 +--- a/src/netlink_delinearize.c ++++ b/src/netlink_delinearize.c +@@ -1660,7 +1660,7 @@ static void netlink_parse_dynset(struct netlink_parse_ctx *ctx, + sreg_data = netlink_parse_register(nle, NFTNL_EXPR_DYNSET_SREG_DATA); + expr_data = netlink_get_register(ctx, loc, sreg_data); + +- if (expr_data->len < set->data->len) { ++ if (expr_data && expr_data->len < set->data->len) { + expr_free(expr_data); + expr_data = netlink_parse_concat_expr(ctx, loc, sreg_data, set->data->len); + if (expr_data == NULL) +-- +2.41.0.rc1 + diff --git a/SOURCES/0031-src-Set-NFT_SET_CONCAT-flag-for-sets-with-concatenat.patch b/SOURCES/0031-src-Set-NFT_SET_CONCAT-flag-for-sets-with-concatenat.patch deleted file mode 100644 index d8149bf..0000000 --- a/SOURCES/0031-src-Set-NFT_SET_CONCAT-flag-for-sets-with-concatenat.patch +++ /dev/null @@ -1,72 +0,0 @@ -From 5566405cc171c8fa84e0a13ea96b89245a3fb512 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Sat, 8 Aug 2020 00:05:48 +0200 -Subject: [PATCH] src: Set NFT_SET_CONCAT flag for sets with concatenated - ranges - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1820684 -Upstream Status: nftables commit 09441b5e92cee - -commit 09441b5e92ceea60198a35cd657904fa7a10ee54 -Author: Stefano Brivio -Date: Mon Apr 13 21:48:03 2020 +0200 - - src: Set NFT_SET_CONCAT flag for sets with concatenated ranges - - Pablo reports that nft, after commit 8ac2f3b2fca3 ("src: Add support - for concatenated set ranges"), crashes with older kernels (< 5.6) - without support for concatenated set ranges: those sets will be sent - to the kernel, which adds them without notion of the fact that - different concatenated fields are actually included, and nft crashes - while trying to list this kind of malformed concatenation. - - Use the NFT_SET_CONCAT flag introduced by kernel commit ef516e8625dd - ("netfilter: nf_tables: reintroduce the NFT_SET_CONCAT flag") when - sets including concatenated ranges are sent to the kernel, so that - older kernels (with no knowledge of this flag itself) will refuse set - creation. - - Note that, in expr_evaluate_set(), we have to check for the presence - of the flag, also on empty sets that might carry it in context data, - and actually set it in the actual set flags. 
- - Reported-by: Pablo Neira Ayuso - Signed-off-by: Stefano Brivio - Signed-off-by: Pablo Neira Ayuso ---- - src/evaluate.c | 9 ++++++++- - 1 file changed, 8 insertions(+), 1 deletion(-) - -diff --git a/src/evaluate.c b/src/evaluate.c -index 0c84816..f66251b 100644 ---- a/src/evaluate.c -+++ b/src/evaluate.c -@@ -1360,10 +1360,16 @@ static int expr_evaluate_set(struct eval_ctx *ctx, struct expr **expr) - set->size += i->size - 1; - set->set_flags |= i->set_flags; - expr_free(i); -- } else if (!expr_is_singleton(i)) -+ } else if (!expr_is_singleton(i)) { - set->set_flags |= NFT_SET_INTERVAL; -+ if (i->key->etype == EXPR_CONCAT) -+ set->set_flags |= NFT_SET_CONCAT; -+ } - } - -+ if (ctx->set && (ctx->set->flags & NFT_SET_CONCAT)) -+ set->set_flags |= NFT_SET_CONCAT; -+ - set->set_flags |= NFT_SET_CONSTANT; - - datatype_set(set, ctx->ectx.dtype); -@@ -3336,6 +3342,7 @@ static int set_evaluate(struct eval_ctx *ctx, struct set *set) - memcpy(&set->desc.field_len, &set->key->field_len, - sizeof(set->desc.field_len)); - set->desc.field_count = set->key->field_count; -+ set->flags |= NFT_SET_CONCAT; - } - - if (set_is_datamap(set->flags)) { --- -2.31.1 - diff --git a/SOURCES/0032-src-store-expr-not-dtype-to-track-data-in-sets.patch b/SOURCES/0032-src-store-expr-not-dtype-to-track-data-in-sets.patch deleted file mode 100644 index 4fa4cf1..0000000 --- a/SOURCES/0032-src-store-expr-not-dtype-to-track-data-in-sets.patch +++ /dev/null @@ -1,503 +0,0 @@ -From 19da892698f1dce2125a796ad86239711896978f Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 7 Dec 2020 18:25:20 +0100 -Subject: [PATCH] src: store expr, not dtype to track data in sets - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1877022 -Upstream Status: nftables commit 343a51702656a - -commit 343a51702656a6476e37cfb84609a82155c7fc5e -Author: Florian Westphal -Date: Tue Jul 16 19:03:55 2019 +0200 - - src: store expr, not dtype to track data in sets - - This will be needed once we add support for the 'typeof' keyword to - handle maps that could e.g. store 'ct helper' "type" values. - - Instead of: - - set foo { - type ipv4_addr . mark; - - this would allow - - set foo { - typeof(ip saddr) . typeof(ct mark); - - (exact syntax TBD). - - This would be needed to allow sets that store variable-sized data types - (string, integer and the like) that can't be used at at the moment. - - Adding special data types for everything is problematic due to the - large amount of different types needed. - - For anonymous sets, e.g. "string" can be used because the needed size can - be inferred from the statement, e.g. 'osf name { "Windows", "Linux }', - but in case of named sets that won't work because 'type string' lacks the - context needed to derive the size information. - - With 'typeof(osf name)' the context is there, but at the moment it won't - help because the expression is discarded instantly and only the data - type is retained. 
- - Signed-off-by: Florian Westphal ---- - include/datatype.h | 1 - - include/netlink.h | 1 - - include/rule.h | 6 ++--- - src/datatype.c | 5 ---- - src/evaluate.c | 58 ++++++++++++++++++++++++++++++++-------------- - src/expression.c | 2 +- - src/json.c | 4 ++-- - src/mnl.c | 6 ++--- - src/monitor.c | 2 +- - src/netlink.c | 32 ++++++++++++------------- - src/parser_bison.y | 3 +-- - src/parser_json.c | 8 +++++-- - src/rule.c | 8 +++---- - src/segtree.c | 8 +++++-- - 14 files changed, 81 insertions(+), 63 deletions(-) - -diff --git a/include/datatype.h b/include/datatype.h -index 49b8f60..04b4892 100644 ---- a/include/datatype.h -+++ b/include/datatype.h -@@ -293,7 +293,6 @@ concat_subtype_lookup(uint32_t type, unsigned int n) - - extern const struct datatype * - set_datatype_alloc(const struct datatype *orig_dtype, unsigned int byteorder); --extern void set_datatype_destroy(const struct datatype *dtype); - - extern void time_print(uint64_t msec, struct output_ctx *octx); - extern struct error_record *time_parse(const struct location *loc, -diff --git a/include/netlink.h b/include/netlink.h -index e694171..88d12ba 100644 ---- a/include/netlink.h -+++ b/include/netlink.h -@@ -189,6 +189,5 @@ int netlink_events_trace_cb(const struct nlmsghdr *nlh, int type, - struct netlink_mon_handler *monh); - - enum nft_data_types dtype_map_to_kernel(const struct datatype *dtype); --const struct datatype *dtype_map_from_kernel(enum nft_data_types type); - - #endif /* NFTABLES_NETLINK_H */ -diff --git a/include/rule.h b/include/rule.h -index 626973e..3637462 100644 ---- a/include/rule.h -+++ b/include/rule.h -@@ -283,8 +283,7 @@ extern struct rule *rule_lookup_by_index(const struct chain *chain, - * @gc_int: garbage collection interval - * @timeout: default timeout value - * @key: key expression (data type, length)) -- * @datatype: mapping data type -- * @datalen: mapping data len -+ * @data: mapping data expression - * @objtype: mapping object type - * @init: initializer - * @rg_cache: cached range element (left) -@@ -303,8 +302,7 @@ struct set { - uint32_t gc_int; - uint64_t timeout; - struct expr *key; -- const struct datatype *datatype; -- unsigned int datalen; -+ struct expr *data; - uint32_t objtype; - struct expr *init; - struct expr *rg_cache; -diff --git a/src/datatype.c b/src/datatype.c -index b9e167e..189e1b4 100644 ---- a/src/datatype.c -+++ b/src/datatype.c -@@ -1190,11 +1190,6 @@ const struct datatype *set_datatype_alloc(const struct datatype *orig_dtype, - return dtype; - } - --void set_datatype_destroy(const struct datatype *dtype) --{ -- datatype_free(dtype); --} -- - static struct error_record *time_unit_parse(const struct location *loc, - const char *str, uint64_t *unit) - { -diff --git a/src/evaluate.c b/src/evaluate.c -index f66251b..578dcae 100644 ---- a/src/evaluate.c -+++ b/src/evaluate.c -@@ -1383,6 +1383,7 @@ static int expr_evaluate_map(struct eval_ctx *ctx, struct expr **expr) - { - struct expr_ctx ectx = ctx->ectx; - struct expr *map = *expr, *mappings; -+ const struct datatype *dtype; - struct expr *key; - - expr_set_context(&ctx->ectx, NULL, 0); -@@ -1405,10 +1406,14 @@ static int expr_evaluate_map(struct eval_ctx *ctx, struct expr **expr) - mappings = implicit_set_declaration(ctx, "__map%d", - key, - mappings); -- mappings->set->datatype = -- datatype_get(set_datatype_alloc(ectx.dtype, -- ectx.byteorder)); -- mappings->set->datalen = ectx.len; -+ -+ dtype = set_datatype_alloc(ectx.dtype, ectx.byteorder); -+ -+ mappings->set->data = constant_expr_alloc(&netlink_location, -+ 
dtype, dtype->byteorder, -+ ectx.len, NULL); -+ if (ectx.len && mappings->set->data->len != ectx.len) -+ BUG("%d vs %d\n", mappings->set->data->len, ectx.len); - - map->mappings = mappings; - -@@ -1444,7 +1449,7 @@ static int expr_evaluate_map(struct eval_ctx *ctx, struct expr **expr) - map->mappings->set->key->dtype->desc, - map->map->dtype->desc); - -- datatype_set(map, map->mappings->set->datatype); -+ datatype_set(map, map->mappings->set->data->dtype); - map->flags |= EXPR_F_CONSTANT; - - /* Data for range lookups needs to be in big endian order */ -@@ -1474,7 +1479,12 @@ static int expr_evaluate_mapping(struct eval_ctx *ctx, struct expr **expr) - "Key must be a constant"); - mapping->flags |= mapping->left->flags & EXPR_F_SINGLETON; - -- expr_set_context(&ctx->ectx, set->datatype, set->datalen); -+ if (set->data) { -+ expr_set_context(&ctx->ectx, set->data->dtype, set->data->len); -+ } else { -+ assert((set->flags & NFT_SET_MAP) == 0); -+ } -+ - if (expr_evaluate(ctx, &mapping->right) < 0) - return -1; - if (!expr_is_constant(mapping->right)) -@@ -2119,7 +2129,7 @@ static int stmt_evaluate_arg(struct eval_ctx *ctx, struct stmt *stmt, - (*expr)->len); - else if ((*expr)->dtype->type != TYPE_INTEGER && - !datatype_equal((*expr)->dtype, dtype)) -- return stmt_binary_error(ctx, *expr, stmt, -+ return stmt_binary_error(ctx, *expr, stmt, /* verdict vs invalid? */ - "datatype mismatch: expected %s, " - "expression has type %s", - dtype->desc, (*expr)->dtype->desc); -@@ -3113,9 +3123,9 @@ static int stmt_evaluate_map(struct eval_ctx *ctx, struct stmt *stmt) - "Key expression comments are not supported"); - - if (stmt_evaluate_arg(ctx, stmt, -- stmt->map.set->set->datatype, -- stmt->map.set->set->datalen, -- stmt->map.set->set->datatype->byteorder, -+ stmt->map.set->set->data->dtype, -+ stmt->map.set->set->data->len, -+ stmt->map.set->set->data->byteorder, - &stmt->map.data->key) < 0) - return -1; - if (expr_is_constant(stmt->map.data)) -@@ -3161,8 +3171,12 @@ static int stmt_evaluate_objref_map(struct eval_ctx *ctx, struct stmt *stmt) - - mappings = implicit_set_declaration(ctx, "__objmap%d", - key, mappings); -- mappings->set->datatype = &string_type; -- mappings->set->datalen = NFT_OBJ_MAXNAMELEN * BITS_PER_BYTE; -+ -+ mappings->set->data = constant_expr_alloc(&netlink_location, -+ &string_type, -+ BYTEORDER_HOST_ENDIAN, -+ NFT_OBJ_MAXNAMELEN * BITS_PER_BYTE, -+ NULL); - mappings->set->objtype = stmt->objref.type; - - map->mappings = mappings; -@@ -3197,7 +3211,7 @@ static int stmt_evaluate_objref_map(struct eval_ctx *ctx, struct stmt *stmt) - map->mappings->set->key->dtype->desc, - map->map->dtype->desc); - -- datatype_set(map, map->mappings->set->datatype); -+ datatype_set(map, map->mappings->set->data->dtype); - map->flags |= EXPR_F_CONSTANT; - - /* Data for range lookups needs to be in big endian order */ -@@ -3346,17 +3360,25 @@ static int set_evaluate(struct eval_ctx *ctx, struct set *set) - } - - if (set_is_datamap(set->flags)) { -- if (set->datatype == NULL) -+ if (set->data == NULL) - return set_error(ctx, set, "map definition does not " - "specify mapping data type"); - -- set->datalen = set->datatype->size; -- if (set->datalen == 0 && set->datatype->type != TYPE_VERDICT) -+ if (set->data->len == 0 && set->data->dtype->type != TYPE_VERDICT) - return set_error(ctx, set, "unqualified mapping data " - "type specified in map definition"); - } else if (set_is_objmap(set->flags)) { -- set->datatype = &string_type; -- set->datalen = NFT_OBJ_MAXNAMELEN * BITS_PER_BYTE; -+ if (set->data) { 
-+ assert(set->data->etype == EXPR_VALUE); -+ assert(set->data->dtype == &string_type); -+ } -+ -+ assert(set->data == NULL); -+ set->data = constant_expr_alloc(&netlink_location, &string_type, -+ BYTEORDER_HOST_ENDIAN, -+ NFT_OBJ_MAXNAMELEN * BITS_PER_BYTE, -+ NULL); -+ - } - - ctx->set = set; -diff --git a/src/expression.c b/src/expression.c -index 5070b10..6fa2f1d 100644 ---- a/src/expression.c -+++ b/src/expression.c -@@ -1010,7 +1010,7 @@ static void map_expr_print(const struct expr *expr, struct output_ctx *octx) - { - expr_print(expr->map, octx); - if (expr->mappings->etype == EXPR_SET_REF && -- expr->mappings->set->datatype->type == TYPE_VERDICT) -+ expr->mappings->set->data->dtype->type == TYPE_VERDICT) - nft_print(octx, " vmap "); - else - nft_print(octx, " map "); -diff --git a/src/json.c b/src/json.c -index 3498e24..1906e7d 100644 ---- a/src/json.c -+++ b/src/json.c -@@ -82,7 +82,7 @@ static json_t *set_print_json(struct output_ctx *octx, const struct set *set) - - if (set_is_datamap(set->flags)) { - type = "map"; -- datatype_ext = set->datatype->name; -+ datatype_ext = set->data->dtype->name; - } else if (set_is_objmap(set->flags)) { - type = "map"; - datatype_ext = obj_type_name(set->objtype); -@@ -645,7 +645,7 @@ json_t *map_expr_json(const struct expr *expr, struct output_ctx *octx) - const char *type = "map"; - - if (expr->mappings->etype == EXPR_SET_REF && -- expr->mappings->set->datatype->type == TYPE_VERDICT) -+ expr->mappings->set->data->dtype->type == TYPE_VERDICT) - type = "vmap"; - - return json_pack("{s:{s:o, s:o}}", type, -diff --git a/src/mnl.c b/src/mnl.c -index 221ee05..23341e6 100644 ---- a/src/mnl.c -+++ b/src/mnl.c -@@ -839,9 +839,9 @@ int mnl_nft_set_add(struct netlink_ctx *ctx, const struct cmd *cmd, - div_round_up(set->key->len, BITS_PER_BYTE)); - if (set_is_datamap(set->flags)) { - nftnl_set_set_u32(nls, NFTNL_SET_DATA_TYPE, -- dtype_map_to_kernel(set->datatype)); -+ dtype_map_to_kernel(set->data->dtype)); - nftnl_set_set_u32(nls, NFTNL_SET_DATA_LEN, -- set->datalen / BITS_PER_BYTE); -+ set->data->len / BITS_PER_BYTE); - } - if (set_is_objmap(set->flags)) - nftnl_set_set_u32(nls, NFTNL_SET_OBJ_TYPE, set->objtype); -@@ -873,7 +873,7 @@ int mnl_nft_set_add(struct netlink_ctx *ctx, const struct cmd *cmd, - - if (set_is_datamap(set->flags) && - !nftnl_udata_put_u32(udbuf, NFTNL_UDATA_SET_DATABYTEORDER, -- set->datatype->byteorder)) -+ set->data->byteorder)) - memory_allocation_error(); - - if (set->automerge && -diff --git a/src/monitor.c b/src/monitor.c -index fb803cf..7927b6f 100644 ---- a/src/monitor.c -+++ b/src/monitor.c -@@ -401,7 +401,7 @@ static int netlink_events_setelem_cb(const struct nlmsghdr *nlh, int type, - */ - dummyset = set_alloc(monh->loc); - dummyset->key = expr_clone(set->key); -- dummyset->datatype = set->datatype; -+ dummyset->data = set->data; - dummyset->flags = set->flags; - dummyset->init = set_expr_alloc(monh->loc, set); - -diff --git a/src/netlink.c b/src/netlink.c -index e0ba903..64e51e5 100644 ---- a/src/netlink.c -+++ b/src/netlink.c -@@ -575,7 +575,7 @@ enum nft_data_types dtype_map_to_kernel(const struct datatype *dtype) - } - } - --const struct datatype *dtype_map_from_kernel(enum nft_data_types type) -+static const struct datatype *dtype_map_from_kernel(enum nft_data_types type) - { - switch (type) { - case NFT_DATA_VERDICT: -@@ -622,10 +622,10 @@ struct set *netlink_delinearize_set(struct netlink_ctx *ctx, - const struct nftnl_set *nls) - { - const struct nftnl_udata *ud[NFTNL_UDATA_SET_MAX + 1] = {}; -- uint32_t flags, 
key, data, data_len, objtype = 0; - enum byteorder keybyteorder = BYTEORDER_INVALID; - enum byteorder databyteorder = BYTEORDER_INVALID; -- const struct datatype *keytype, *datatype; -+ const struct datatype *keytype, *datatype = NULL; -+ uint32_t flags, key, objtype = 0; - bool automerge = false; - const char *udata; - struct set *set; -@@ -659,6 +659,8 @@ struct set *netlink_delinearize_set(struct netlink_ctx *ctx, - - flags = nftnl_set_get_u32(nls, NFTNL_SET_FLAGS); - if (set_is_datamap(flags)) { -+ uint32_t data; -+ - data = nftnl_set_get_u32(nls, NFTNL_SET_DATA_TYPE); - datatype = dtype_map_from_kernel(data); - if (datatype == NULL) { -@@ -667,8 +669,7 @@ struct set *netlink_delinearize_set(struct netlink_ctx *ctx, - data); - return NULL; - } -- } else -- datatype = NULL; -+ } - - if (set_is_objmap(flags)) { - objtype = nftnl_set_get_u32(nls, NFTNL_SET_OBJ_TYPE); -@@ -691,16 +692,13 @@ struct set *netlink_delinearize_set(struct netlink_ctx *ctx, - - set->objtype = objtype; - -+ set->data = NULL; - if (datatype) -- set->datatype = datatype_get(set_datatype_alloc(datatype, -- databyteorder)); -- else -- set->datatype = NULL; -- -- if (nftnl_set_is_set(nls, NFTNL_SET_DATA_LEN)) { -- data_len = nftnl_set_get_u32(nls, NFTNL_SET_DATA_LEN); -- set->datalen = data_len * BITS_PER_BYTE; -- } -+ set->data = constant_expr_alloc(&netlink_location, -+ set_datatype_alloc(datatype, databyteorder), -+ databyteorder, -+ nftnl_set_get_u32(nls, NFTNL_SET_DATA_LEN) * BITS_PER_BYTE, -+ NULL); - - if (nftnl_set_is_set(nls, NFTNL_SET_TIMEOUT)) - set->timeout = nftnl_set_get_u64(nls, NFTNL_SET_TIMEOUT); -@@ -897,10 +895,10 @@ key_end: - goto out; - - data = netlink_alloc_data(&netlink_location, &nld, -- set->datatype->type == TYPE_VERDICT ? -+ set->data->dtype->type == TYPE_VERDICT ? 
- NFT_REG_VERDICT : NFT_REG_1); -- datatype_set(data, set->datatype); -- data->byteorder = set->datatype->byteorder; -+ datatype_set(data, set->data->dtype); -+ data->byteorder = set->data->byteorder; - if (data->byteorder == BYTEORDER_HOST_ENDIAN) - mpz_switch_byteorder(data->value, data->len / BITS_PER_BYTE); - -diff --git a/src/parser_bison.y b/src/parser_bison.y -index ea83f52..4cca31b 100644 ---- a/src/parser_bison.y -+++ b/src/parser_bison.y -@@ -1749,9 +1749,8 @@ map_block : /* empty */ { $$ = $-1; } - stmt_separator - { - $1->key = $3; -- $1->datatype = $5->dtype; -+ $1->data = $5; - -- expr_free($5); - $1->flags |= NFT_SET_MAP; - $$ = $1; - } -diff --git a/src/parser_json.c b/src/parser_json.c -index ce8e566..ddc694f 100644 ---- a/src/parser_json.c -+++ b/src/parser_json.c -@@ -2833,11 +2833,15 @@ static struct cmd *json_parse_cmd_add_set(struct json_ctx *ctx, json_t *root, - } - - if (!json_unpack(root, "{s:s}", "map", &dtype_ext)) { -+ const struct datatype *dtype; -+ - set->objtype = string_to_nft_object(dtype_ext); - if (set->objtype) { - set->flags |= NFT_SET_OBJECT; -- } else if (datatype_lookup_byname(dtype_ext)) { -- set->datatype = datatype_lookup_byname(dtype_ext); -+ } else if ((dtype = datatype_lookup_byname(dtype_ext))) { -+ set->data = constant_expr_alloc(&netlink_location, -+ dtype, dtype->byteorder, -+ dtype->size, NULL); - set->flags |= NFT_SET_MAP; - } else { - json_error(ctx, "Invalid map type '%s'.", dtype_ext); -diff --git a/src/rule.c b/src/rule.c -index e18237b..f7d888b 100644 ---- a/src/rule.c -+++ b/src/rule.c -@@ -332,8 +332,8 @@ struct set *set_clone(const struct set *set) - new_set->gc_int = set->gc_int; - new_set->timeout = set->timeout; - new_set->key = expr_clone(set->key); -- new_set->datatype = datatype_get(set->datatype); -- new_set->datalen = set->datalen; -+ if (set->data) -+ new_set->data = expr_clone(set->data); - new_set->objtype = set->objtype; - new_set->policy = set->policy; - new_set->automerge = set->automerge; -@@ -356,7 +356,7 @@ void set_free(struct set *set) - expr_free(set->init); - handle_free(&set->handle); - expr_free(set->key); -- set_datatype_destroy(set->datatype); -+ expr_free(set->data); - xfree(set); - } - -@@ -469,7 +469,7 @@ static void set_print_declaration(const struct set *set, - nft_print(octx, "%s%stype %s", - opts->tab, opts->tab, set->key->dtype->name); - if (set_is_datamap(set->flags)) -- nft_print(octx, " : %s", set->datatype->name); -+ nft_print(octx, " : %s", set->data->dtype->name); - else if (set_is_objmap(set->flags)) - nft_print(octx, " : %s", obj_type_name(set->objtype)); - -diff --git a/src/segtree.c b/src/segtree.c -index 073c6ec..d6e3ce2 100644 ---- a/src/segtree.c -+++ b/src/segtree.c -@@ -79,8 +79,12 @@ static void seg_tree_init(struct seg_tree *tree, const struct set *set, - tree->root = RB_ROOT; - tree->keytype = set->key->dtype; - tree->keylen = set->key->len; -- tree->datatype = set->datatype; -- tree->datalen = set->datalen; -+ tree->datatype = NULL; -+ tree->datalen = 0; -+ if (set->data) { -+ tree->datatype = set->data->dtype; -+ tree->datalen = set->data->len; -+ } - tree->byteorder = first->byteorder; - tree->debug_mask = debug_mask; - } --- -2.31.1 - diff --git a/SOURCES/0032-tests-monitor-Summarize-failures-per-test-case.patch b/SOURCES/0032-tests-monitor-Summarize-failures-per-test-case.patch new file mode 100644 index 0000000..e899032 --- /dev/null +++ b/SOURCES/0032-tests-monitor-Summarize-failures-per-test-case.patch @@ -0,0 +1,54 @@ +From a2446688362b6b81bd0fa0dc22cb5cc2fa3378c1 Mon 
Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 20 Jul 2023 15:55:05 +0200 +Subject: [PATCH] tests: monitor: Summarize failures per test case + +Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2211076 +Upstream Status: nftables commit c2b28dcebd058 + +commit c2b28dcebd058b978692b8e1899e79b96c025396 +Author: Phil Sutter +Date: Thu Jul 20 12:08:45 2023 +0200 + + tests: monitor: Summarize failures per test case + + Explicitly print when tests from a file fail in addition to the diff + + "output differs" message. + + Signed-off-by: Phil Sutter + +Signed-off-by: Phil Sutter +--- + tests/monitor/run-tests.sh | 9 ++++++++- + 1 file changed, 8 insertions(+), 1 deletion(-) + +diff --git a/tests/monitor/run-tests.sh b/tests/monitor/run-tests.sh +index b5ca47d..f1ac790 100755 +--- a/tests/monitor/run-tests.sh ++++ b/tests/monitor/run-tests.sh +@@ -161,7 +161,10 @@ for variant in $variants; do + output_append=${variant}_output_append + + for testcase in ${testcases:-testcases/*.t}; do +- echo "$variant: running tests from file $(basename $testcase)" ++ filename=$(basename $testcase) ++ echo "$variant: running tests from file $filename" ++ rc_start=$rc ++ + # files are like this: + # + # I add table ip t +@@ -199,6 +202,10 @@ for variant in $variants; do + $run_test + let "rc += $?" + } ++ ++ let "rc_diff = rc - rc_start" ++ [[ $rc_diff -ne 0 ]] && \ ++ echo "$variant: $rc_diff tests from file $filename failed" + done + done + exit $rc +-- +2.41.0 + diff --git a/SOURCES/0033-evaluate-Perform-set-evaluation-on-implicitly-declar.patch b/SOURCES/0033-evaluate-Perform-set-evaluation-on-implicitly-declar.patch deleted file mode 100644 index 1d5b5fc..0000000 --- a/SOURCES/0033-evaluate-Perform-set-evaluation-on-implicitly-declar.patch +++ /dev/null @@ -1,120 +0,0 @@ -From 785823a1f607a7bcd32d4cb42655422c223fcad5 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 7 Dec 2020 18:25:20 +0100 -Subject: [PATCH] evaluate: Perform set evaluation on implicitly declared - (anonymous) sets - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1877022 -Upstream Status: nftables commit 7aa08d45031ec - -commit 7aa08d45031ec7ce5dadb4979471d626367c09cd -Author: Stefano Brivio -Date: Wed May 27 22:51:21 2020 +0200 - - evaluate: Perform set evaluation on implicitly declared (anonymous) sets - - If a set is implicitly declared, set_evaluate() is not called as a - result of cmd_evaluate_add(), because we're adding in fact something - else (e.g. a rule). Expression-wise, evaluation still happens as the - implicit set expression is eventually found in the tree and handled - by expr_evaluate_set(), but context-wise evaluation (set_evaluate()) - is skipped, and this might be relevant instead. - - This is visible in the reported case of an anonymous set including - concatenated ranges: - - # nft add rule t c ip saddr . tcp dport { 192.0.2.1 . 20-30 } accept - BUG: invalid range expression type concat - nft: expression.c:1160: range_expr_value_low: Assertion `0' failed. - Aborted - - because we reach do_add_set() without properly evaluated flags and - set description, and eventually end up in expr_to_intervals(), which - can't handle that expression. 
- - Explicitly call set_evaluate() as we add anonymous sets into the - context, and instruct the same function to: - - skip expression-wise set evaluation if the set is anonymous, as - that happens later anyway as part of the general tree evaluation - - skip the insertion in the set cache, as it makes no sense to have - sets that shouldn't be referenced there - - For object maps, the allocation of the expression for set->data is - already handled by set_evaluate(), so we can now drop that from - stmt_evaluate_objref_map(). - - v2: - - skip insertion of set in cache (Pablo Neira Ayuso) - - drop double allocation of expression (and leak of the first - one) for object maps (Pablo Neira Ayuso) - - Reported-by: Pablo Neira Ayuso - Reported-by: Phil Sutter - Signed-off-by: Stefano Brivio - Signed-off-by: Pablo Neira Ayuso ---- - src/evaluate.c | 20 ++++++++++---------- - 1 file changed, 10 insertions(+), 10 deletions(-) - -diff --git a/src/evaluate.c b/src/evaluate.c -index 578dcae..fc45cef 100644 ---- a/src/evaluate.c -+++ b/src/evaluate.c -@@ -75,6 +75,7 @@ static void key_fix_dtype_byteorder(struct expr *key) - datatype_set(key, set_datatype_alloc(dtype, key->byteorder)); - } - -+static int set_evaluate(struct eval_ctx *ctx, struct set *set); - static struct expr *implicit_set_declaration(struct eval_ctx *ctx, - const char *name, - struct expr *key, -@@ -105,6 +106,8 @@ static struct expr *implicit_set_declaration(struct eval_ctx *ctx, - list_add_tail(&cmd->list, &ctx->cmd->list); - } - -+ set_evaluate(ctx, set); -+ - return set_ref_expr_alloc(&expr->location, set); - } - -@@ -3171,12 +3174,6 @@ static int stmt_evaluate_objref_map(struct eval_ctx *ctx, struct stmt *stmt) - - mappings = implicit_set_declaration(ctx, "__objmap%d", - key, mappings); -- -- mappings->set->data = constant_expr_alloc(&netlink_location, -- &string_type, -- BYTEORDER_HOST_ENDIAN, -- NFT_OBJ_MAXNAMELEN * BITS_PER_BYTE, -- NULL); - mappings->set->objtype = stmt->objref.type; - - map->mappings = mappings; -@@ -3381,6 +3378,13 @@ static int set_evaluate(struct eval_ctx *ctx, struct set *set) - - } - -+ /* Default timeout value implies timeout support */ -+ if (set->timeout) -+ set->flags |= NFT_SET_TIMEOUT; -+ -+ if (set_is_anonymous(set->flags)) -+ return 0; -+ - ctx->set = set; - if (set->init != NULL) { - expr_set_context(&ctx->ectx, set->key->dtype, set->key->len); -@@ -3392,10 +3396,6 @@ static int set_evaluate(struct eval_ctx *ctx, struct set *set) - if (set_lookup(table, set->handle.set.name) == NULL) - set_add_hash(set_get(set), table); - -- /* Default timeout value implies timeout support */ -- if (set->timeout) -- set->flags |= NFT_SET_TIMEOUT; -- - return 0; - } - --- -2.31.1 - diff --git a/SOURCES/0033-rule-check-address-family-in-set-collapse.patch b/SOURCES/0033-rule-check-address-family-in-set-collapse.patch new file mode 100644 index 0000000..ceb1d2c --- /dev/null +++ b/SOURCES/0033-rule-check-address-family-in-set-collapse.patch @@ -0,0 +1,114 @@ +From 955758b3ef4772bb92fc63a8f6d424f93ebb7a2f Mon Sep 17 00:00:00 2001 +From: Phil Sutter +Date: Thu, 21 Sep 2023 15:24:03 +0200 +Subject: [PATCH] rule: check address family in set collapse + +JIRA: https://issues.redhat.com/browse/RHEL-5160 +Upstream Status: nftables commit a817ea9655dee + +commit a817ea9655dee1915423a802c0133e3611e02b3a +Author: Derek Hageman +Date: Thu Sep 1 10:10:41 2022 -0600 + + rule: check address family in set collapse + + 498a5f0c219d added collapsing of set operations in different commands. 
+ However, the logic is currently too relaxed. It is valid to have a + table and set with identical names on different address families. + For example: + + table ip a { + set x { + type inet_service; + } + } + table ip6 a { + set x { + type inet_service; + } + } + add element ip a x { 1 } + add element ip a x { 2 } + add element ip6 a x { 2 } + + The above currently results in nothing being added to the ip6 family + table due to being collapsed into the ip table add. Prior to + 498a5f0c219d the set add would work. The fix is simply to check the + family in addition to the table and set names before allowing a + collapse. + + [ Add testcase to tests/shell --pablo ] + + Fixes: 498a5f0c219d ("rule: collapse set element commands") + Signed-off-by: Derek Hageman + Signed-off-by: Pablo Neira Ayuso + +Signed-off-by: Phil Sutter +--- + src/rule.c | 3 ++- + tests/shell/testcases/sets/collapse_elem_0 | 19 +++++++++++++++++++ + .../testcases/sets/dumps/collapse_elem_0.nft | 12 ++++++++++++ + 3 files changed, 33 insertions(+), 1 deletion(-) + create mode 100755 tests/shell/testcases/sets/collapse_elem_0 + create mode 100644 tests/shell/testcases/sets/dumps/collapse_elem_0.nft + +diff --git a/src/rule.c b/src/rule.c +index 0526a14..3b60cca 100644 +--- a/src/rule.c ++++ b/src/rule.c +@@ -1409,7 +1409,8 @@ bool nft_cmd_collapse(struct list_head *cmds) + continue; + } + +- if (strcmp(elems->handle.table.name, cmd->handle.table.name) || ++ if (elems->handle.family != cmd->handle.family || ++ strcmp(elems->handle.table.name, cmd->handle.table.name) || + strcmp(elems->handle.set.name, cmd->handle.set.name)) { + elems = cmd; + continue; +diff --git a/tests/shell/testcases/sets/collapse_elem_0 b/tests/shell/testcases/sets/collapse_elem_0 +new file mode 100755 +index 0000000..7699e9d +--- /dev/null ++++ b/tests/shell/testcases/sets/collapse_elem_0 +@@ -0,0 +1,19 @@ ++#!/bin/bash ++ ++set -e ++ ++RULESET="table ip a { ++ set x { ++ type inet_service; ++ } ++} ++table ip6 a { ++ set x { ++ type inet_service; ++ } ++} ++add element ip a x { 1 } ++add element ip a x { 2 } ++add element ip6 a x { 2 }" ++ ++$NFT -f - <<< $RULESET +diff --git a/tests/shell/testcases/sets/dumps/collapse_elem_0.nft b/tests/shell/testcases/sets/dumps/collapse_elem_0.nft +new file mode 100644 +index 0000000..a3244fc +--- /dev/null ++++ b/tests/shell/testcases/sets/dumps/collapse_elem_0.nft +@@ -0,0 +1,12 @@ ++table ip a { ++ set x { ++ type inet_service ++ elements = { 1, 2 } ++ } ++} ++table ip6 a { ++ set x { ++ type inet_service ++ elements = { 2 } ++ } ++} +-- +2.41.0 + diff --git a/SOURCES/0034-evaluate-missing-datatype-definition-in-implicit_set.patch b/SOURCES/0034-evaluate-missing-datatype-definition-in-implicit_set.patch deleted file mode 100644 index 3b7244d..0000000 --- a/SOURCES/0034-evaluate-missing-datatype-definition-in-implicit_set.patch +++ /dev/null @@ -1,167 +0,0 @@ -From 3193f74613b16a42d7784452ebf4d53ccd33b887 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Tue, 12 Jan 2021 10:34:35 +0100 -Subject: [PATCH] evaluate: missing datatype definition in - implicit_set_declaration() - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1877022 -Upstream Status: nftables commit 54eb1e16cc478 - -commit 54eb1e16cc4787906fe8206858f0ea0bfb9c1209 -Author: Pablo Neira Ayuso -Date: Sun Jun 7 15:23:21 2020 +0200 - - evaluate: missing datatype definition in implicit_set_declaration() - - set->data from implicit_set_declaration(), otherwise, set_evaluation() - bails out with: - - # nft -f /etc/nftables/inet-filter.nft - 
/etc/nftables/inet-filter.nft:8:32-54: Error: map definition does not specify - mapping data type - tcp dport vmap { 22 : jump ssh_input } - ^^^^^^^^^^^^^^^^^^^^^^^ - /etc/nftables/inet-filter.nft:13:26-52: Error: map definition does not specify - mapping data type - iif vmap { "eth0" : jump wan_input } - ^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - Add a test to cover this case. - - Fixes: 7aa08d45031e ("evaluate: Perform set evaluation on implicitly declared (anonymous) sets") - Closes: https://bugzilla.kernel.org/show_bug.cgi?id=208093 - Reviewed-by: Stefano Brivio - Signed-off-by: Pablo Neira Ayuso ---- - src/evaluate.c | 22 +++++++++++---------- - tests/shell/testcases/maps/0009vmap_0 | 19 ++++++++++++++++++ - tests/shell/testcases/maps/dumps/0009vmap_0 | 13 ++++++++++++ - 3 files changed, 44 insertions(+), 10 deletions(-) - create mode 100755 tests/shell/testcases/maps/0009vmap_0 - create mode 100644 tests/shell/testcases/maps/dumps/0009vmap_0 - -diff --git a/src/evaluate.c b/src/evaluate.c -index fc45cef..a966ed4 100644 ---- a/src/evaluate.c -+++ b/src/evaluate.c -@@ -79,6 +79,7 @@ static int set_evaluate(struct eval_ctx *ctx, struct set *set); - static struct expr *implicit_set_declaration(struct eval_ctx *ctx, - const char *name, - struct expr *key, -+ struct expr *data, - struct expr *expr) - { - struct cmd *cmd; -@@ -92,6 +93,7 @@ static struct expr *implicit_set_declaration(struct eval_ctx *ctx, - set->flags = NFT_SET_ANONYMOUS | expr->set_flags; - set->handle.set.name = xstrdup(name); - set->key = key; -+ set->data = data; - set->init = expr; - set->automerge = set->flags & NFT_SET_INTERVAL; - -@@ -1387,7 +1389,7 @@ static int expr_evaluate_map(struct eval_ctx *ctx, struct expr **expr) - struct expr_ctx ectx = ctx->ectx; - struct expr *map = *expr, *mappings; - const struct datatype *dtype; -- struct expr *key; -+ struct expr *key, *data; - - expr_set_context(&ctx->ectx, NULL, 0); - if (expr_evaluate(ctx, &map->map) < 0) -@@ -1406,15 +1408,14 @@ static int expr_evaluate_map(struct eval_ctx *ctx, struct expr **expr) - ctx->ectx.byteorder, - ctx->ectx.len, NULL); - -+ dtype = set_datatype_alloc(ectx.dtype, ectx.byteorder); -+ data = constant_expr_alloc(&netlink_location, dtype, -+ dtype->byteorder, ectx.len, NULL); -+ - mappings = implicit_set_declaration(ctx, "__map%d", -- key, -+ key, data, - mappings); - -- dtype = set_datatype_alloc(ectx.dtype, ectx.byteorder); -- -- mappings->set->data = constant_expr_alloc(&netlink_location, -- dtype, dtype->byteorder, -- ectx.len, NULL); - if (ectx.len && mappings->set->data->len != ectx.len) - BUG("%d vs %d\n", mappings->set->data->len, ectx.len); - -@@ -1857,7 +1858,8 @@ static int expr_evaluate_relational(struct eval_ctx *ctx, struct expr **expr) - case EXPR_SET: - right = rel->right = - implicit_set_declaration(ctx, "__set%d", -- expr_get(left), right); -+ expr_get(left), NULL, -+ right); - /* fall through */ - case EXPR_SET_REF: - /* Data for range lookups needs to be in big endian order */ -@@ -2335,7 +2337,7 @@ static int stmt_evaluate_meter(struct eval_ctx *ctx, struct stmt *stmt) - set->set_flags |= NFT_SET_TIMEOUT; - - setref = implicit_set_declaration(ctx, stmt->meter.name, -- expr_get(key), set); -+ expr_get(key), NULL, set); - - setref->set->desc.size = stmt->meter.size; - stmt->meter.set = setref; -@@ -3173,7 +3175,7 @@ static int stmt_evaluate_objref_map(struct eval_ctx *ctx, struct stmt *stmt) - ctx->ectx.len, NULL); - - mappings = implicit_set_declaration(ctx, "__objmap%d", -- key, mappings); -+ key, NULL, mappings); - 
mappings->set->objtype = stmt->objref.type; - - map->mappings = mappings; -diff --git a/tests/shell/testcases/maps/0009vmap_0 b/tests/shell/testcases/maps/0009vmap_0 -new file mode 100755 -index 0000000..7627c81 ---- /dev/null -+++ b/tests/shell/testcases/maps/0009vmap_0 -@@ -0,0 +1,19 @@ -+#!/bin/bash -+ -+set -e -+ -+EXPECTED="table inet filter { -+ chain ssh_input { -+ } -+ -+ chain wan_input { -+ tcp dport vmap { 22 : jump ssh_input } -+ } -+ -+ chain prerouting { -+ type filter hook prerouting priority -300; policy accept; -+ iif vmap { "lo" : jump wan_input } -+ } -+}" -+ -+$NFT -f - <<< "$EXPECTED" -diff --git a/tests/shell/testcases/maps/dumps/0009vmap_0 b/tests/shell/testcases/maps/dumps/0009vmap_0 -new file mode 100644 -index 0000000..540a8af ---- /dev/null -+++ b/tests/shell/testcases/maps/dumps/0009vmap_0 -@@ -0,0 +1,13 @@ -+table inet filter { -+ chain ssh_input { -+ } -+ -+ chain wan_input { -+ tcp dport vmap { 22 : jump ssh_input } -+ } -+ -+ chain prerouting { -+ type filter hook prerouting priority -300; policy accept; -+ iif vmap { "lo" : jump wan_input } -+ } -+} --- -2.31.1 - diff --git a/SOURCES/0035-mergesort-unbreak-listing-with-binops.patch b/SOURCES/0035-mergesort-unbreak-listing-with-binops.patch deleted file mode 100644 index 7171ddd..0000000 --- a/SOURCES/0035-mergesort-unbreak-listing-with-binops.patch +++ /dev/null @@ -1,88 +0,0 @@ -From 9d67918643e7d17c433e82eb6cdb039cb103c50f Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 7 Dec 2020 18:26:24 +0100 -Subject: [PATCH] mergesort: unbreak listing with binops - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1891790 -Upstream Status: nftables commit 3926a3369bb5a - -commit 3926a3369bb5ada5c0706dadcbcf938517822a35 -Author: Pablo Neira Ayuso -Date: Thu Aug 20 01:05:04 2020 +0200 - - mergesort: unbreak listing with binops - - tcp flags == {syn, syn|ack} - tcp flags & (fin|syn|rst|psh|ack|urg) == {ack, psh|ack, fin, fin|psh|ack} - - results in: - - BUG: Unknown expression binop - nft: mergesort.c:47: expr_msort_cmp: Assertion `0' failed. 
- Aborted (core dumped) - - Signed-off-by: Pablo Neira Ayuso ---- - src/mergesort.c | 2 ++ - tests/py/inet/tcp.t | 2 ++ - tests/py/inet/tcp.t.payload | 21 +++++++++++++++++++++ - 3 files changed, 25 insertions(+) - -diff --git a/src/mergesort.c b/src/mergesort.c -index 649b780..02094b4 100644 ---- a/src/mergesort.c -+++ b/src/mergesort.c -@@ -43,6 +43,8 @@ static int expr_msort_cmp(const struct expr *e1, const struct expr *e2) - return concat_expr_msort_cmp(e1, e2); - case EXPR_MAPPING: - return expr_msort_cmp(e1->left, e2->left); -+ case EXPR_BINOP: -+ return expr_msort_cmp(e1->left, e2->left); - default: - BUG("Unknown expression %s\n", expr_name(e1)); - } -diff --git a/tests/py/inet/tcp.t b/tests/py/inet/tcp.t -index e0a83e2..29f06f5 100644 ---- a/tests/py/inet/tcp.t -+++ b/tests/py/inet/tcp.t -@@ -79,6 +79,8 @@ tcp flags != cwr;ok - tcp flags == syn;ok - tcp flags & (syn|fin) == (syn|fin);ok;tcp flags & (fin | syn) == fin | syn - tcp flags & (fin | syn | rst | psh | ack | urg | ecn | cwr) == fin | syn | rst | psh | ack | urg | ecn | cwr;ok;tcp flags == 0xff -+tcp flags { syn, syn | ack };ok -+tcp flags & (fin | syn | rst | psh | ack | urg) == { fin, ack, psh | ack, fin | psh | ack };ok - - tcp window 22222;ok - tcp window 22;ok -diff --git a/tests/py/inet/tcp.t.payload b/tests/py/inet/tcp.t.payload -index 55f1bc2..076e562 100644 ---- a/tests/py/inet/tcp.t.payload -+++ b/tests/py/inet/tcp.t.payload -@@ -680,3 +680,24 @@ inet test-inet input - [ bitwise reg 1 = (reg=1 & 0x000000f0 ) ^ 0x00000000 ] - [ cmp eq reg 1 0x00000080 ] - -+# tcp flags & (fin | syn | rst | psh | ack | urg) == { fin, ack, psh | ack, fin | psh | ack } -+__set%d test-inet 3 -+__set%d test-inet 0 -+ element 00000001 : 0 [end] element 00000010 : 0 [end] element 00000018 : 0 [end] element 00000019 : 0 [end] -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ payload load 1b @ transport header + 13 => reg 1 ] -+ [ bitwise reg 1 = (reg=1 & 0x0000003f ) ^ 0x00000000 ] -+ [ lookup reg 1 set __set%d ] -+ -+# tcp flags { syn, syn | ack } -+__set%d test-inet 3 -+__set%d test-inet 0 -+ element 00000002 : 0 [end] element 00000012 : 0 [end] -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ payload load 1b @ transport header + 13 => reg 1 ] -+ [ lookup reg 1 set __set%d ] -+ --- -2.31.1 - diff --git a/SOURCES/0036-proto-add-sctp-crc32-checksum-fixup.patch b/SOURCES/0036-proto-add-sctp-crc32-checksum-fixup.patch deleted file mode 100644 index a9e9f8c..0000000 --- a/SOURCES/0036-proto-add-sctp-crc32-checksum-fixup.patch +++ /dev/null @@ -1,134 +0,0 @@ -From 876a1202351264f6d3b105258f10bde693870bd4 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 7 Dec 2020 18:27:16 +0100 -Subject: [PATCH] proto: add sctp crc32 checksum fixup - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1895804 -Upstream Status: nftables commit 09a3b2ba0c822 - -commit 09a3b2ba0c8228d1c6bf0f030cae97addb397351 -Author: Florian Westphal -Date: Tue Oct 6 23:16:32 2020 +0200 - - proto: add sctp crc32 checksum fixup - - Stateless SCTP header mangling doesn't work reliably. - This tells the kernel to update the checksum field using - the sctp crc32 algorithm. - - Note that this needs additional kernel support to work. 
- - Signed-off-by: Florian Westphal ---- - include/linux/netfilter/nf_tables.h | 2 ++ - include/proto.h | 1 + - src/netlink_linearize.c | 2 +- - src/proto.c | 8 ++++++++ - 4 files changed, 12 insertions(+), 1 deletion(-) - -diff --git a/include/linux/netfilter/nf_tables.h b/include/linux/netfilter/nf_tables.h -index 9b54a86..1328b8e 100644 ---- a/include/linux/netfilter/nf_tables.h -+++ b/include/linux/netfilter/nf_tables.h -@@ -707,10 +707,12 @@ enum nft_payload_bases { - * - * @NFT_PAYLOAD_CSUM_NONE: no checksumming - * @NFT_PAYLOAD_CSUM_INET: internet checksum (RFC 791) -+ * @NFT_PAYLOAD_CSUM_SCTP: CRC-32c, for use in SCTP header (RFC 3309) - */ - enum nft_payload_csum_types { - NFT_PAYLOAD_CSUM_NONE, - NFT_PAYLOAD_CSUM_INET, -+ NFT_PAYLOAD_CSUM_SCTP, - }; - - enum nft_payload_csum_flags { -diff --git a/include/proto.h b/include/proto.h -index fab48c1..436cbe3 100644 ---- a/include/proto.h -+++ b/include/proto.h -@@ -78,6 +78,7 @@ struct proto_hdr_template { - struct proto_desc { - const char *name; - enum proto_bases base; -+ enum nft_payload_csum_types checksum_type; - unsigned int checksum_key; - unsigned int protocol_key; - unsigned int length; -diff --git a/src/netlink_linearize.c b/src/netlink_linearize.c -index cb1b7fe..606d97a 100644 ---- a/src/netlink_linearize.c -+++ b/src/netlink_linearize.c -@@ -937,7 +937,7 @@ static void netlink_gen_payload_stmt(struct netlink_linearize_ctx *ctx, - expr->len / BITS_PER_BYTE); - if (csum_off) { - nftnl_expr_set_u32(nle, NFTNL_EXPR_PAYLOAD_CSUM_TYPE, -- NFT_PAYLOAD_CSUM_INET); -+ desc->checksum_type); - nftnl_expr_set_u32(nle, NFTNL_EXPR_PAYLOAD_CSUM_OFFSET, - csum_off / BITS_PER_BYTE); - } -diff --git a/src/proto.c b/src/proto.c -index 40ce590..8360abf 100644 ---- a/src/proto.c -+++ b/src/proto.c -@@ -345,6 +345,7 @@ const struct proto_desc proto_icmp = { - .name = "icmp", - .base = PROTO_BASE_TRANSPORT_HDR, - .checksum_key = ICMPHDR_CHECKSUM, -+ .checksum_type = NFT_PAYLOAD_CSUM_INET, - .templates = { - [ICMPHDR_TYPE] = ICMPHDR_TYPE("type", &icmp_type_type, type), - [ICMPHDR_CODE] = ICMPHDR_TYPE("code", &icmp_code_type, code), -@@ -397,6 +398,7 @@ const struct proto_desc proto_igmp = { - .name = "igmp", - .base = PROTO_BASE_TRANSPORT_HDR, - .checksum_key = IGMPHDR_CHECKSUM, -+ .checksum_type = NFT_PAYLOAD_CSUM_INET, - .templates = { - [IGMPHDR_TYPE] = IGMPHDR_TYPE("type", &igmp_type_type, igmp_type), - [IGMPHDR_MRT] = IGMPHDR_FIELD("mrt", igmp_code), -@@ -417,6 +419,7 @@ const struct proto_desc proto_udp = { - .name = "udp", - .base = PROTO_BASE_TRANSPORT_HDR, - .checksum_key = UDPHDR_CHECKSUM, -+ .checksum_type = NFT_PAYLOAD_CSUM_INET, - .templates = { - [UDPHDR_SPORT] = INET_SERVICE("sport", struct udphdr, source), - [UDPHDR_DPORT] = INET_SERVICE("dport", struct udphdr, dest), -@@ -474,6 +477,7 @@ const struct proto_desc proto_tcp = { - .name = "tcp", - .base = PROTO_BASE_TRANSPORT_HDR, - .checksum_key = TCPHDR_CHECKSUM, -+ .checksum_type = NFT_PAYLOAD_CSUM_INET, - .templates = { - [TCPHDR_SPORT] = INET_SERVICE("sport", struct tcphdr, source), - [TCPHDR_DPORT] = INET_SERVICE("dport", struct tcphdr, dest), -@@ -553,6 +557,8 @@ const struct proto_desc proto_dccp = { - const struct proto_desc proto_sctp = { - .name = "sctp", - .base = PROTO_BASE_TRANSPORT_HDR, -+ .checksum_key = SCTPHDR_CHECKSUM, -+ .checksum_type = NFT_PAYLOAD_CSUM_SCTP, - .templates = { - [SCTPHDR_SPORT] = INET_SERVICE("sport", struct sctphdr, source), - [SCTPHDR_DPORT] = INET_SERVICE("dport", struct sctphdr, dest), -@@ -650,6 +656,7 @@ const struct proto_desc proto_ip = 
{ - .name = "ip", - .base = PROTO_BASE_NETWORK_HDR, - .checksum_key = IPHDR_CHECKSUM, -+ .checksum_type = NFT_PAYLOAD_CSUM_INET, - .protocols = { - PROTO_LINK(IPPROTO_ICMP, &proto_icmp), - PROTO_LINK(IPPROTO_IGMP, &proto_igmp), -@@ -746,6 +753,7 @@ const struct proto_desc proto_icmp6 = { - .name = "icmpv6", - .base = PROTO_BASE_TRANSPORT_HDR, - .checksum_key = ICMP6HDR_CHECKSUM, -+ .checksum_type = NFT_PAYLOAD_CSUM_INET, - .templates = { - [ICMP6HDR_TYPE] = ICMP6HDR_TYPE("type", &icmp6_type_type, icmp6_type), - [ICMP6HDR_CODE] = ICMP6HDR_TYPE("code", &icmpv6_code_type, icmp6_code), --- -2.31.1 - diff --git a/SOURCES/0037-proto-Fix-ARP-header-field-ordering.patch b/SOURCES/0037-proto-Fix-ARP-header-field-ordering.patch deleted file mode 100644 index 8a0782d..0000000 --- a/SOURCES/0037-proto-Fix-ARP-header-field-ordering.patch +++ /dev/null @@ -1,233 +0,0 @@ -From 70dc225b23708c6ac96e2895488f3c6dea9e201d Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 7 Dec 2020 18:28:27 +0100 -Subject: [PATCH] proto: Fix ARP header field ordering - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1896334 -Upstream Status: nftables commit f751753f92ea7 - -commit f751753f92ea76f582f7d5d1fef8b4d5677ba589 -Author: Phil Sutter -Date: Tue Nov 10 13:07:49 2020 +0100 - - proto: Fix ARP header field ordering - - In ARP header, destination ether address sits between source IP and - destination IP addresses. Enum arp_hdr_fields had this wrong, which - in turn caused wrong ordering of entries in proto_arp->templates. When - expanding a combined payload expression, code assumes that template - entries are ordered by header offset, therefore the destination ether - address match was printed as raw if an earlier field was matched as - well: - - | arp saddr ip 192.168.1.1 arp daddr ether 3e:d1:3f:d6:12:0b - - was printed as: - - | arp saddr ip 192.168.1.1 @nh,144,48 69068440080907 - - Note: Although strictly not necessary, reorder fields in - proto_arp->templates as well to match their actual ordering, just to - avoid confusion. 
- - Fixes: 4b0f2a712b579 ("src: support for arp sender and target ethernet and IPv4 addresses") - Signed-off-by: Phil Sutter ---- - include/proto.h | 2 +- - src/proto.c | 2 +- - tests/py/arp/arp.t | 3 ++ - tests/py/arp/arp.t.json | 56 +++++++++++++++++++++++++++++++ - tests/py/arp/arp.t.json.output | 28 ++++++++++++++++ - tests/py/arp/arp.t.payload | 10 ++++++ - tests/py/arp/arp.t.payload.netdev | 14 ++++++++ - 7 files changed, 113 insertions(+), 2 deletions(-) - -diff --git a/include/proto.h b/include/proto.h -index 436cbe3..5a50059 100644 ---- a/include/proto.h -+++ b/include/proto.h -@@ -184,8 +184,8 @@ enum arp_hdr_fields { - ARPHDR_PLN, - ARPHDR_OP, - ARPHDR_SADDR_ETHER, -- ARPHDR_DADDR_ETHER, - ARPHDR_SADDR_IP, -+ ARPHDR_DADDR_ETHER, - ARPHDR_DADDR_IP, - }; - -diff --git a/src/proto.c b/src/proto.c -index 8360abf..49c8c92 100644 ---- a/src/proto.c -+++ b/src/proto.c -@@ -908,8 +908,8 @@ const struct proto_desc proto_arp = { - [ARPHDR_PLN] = ARPHDR_FIELD("plen", plen), - [ARPHDR_OP] = ARPHDR_TYPE("operation", &arpop_type, oper), - [ARPHDR_SADDR_ETHER] = ARPHDR_TYPE("saddr ether", ðeraddr_type, sha), -- [ARPHDR_DADDR_ETHER] = ARPHDR_TYPE("daddr ether", ðeraddr_type, tha), - [ARPHDR_SADDR_IP] = ARPHDR_TYPE("saddr ip", &ipaddr_type, spa), -+ [ARPHDR_DADDR_ETHER] = ARPHDR_TYPE("daddr ether", ðeraddr_type, tha), - [ARPHDR_DADDR_IP] = ARPHDR_TYPE("daddr ip", &ipaddr_type, tpa), - }, - .format = { -diff --git a/tests/py/arp/arp.t b/tests/py/arp/arp.t -index 2540c0a..109d01d 100644 ---- a/tests/py/arp/arp.t -+++ b/tests/py/arp/arp.t -@@ -61,4 +61,7 @@ arp daddr ip 4.3.2.1;ok - arp saddr ether aa:bb:cc:aa:bb:cc;ok - arp daddr ether aa:bb:cc:aa:bb:cc;ok - -+arp saddr ip 192.168.1.1 arp daddr ether fe:ed:00:c0:ff:ee;ok -+arp daddr ether fe:ed:00:c0:ff:ee arp saddr ip 192.168.1.1;ok;arp saddr ip 192.168.1.1 arp daddr ether fe:ed:00:c0:ff:ee -+ - meta iifname "invalid" arp ptype 0x0800 arp htype 1 arp hlen 6 arp plen 4 @nh,192,32 0xc0a88f10 @nh,144,48 set 0x112233445566;ok;iifname "invalid" arp htype 1 arp ptype ip arp hlen 6 arp plen 4 arp daddr ip 192.168.143.16 arp daddr ether set 11:22:33:44:55:66 -diff --git a/tests/py/arp/arp.t.json b/tests/py/arp/arp.t.json -index 5f2f6cd..8508c17 100644 ---- a/tests/py/arp/arp.t.json -+++ b/tests/py/arp/arp.t.json -@@ -901,6 +901,62 @@ - } - ] - -+# arp saddr ip 192.168.1.1 arp daddr ether fe:ed:00:c0:ff:ee -+[ -+ { -+ "match": { -+ "left": { -+ "payload": { -+ "field": "saddr ip", -+ "protocol": "arp" -+ } -+ }, -+ "op": "==", -+ "right": "192.168.1.1" -+ } -+ }, -+ { -+ "match": { -+ "left": { -+ "payload": { -+ "field": "daddr ether", -+ "protocol": "arp" -+ } -+ }, -+ "op": "==", -+ "right": "fe:ed:00:c0:ff:ee" -+ } -+ } -+] -+ -+# arp daddr ether fe:ed:00:c0:ff:ee arp saddr ip 192.168.1.1 -+[ -+ { -+ "match": { -+ "left": { -+ "payload": { -+ "field": "daddr ether", -+ "protocol": "arp" -+ } -+ }, -+ "op": "==", -+ "right": "fe:ed:00:c0:ff:ee" -+ } -+ }, -+ { -+ "match": { -+ "left": { -+ "payload": { -+ "field": "saddr ip", -+ "protocol": "arp" -+ } -+ }, -+ "op": "==", -+ "right": "192.168.1.1" -+ } -+ } -+] -+ - # meta iifname "invalid" arp ptype 0x0800 arp htype 1 arp hlen 6 arp plen 4 @nh,192,32 0xc0a88f10 @nh,144,48 set 0x112233445566 - [ - { -diff --git a/tests/py/arp/arp.t.json.output b/tests/py/arp/arp.t.json.output -index b8507bf..afa75b2 100644 ---- a/tests/py/arp/arp.t.json.output -+++ b/tests/py/arp/arp.t.json.output -@@ -66,6 +66,34 @@ - } - ] - -+# arp daddr ether fe:ed:00:c0:ff:ee arp saddr ip 192.168.1.1 -+[ -+ { -+ "match": { -+ 
"left": { -+ "payload": { -+ "field": "saddr ip", -+ "protocol": "arp" -+ } -+ }, -+ "op": "==", -+ "right": "192.168.1.1" -+ } -+ }, -+ { -+ "match": { -+ "left": { -+ "payload": { -+ "field": "daddr ether", -+ "protocol": "arp" -+ } -+ }, -+ "op": "==", -+ "right": "fe:ed:00:c0:ff:ee" -+ } -+ } -+] -+ - # meta iifname "invalid" arp ptype 0x0800 arp htype 1 arp hlen 6 arp plen 4 @nh,192,32 0xc0a88f10 @nh,144,48 set 0x112233445566 - [ - { -diff --git a/tests/py/arp/arp.t.payload b/tests/py/arp/arp.t.payload -index 52c9932..f819853 100644 ---- a/tests/py/arp/arp.t.payload -+++ b/tests/py/arp/arp.t.payload -@@ -307,3 +307,13 @@ arp test-arp input - [ payload load 6b @ network header + 18 => reg 1 ] - [ cmp eq reg 1 0xaaccbbaa 0x0000ccbb ] - -+# arp saddr ip 192.168.1.1 arp daddr ether fe:ed:00:c0:ff:ee -+arp -+ [ payload load 10b @ network header + 14 => reg 1 ] -+ [ cmp eq reg 1 0x0101a8c0 0xc000edfe 0x0000eeff ] -+ -+# arp daddr ether fe:ed:00:c0:ff:ee arp saddr ip 192.168.1.1 -+arp -+ [ payload load 10b @ network header + 14 => reg 1 ] -+ [ cmp eq reg 1 0x0101a8c0 0xc000edfe 0x0000eeff ] -+ -diff --git a/tests/py/arp/arp.t.payload.netdev b/tests/py/arp/arp.t.payload.netdev -index 667691f..f57610c 100644 ---- a/tests/py/arp/arp.t.payload.netdev -+++ b/tests/py/arp/arp.t.payload.netdev -@@ -409,3 +409,17 @@ netdev test-netdev ingress - [ payload load 6b @ network header + 18 => reg 1 ] - [ cmp eq reg 1 0xaaccbbaa 0x0000ccbb ] - -+# arp saddr ip 192.168.1.1 arp daddr ether fe:ed:00:c0:ff:ee -+netdev -+ [ meta load protocol => reg 1 ] -+ [ cmp eq reg 1 0x00000608 ] -+ [ payload load 10b @ network header + 14 => reg 1 ] -+ [ cmp eq reg 1 0x0101a8c0 0xc000edfe 0x0000eeff ] -+ -+# arp daddr ether fe:ed:00:c0:ff:ee arp saddr ip 192.168.1.1 -+netdev -+ [ meta load protocol => reg 1 ] -+ [ cmp eq reg 1 0x00000608 ] -+ [ payload load 10b @ network header + 14 => reg 1 ] -+ [ cmp eq reg 1 0x0101a8c0 0xc000edfe 0x0000eeff ] -+ --- -2.31.1 - diff --git a/SOURCES/0038-json-echo-Speedup-seqnum_to_json.patch b/SOURCES/0038-json-echo-Speedup-seqnum_to_json.patch deleted file mode 100644 index a62f001..0000000 --- a/SOURCES/0038-json-echo-Speedup-seqnum_to_json.patch +++ /dev/null @@ -1,108 +0,0 @@ -From 26c4f15080663a12006abf8539ebf28bb223e6d9 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 7 Dec 2020 18:29:15 +0100 -Subject: [PATCH] json: echo: Speedup seqnum_to_json() - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1900565 -Upstream Status: nftables commit 389a0e1edc89a - -commit 389a0e1edc89a4048a272e569d3349b1d43bc567 -Author: Phil Sutter -Date: Fri Nov 20 20:01:59 2020 +0100 - - json: echo: Speedup seqnum_to_json() - - Derek Dai reports: - "If there are a lot of command in JSON node, seqnum_to_json() will slow - down application (eg: firewalld) dramatically since it iterate whole - command list every time." - - He sent a patch implementing a lookup table, but we can do better: Speed - this up by introducing a hash table to store the struct json_cmd_assoc - objects in, taking their netlink sequence number as key. 
- - Quickly tested restoring a ruleset containing about 19k rules: - - | # time ./before/nft -jeaf large_ruleset.json >/dev/null - | 4.85user 0.47system 0:05.48elapsed 97%CPU (0avgtext+0avgdata 69732maxresident)k - | 0inputs+0outputs (15major+16937minor)pagefaults 0swaps - - | # time ./after/nft -jeaf large_ruleset.json >/dev/null - | 0.18user 0.44system 0:00.70elapsed 89%CPU (0avgtext+0avgdata 68484maxresident)k - | 0inputs+0outputs (15major+16645minor)pagefaults 0swaps - - Bugzilla: https://bugzilla.netfilter.org/show_bug.cgi?id=1479 - Reported-by: Derek Dai - Signed-off-by: Phil Sutter ---- - src/parser_json.c | 28 ++++++++++++++++++---------- - 1 file changed, 18 insertions(+), 10 deletions(-) - -diff --git a/src/parser_json.c b/src/parser_json.c -index ddc694f..107dc38 100644 ---- a/src/parser_json.c -+++ b/src/parser_json.c -@@ -3646,42 +3646,50 @@ static int json_verify_metainfo(struct json_ctx *ctx, json_t *root) - } - - struct json_cmd_assoc { -- struct json_cmd_assoc *next; -+ struct hlist_node hnode; - const struct cmd *cmd; - json_t *json; - }; - --static struct json_cmd_assoc *json_cmd_list = NULL; -+#define CMD_ASSOC_HSIZE 512 -+static struct hlist_head json_cmd_assoc_hash[CMD_ASSOC_HSIZE]; - - static void json_cmd_assoc_free(void) - { - struct json_cmd_assoc *cur; -+ struct hlist_node *pos, *n; -+ int i; - -- while (json_cmd_list) { -- cur = json_cmd_list; -- json_cmd_list = cur->next; -- free(cur); -+ for (i = 0; i < CMD_ASSOC_HSIZE; i++) { -+ hlist_for_each_entry_safe(cur, pos, n, -+ &json_cmd_assoc_hash[i], hnode) -+ free(cur); - } - } - - static void json_cmd_assoc_add(json_t *json, const struct cmd *cmd) - { - struct json_cmd_assoc *new = xzalloc(sizeof *new); -+ int key = cmd->seqnum % CMD_ASSOC_HSIZE; - -- new->next = json_cmd_list; - new->json = json; - new->cmd = cmd; -- json_cmd_list = new; -+ -+ hlist_add_head(&new->hnode, &json_cmd_assoc_hash[key]); - } - - static json_t *seqnum_to_json(const uint32_t seqnum) - { -- const struct json_cmd_assoc *cur; -+ int key = seqnum % CMD_ASSOC_HSIZE; -+ struct json_cmd_assoc *cur; -+ struct hlist_node *n; - -- for (cur = json_cmd_list; cur; cur = cur->next) { -+ -+ hlist_for_each_entry(cur, n, &json_cmd_assoc_hash[key], hnode) { - if (cur->cmd->seqnum == seqnum) - return cur->json; - } -+ - return NULL; - } - --- -2.31.1 - diff --git a/SOURCES/0039-json-Fix-seqnum_to_json-functionality.patch b/SOURCES/0039-json-Fix-seqnum_to_json-functionality.patch deleted file mode 100644 index 73e9ad1..0000000 --- a/SOURCES/0039-json-Fix-seqnum_to_json-functionality.patch +++ /dev/null @@ -1,116 +0,0 @@ -From 0dcfa1b0211fa50201d51d0f52869a8e2d93ba76 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 7 Dec 2020 18:29:15 +0100 -Subject: [PATCH] json: Fix seqnum_to_json() functionality - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1900565 -Upstream Status: nftables commit 299ec575faa6b - -commit 299ec575faa6b070940b483dc517ecd883b9f1a4 -Author: Phil Sutter -Date: Wed Dec 2 23:07:11 2020 +0100 - - json: Fix seqnum_to_json() functionality - - Introduction of json_cmd_assoc_hash missed that by the time the hash - table insert happens, the struct cmd object's 'seqnum' field which is - used as key is not initialized yet. This doesn't happen until - nft_netlink() prepares the batch object which records the lowest seqnum. - Therefore push all json_cmd_assoc objects into a temporary list until - the first lookup happens. 
At this time, all referenced cmd objects have - their seqnum set and the list entries can be moved into the hash table - for fast lookups. - - To expose such problems in the future, make json_events_cb() emit an - error message if the passed message has a handle but no assoc entry is - found for its seqnum. - - Fixes: 389a0e1edc89a ("json: echo: Speedup seqnum_to_json()") - Cc: Derek Dai - Signed-off-by: Phil Sutter ---- - src/parser_json.c | 27 +++++++++++++++++++++++---- - 1 file changed, 23 insertions(+), 4 deletions(-) - -diff --git a/src/parser_json.c b/src/parser_json.c -index 107dc38..785f0e7 100644 ---- a/src/parser_json.c -+++ b/src/parser_json.c -@@ -3646,6 +3646,7 @@ static int json_verify_metainfo(struct json_ctx *ctx, json_t *root) - } - - struct json_cmd_assoc { -+ struct json_cmd_assoc *next; - struct hlist_node hnode; - const struct cmd *cmd; - json_t *json; -@@ -3653,6 +3654,7 @@ struct json_cmd_assoc { - - #define CMD_ASSOC_HSIZE 512 - static struct hlist_head json_cmd_assoc_hash[CMD_ASSOC_HSIZE]; -+static struct json_cmd_assoc *json_cmd_assoc_list; - - static void json_cmd_assoc_free(void) - { -@@ -3660,6 +3662,12 @@ static void json_cmd_assoc_free(void) - struct hlist_node *pos, *n; - int i; - -+ while (json_cmd_assoc_list) { -+ cur = json_cmd_assoc_list->next; -+ free(json_cmd_assoc_list); -+ json_cmd_assoc_list = cur; -+ } -+ - for (i = 0; i < CMD_ASSOC_HSIZE; i++) { - hlist_for_each_entry_safe(cur, pos, n, - &json_cmd_assoc_hash[i], hnode) -@@ -3670,21 +3678,29 @@ static void json_cmd_assoc_free(void) - static void json_cmd_assoc_add(json_t *json, const struct cmd *cmd) - { - struct json_cmd_assoc *new = xzalloc(sizeof *new); -- int key = cmd->seqnum % CMD_ASSOC_HSIZE; - - new->json = json; - new->cmd = cmd; -+ new->next = json_cmd_assoc_list; - -- hlist_add_head(&new->hnode, &json_cmd_assoc_hash[key]); -+ json_cmd_assoc_list = new; - } - - static json_t *seqnum_to_json(const uint32_t seqnum) - { -- int key = seqnum % CMD_ASSOC_HSIZE; - struct json_cmd_assoc *cur; - struct hlist_node *n; -+ int key; - -+ while (json_cmd_assoc_list) { -+ cur = json_cmd_assoc_list; -+ json_cmd_assoc_list = cur->next; - -+ key = cur->cmd->seqnum % CMD_ASSOC_HSIZE; -+ hlist_add_head(&cur->hnode, &json_cmd_assoc_hash[key]); -+ } -+ -+ key = seqnum % CMD_ASSOC_HSIZE; - hlist_for_each_entry(cur, n, &json_cmd_assoc_hash[key], hnode) { - if (cur->cmd->seqnum == seqnum) - return cur->json; -@@ -3865,8 +3881,11 @@ int json_events_cb(const struct nlmsghdr *nlh, struct netlink_mon_handler *monh) - return MNL_CB_OK; - - json = seqnum_to_json(nlh->nlmsg_seq); -- if (!json) -+ if (!json) { -+ json_echo_error(monh, "No JSON command found with seqnum %lu\n", -+ nlh->nlmsg_seq); - return MNL_CB_OK; -+ } - - tmp = json_object_get(json, "add"); - if (!tmp) --- -2.31.1 - diff --git a/SOURCES/0040-json-don-t-leave-dangling-pointers-on-hlist.patch b/SOURCES/0040-json-don-t-leave-dangling-pointers-on-hlist.patch deleted file mode 100644 index 165db16..0000000 --- a/SOURCES/0040-json-don-t-leave-dangling-pointers-on-hlist.patch +++ /dev/null @@ -1,47 +0,0 @@ -From b7964157c40066f09411ac52547acb07d1966aee Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Tue, 12 Jan 2021 15:49:43 +0100 -Subject: [PATCH] json: don't leave dangling pointers on hlist - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1900565 -Upstream Status: nftables commit 48917d876d51c - -commit 48917d876d51cd6ba5bff07172acef05c9e12474 -Author: Florian Westphal -Date: Mon Dec 14 16:53:29 2020 +0100 - - json: don't leave dangling 
pointers on hlist - - unshare -n tests/json_echo/run-test.py - [..] - Adding chain c - free(): double free detected in tcache 2 - Aborted (core dumped) - - The element must be deleted from the hlist prior to freeing it. - - Fixes: 389a0e1edc89a ("json: echo: Speedup seqnum_to_json()") - Signed-off-by: Florian Westphal ---- - src/parser_json.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/src/parser_json.c b/src/parser_json.c -index 785f0e7..986f128 100644 ---- a/src/parser_json.c -+++ b/src/parser_json.c -@@ -3670,8 +3670,10 @@ static void json_cmd_assoc_free(void) - - for (i = 0; i < CMD_ASSOC_HSIZE; i++) { - hlist_for_each_entry_safe(cur, pos, n, -- &json_cmd_assoc_hash[i], hnode) -+ &json_cmd_assoc_hash[i], hnode) { -+ hlist_del(&cur->hnode); - free(cur); -+ } - } - } - --- -2.31.1 - diff --git a/SOURCES/0041-json-init-parser-state-for-every-new-buffer-file.patch b/SOURCES/0041-json-init-parser-state-for-every-new-buffer-file.patch deleted file mode 100644 index 6291fbf..0000000 --- a/SOURCES/0041-json-init-parser-state-for-every-new-buffer-file.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 2b4da3af37ac10d96650da1b8642f82a3aa92e30 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Sat, 20 Feb 2021 09:52:59 +0100 -Subject: [PATCH] json: init parser state for every new buffer/file - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1930873 -Upstream Status: nftables commit 267338ec39234 - -commit 267338ec392346ef55ed51509e5f8e8354d6c19a -Author: Eric Garver -Date: Fri Feb 19 10:11:26 2021 -0500 - - json: init parser state for every new buffer/file - - Otherwise invalid error states cause subsequent json parsing to fail - when it should not. - - Signed-off-by: Eric Garver - Signed-off-by: Phil Sutter ---- - src/parser_json.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/src/parser_json.c b/src/parser_json.c -index 986f128..662bb4b 100644 ---- a/src/parser_json.c -+++ b/src/parser_json.c -@@ -3777,6 +3777,7 @@ int nft_parse_json_buffer(struct nft_ctx *nft, const char *buf, - }; - int ret; - -+ parser_init(nft, nft->state, msgs, cmds, nft->top_scope); - nft->json_root = json_loads(buf, 0, NULL); - if (!nft->json_root) - return -EINVAL; -@@ -3805,6 +3806,7 @@ int nft_parse_json_filename(struct nft_ctx *nft, const char *filename, - json_error_t err; - int ret; - -+ parser_init(nft, nft->state, msgs, cmds, nft->top_scope); - nft->json_root = json_load_file(filename, 0, &err); - if (!nft->json_root) - return -EINVAL; --- -2.31.1 - diff --git a/SOURCES/0042-tests-Disable-tests-known-to-fail-on-RHEL8.patch b/SOURCES/0042-tests-Disable-tests-known-to-fail-on-RHEL8.patch deleted file mode 100644 index 6a866a1..0000000 --- a/SOURCES/0042-tests-Disable-tests-known-to-fail-on-RHEL8.patch +++ /dev/null @@ -1,465 +0,0 @@ -From f9dca1704ce66be31eceac4d7317b825269b3d07 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Tue, 2 Mar 2021 17:06:06 +0100 -Subject: [PATCH] tests: Disable tests known to fail on RHEL8 - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1919203 -Upstream Status: RHEL-only - -RHEL8 kernel does not support: - -- ct timeout or expectation objects -- synproxy -- flowtables in families other than inet -- meta time -- bridge family-specific meta expressions (e.g. 
ibrvproto, ibrpvid) -- socket mark -- osf -- delete set elements from packet path -- update stateful objects -- explicitly setting set element expiration (commit 79ebb5bb4e3) -- flushing chains and deleting referenced objects in the same - transaction (upstream commits with 'bogus EBUSY' in subject) - -Disable all related tests to make the testsuites pass. ---- - tests/monitor/testcases/object.t | 14 +++--- - tests/py/any/meta.t | 36 +++++++-------- - tests/py/bridge/meta.t | 8 ++-- - tests/py/inet/osf.t | 24 +++++----- - tests/py/inet/socket.t | 2 +- - tests/py/inet/synproxy.t | 12 ++--- - tests/py/ip/objects.t | 46 +++++++++---------- - tests/py/ip6/sets.t | 2 +- - .../flowtable/0002create_flowtable_0 | 8 ++-- - .../testcases/flowtable/0003add_after_flush_0 | 8 ++-- - .../flowtable/0004delete_after_add_0 | 6 +-- - .../testcases/flowtable/0005delete_in_use_1 | 10 ++-- - tests/shell/testcases/flowtable/0007prio_0 | 6 +-- - tests/shell/testcases/flowtable/0008prio_1 | 4 +- - .../flowtable/0009deleteafterflush_0 | 12 ++--- - tests/shell/testcases/listing/0013objects_0 | 2 + - .../testcases/nft-f/0017ct_timeout_obj_0 | 2 + - .../testcases/nft-f/0018ct_expectation_obj_0 | 2 + - ....nft => 0017ct_timeout_obj_0.nft.disabled} | 0 - .../optionals/update_object_handles_0 | 2 + - .../sets/0036add_set_element_expiration_0 | 2 + - tests/shell/testcases/transactions/0046set_0 | 2 + - 22 files changed, 111 insertions(+), 99 deletions(-) - rename tests/shell/testcases/nft-f/dumps/{0017ct_timeout_obj_0.nft => 0017ct_timeout_obj_0.nft.disabled} (100%) - -diff --git a/tests/monitor/testcases/object.t b/tests/monitor/testcases/object.t -index 2afe33c..1b30384 100644 ---- a/tests/monitor/testcases/object.t -+++ b/tests/monitor/testcases/object.t -@@ -37,10 +37,10 @@ I delete ct helper ip t cth - O - - J {"delete": {"ct helper": {"family": "ip", "name": "cth", "table": "t", "handle": 0, "type": "sip", "protocol": "tcp", "l3proto": "ip"}}} - --I add ct timeout ip t ctt { protocol udp; l3proto ip; policy = { unreplied : 15, replied : 12 }; } --O - --J {"add": {"ct timeout": {"family": "ip", "name": "ctt", "table": "t", "handle": 0, "protocol": "udp", "l3proto": "ip", "policy": {"unreplied": 15, "replied": 12}}}} -- --I delete ct timeout ip t ctt --O - --J {"delete": {"ct timeout": {"family": "ip", "name": "ctt", "table": "t", "handle": 0, "protocol": "udp", "l3proto": "ip", "policy": {"unreplied": 15, "replied": 12}}}} -+# I add ct timeout ip t ctt { protocol udp; l3proto ip; policy = { unreplied : 15, replied : 12 }; } -+# O - -+# J {"add": {"ct timeout": {"family": "ip", "name": "ctt", "table": "t", "handle": 0, "protocol": "udp", "l3proto": "ip", "policy": {"unreplied": 15, "replied": 12}}}} -+# -+# I delete ct timeout ip t ctt -+# O - -+# J {"delete": {"ct timeout": {"family": "ip", "name": "ctt", "table": "t", "handle": 0, "protocol": "udp", "l3proto": "ip", "policy": {"unreplied": 15, "replied": 12}}}} -diff --git a/tests/py/any/meta.t b/tests/py/any/meta.t -index 327f973..241b466 100644 ---- a/tests/py/any/meta.t -+++ b/tests/py/any/meta.t -@@ -204,21 +204,21 @@ meta iif . meta oif vmap { "lo" . "lo" : drop };ok;iif . oif vmap { "lo" . 
"lo" - meta random eq 1;ok;meta random 1 - meta random gt 1000000;ok;meta random > 1000000 - --meta time "1970-05-23 21:07:14" drop;ok --meta time 12341234 drop;ok;meta time "1970-05-23 22:07:14" drop --meta time "2019-06-21 17:00:00" drop;ok --meta time "2019-07-01 00:00:00" drop;ok --meta time "2019-07-01 00:01:00" drop;ok --meta time "2019-07-01 00:00:01" drop;ok --meta day "Saturday" drop;ok --meta day 6 drop;ok;meta day "Saturday" drop --meta day "Satturday" drop;fail --meta hour "17:00" drop;ok --meta hour "17:00:00" drop;ok;meta hour "17:00" drop --meta hour "17:00:01" drop;ok --meta hour "00:00" drop;ok --meta hour "00:01" drop;ok -- --meta time "meh";fail --meta hour "24:00" drop;fail --meta day 7 drop;fail -+- meta time "1970-05-23 21:07:14" drop;ok -+- meta time 12341234 drop;ok;meta time "1970-05-23 22:07:14" drop -+- meta time "2019-06-21 17:00:00" drop;ok -+- meta time "2019-07-01 00:00:00" drop;ok -+- meta time "2019-07-01 00:01:00" drop;ok -+- meta time "2019-07-01 00:00:01" drop;ok -+- meta day "Saturday" drop;ok -+- meta day 6 drop;ok;meta day "Saturday" drop -+- meta day "Satturday" drop;fail -+- meta hour "17:00" drop;ok -+- meta hour "17:00:00" drop;ok;meta hour "17:00" drop -+- meta hour "17:00:01" drop;ok -+- meta hour "00:00" drop;ok -+- meta hour "00:01" drop;ok -+ -+- meta time "meh";fail -+- meta hour "24:00" drop;fail -+- meta day 7 drop;fail -diff --git a/tests/py/bridge/meta.t b/tests/py/bridge/meta.t -index 94525f2..9f55cde 100644 ---- a/tests/py/bridge/meta.t -+++ b/tests/py/bridge/meta.t -@@ -2,7 +2,7 @@ - - *bridge;test-bridge;input - --meta obrname "br0";ok --meta ibrname "br0";ok --meta ibrvproto vlan;ok --meta ibrpvid 100;ok -+- meta obrname "br0";ok -+- meta ibrname "br0";ok -+- meta ibrvproto vlan;ok -+- meta ibrpvid 100;ok -diff --git a/tests/py/inet/osf.t b/tests/py/inet/osf.t -index c828541..5191e72 100644 ---- a/tests/py/inet/osf.t -+++ b/tests/py/inet/osf.t -@@ -4,15 +4,15 @@ - *ip6;osfip6;osfchain - *inet;osfinet;osfchain - --osf name "Linux";ok --osf ttl loose name "Linux";ok --osf ttl skip name "Linux";ok --osf ttl skip version "Linux:3.0";ok --osf ttl skip version "morethan:sixteenbytes";fail --osf ttl nottl name "Linux";fail --osf name "morethansixteenbytes";fail --osf name ;fail --osf name { "Windows", "MacOs" };ok --osf version { "Windows:XP", "MacOs:Sierra" };ok --ct mark set osf name map { "Windows" : 0x00000001, "MacOs" : 0x00000002 };ok --ct mark set osf version map { "Windows:XP" : 0x00000003, "MacOs:Sierra" : 0x00000004 };ok -+- osf name "Linux";ok -+- osf ttl loose name "Linux";ok -+- osf ttl skip name "Linux";ok -+- osf ttl skip version "Linux:3.0";ok -+- osf ttl skip version "morethan:sixteenbytes";fail -+- osf ttl nottl name "Linux";fail -+- osf name "morethansixteenbytes";fail -+- osf name ;fail -+- osf name { "Windows", "MacOs" };ok -+- osf version { "Windows:XP", "MacOs:Sierra" };ok -+- ct mark set osf name map { "Windows" : 0x00000001, "MacOs" : 0x00000002 };ok -+- ct mark set osf version map { "Windows:XP" : 0x00000003, "MacOs:Sierra" : 0x00000004 };ok -diff --git a/tests/py/inet/socket.t b/tests/py/inet/socket.t -index 91846e8..dbc0554 100644 ---- a/tests/py/inet/socket.t -+++ b/tests/py/inet/socket.t -@@ -8,4 +8,4 @@ socket transparent 0;ok - socket transparent 1;ok - socket transparent 2;fail - --socket mark 0x00000005;ok -+- socket mark 0x00000005;ok -diff --git a/tests/py/inet/synproxy.t b/tests/py/inet/synproxy.t -index 55a05e1..9c58239 100644 ---- a/tests/py/inet/synproxy.t -+++ b/tests/py/inet/synproxy.t -@@ -4,10 
+4,10 @@ - *ip6;synproxyip6;synproxychain - *inet;synproxyinet;synproxychain - --synproxy;ok --synproxy mss 1460 wscale 7;ok --synproxy mss 1460 wscale 5 timestamp sack-perm;ok --synproxy timestamp sack-perm;ok --synproxy timestamp;ok --synproxy sack-perm;ok -+-synproxy;ok -+-synproxy mss 1460 wscale 7;ok -+-synproxy mss 1460 wscale 5 timestamp sack-perm;ok -+-synproxy timestamp sack-perm;ok -+-synproxy timestamp;ok -+-synproxy sack-perm;ok - -diff --git a/tests/py/ip/objects.t b/tests/py/ip/objects.t -index 4fcde7c..06e94f1 100644 ---- a/tests/py/ip/objects.t -+++ b/tests/py/ip/objects.t -@@ -33,26 +33,26 @@ ip saddr 192.168.1.3 limit name "lim1";ok - ip saddr 192.168.1.3 limit name "lim3";fail - limit name tcp dport map {443 : "lim1", 80 : "lim2", 22 : "lim1"};ok - --# ct timeout --%cttime1 type ct timeout { protocol tcp; policy = { established:122 } ;};ok --%cttime2 type ct timeout { protocol udp; policy = { syn_sent:122 } ;};fail --%cttime3 type ct timeout { protocol tcp; policy = { established:132, close:16, close_wait:16 } ; l3proto ip ;};ok --%cttime4 type ct timeout { protocol udp; policy = { replied:14, unreplied:19 } ;};ok --%cttime5 type ct timeout {protocol tcp; policy = { estalbished:100 } ;};fail -- --ct timeout set "cttime1";ok -- --# ct expectation --%ctexpect1 type ct expectation { protocol tcp; dport 1234; timeout 2m; size 12; };ok --%ctexpect2 type ct expectation { protocol udp; };fail --%ctexpect3 type ct expectation { protocol tcp; dport 4321; };fail --%ctexpect4 type ct expectation { protocol tcp; dport 4321; timeout 2m; };fail --%ctexpect5 type ct expectation { protocol udp; dport 9876; timeout 2m; size 12; l3proto ip; };ok -- --ct expectation set "ctexpect1";ok -- --# synproxy --%synproxy1 type synproxy mss 1460 wscale 7;ok --%synproxy2 type synproxy mss 1460 wscale 7 timestamp sack-perm;ok -- --synproxy name tcp dport map {443 : "synproxy1", 80 : "synproxy2"};ok -+# # ct timeout -+# %cttime1 type ct timeout { protocol tcp; policy = { established:122 } ;};ok -+# %cttime2 type ct timeout { protocol udp; policy = { syn_sent:122 } ;};fail -+# %cttime3 type ct timeout { protocol tcp; policy = { established:132, close:16, close_wait:16 } ; l3proto ip ;};ok -+# %cttime4 type ct timeout { protocol udp; policy = { replied:14, unreplied:19 } ;};ok -+# %cttime5 type ct timeout {protocol tcp; policy = { estalbished:100 } ;};fail -+# -+# ct timeout set "cttime1";ok -+ -+# # ct expectation -+# %ctexpect1 type ct expectation { protocol tcp; dport 1234; timeout 2m; size 12; };ok -+# %ctexpect2 type ct expectation { protocol udp; };fail -+# %ctexpect3 type ct expectation { protocol tcp; dport 4321; };fail -+# %ctexpect4 type ct expectation { protocol tcp; dport 4321; timeout 2m; };fail -+# %ctexpect5 type ct expectation { protocol udp; dport 9876; timeout 2m; size 12; l3proto ip; };ok -+# -+# ct expectation set "ctexpect1";ok -+ -+# # synproxy -+# %synproxy1 type synproxy mss 1460 wscale 7;ok -+# %synproxy2 type synproxy mss 1460 wscale 7 timestamp sack-perm;ok -+# -+# synproxy name tcp dport map {443 : "synproxy1", 80 : "synproxy2"};ok -diff --git a/tests/py/ip6/sets.t b/tests/py/ip6/sets.t -index add82eb..cc43aca 100644 ---- a/tests/py/ip6/sets.t -+++ b/tests/py/ip6/sets.t -@@ -40,4 +40,4 @@ ip6 saddr != @set33 drop;fail - !set5 type ipv6_addr . ipv6_addr;ok - ip6 saddr . ip6 daddr @set5 drop;ok - add @set5 { ip6 saddr . ip6 daddr };ok --delete @set5 { ip6 saddr . ip6 daddr };ok -+- delete @set5 { ip6 saddr . 
ip6 daddr };ok -diff --git a/tests/shell/testcases/flowtable/0002create_flowtable_0 b/tests/shell/testcases/flowtable/0002create_flowtable_0 -index 4c85c3f..8b80e34 100755 ---- a/tests/shell/testcases/flowtable/0002create_flowtable_0 -+++ b/tests/shell/testcases/flowtable/0002create_flowtable_0 -@@ -1,12 +1,12 @@ - #!/bin/bash - - set -e --$NFT add table t --$NFT add flowtable t f { hook ingress priority 10 \; devices = { lo }\; } --if $NFT create flowtable t f { hook ingress priority 10 \; devices = { lo }\; } 2>/dev/null ; then -+$NFT add table inet t -+$NFT add flowtable inet t f { hook ingress priority 10 \; devices = { lo }\; } -+if $NFT create flowtable inet t f { hook ingress priority 10 \; devices = { lo }\; } 2>/dev/null ; then - echo "E: flowtable creation not failing on existing set" >&2 - exit 1 - fi --$NFT add flowtable t f { hook ingress priority 10 \; devices = { lo }\; } -+$NFT add flowtable inet t f { hook ingress priority 10 \; devices = { lo }\; } - - exit 0 -diff --git a/tests/shell/testcases/flowtable/0003add_after_flush_0 b/tests/shell/testcases/flowtable/0003add_after_flush_0 -index 481c7ed..b4243bc 100755 ---- a/tests/shell/testcases/flowtable/0003add_after_flush_0 -+++ b/tests/shell/testcases/flowtable/0003add_after_flush_0 -@@ -1,8 +1,8 @@ - #!/bin/bash - - set -e --$NFT add table x --$NFT add flowtable x y { hook ingress priority 0\; devices = { lo }\;} -+$NFT add table inet x -+$NFT add flowtable inet x y { hook ingress priority 0\; devices = { lo }\;} - $NFT flush ruleset --$NFT add table x --$NFT add flowtable x y { hook ingress priority 0\; devices = { lo }\;} -+$NFT add table inet x -+$NFT add flowtable inet x y { hook ingress priority 0\; devices = { lo }\;} -diff --git a/tests/shell/testcases/flowtable/0004delete_after_add_0 b/tests/shell/testcases/flowtable/0004delete_after_add_0 -index 8d9a842..4618595 100755 ---- a/tests/shell/testcases/flowtable/0004delete_after_add_0 -+++ b/tests/shell/testcases/flowtable/0004delete_after_add_0 -@@ -1,6 +1,6 @@ - #!/bin/bash - - set -e --$NFT add table x --$NFT add flowtable x y { hook ingress priority 0\; devices = { lo }\;} --$NFT delete flowtable x y -+$NFT add table inet x -+$NFT add flowtable inet x y { hook ingress priority 0\; devices = { lo }\;} -+$NFT delete flowtable inet x y -diff --git a/tests/shell/testcases/flowtable/0005delete_in_use_1 b/tests/shell/testcases/flowtable/0005delete_in_use_1 -index ef52620..eda1fb9 100755 ---- a/tests/shell/testcases/flowtable/0005delete_in_use_1 -+++ b/tests/shell/testcases/flowtable/0005delete_in_use_1 -@@ -1,11 +1,11 @@ - #!/bin/bash - - set -e --$NFT add table x --$NFT add chain x x --$NFT add flowtable x y { hook ingress priority 0\; devices = { lo }\;} --$NFT add rule x x flow add @y -+$NFT add table inet x -+$NFT add chain inet x x -+$NFT add flowtable inet x y { hook ingress priority 0\; devices = { lo }\;} -+$NFT add rule inet x x flow add @y - --$NFT delete flowtable x y || exit 0 -+$NFT delete flowtable inet x y || exit 0 - echo "E: delete flowtable in use" - exit 1 -diff --git a/tests/shell/testcases/flowtable/0007prio_0 b/tests/shell/testcases/flowtable/0007prio_0 -index 49bbcac..0ea262f 100755 ---- a/tests/shell/testcases/flowtable/0007prio_0 -+++ b/tests/shell/testcases/flowtable/0007prio_0 -@@ -15,10 +15,10 @@ format_offset () { - fi - } - --$NFT add table t -+$NFT add table inet t - for offset in -11 -10 0 10 11 - do -- $NFT add flowtable t f "{ hook ingress priority filter `format_offset $offset`; devices = { lo }; }" -- $NFT delete flowtable t f -+ $NFT 
add flowtable inet t f "{ hook ingress priority filter `format_offset $offset`; devices = { lo }; }" -+ $NFT delete flowtable inet t f - done - -diff --git a/tests/shell/testcases/flowtable/0008prio_1 b/tests/shell/testcases/flowtable/0008prio_1 -index 48953d7..0d8cdff 100755 ---- a/tests/shell/testcases/flowtable/0008prio_1 -+++ b/tests/shell/testcases/flowtable/0008prio_1 -@@ -1,9 +1,9 @@ - #!/bin/bash - --$NFT add table t -+$NFT add table inet t - for prioname in raw mangle dstnar security srcnat out dummy - do -- $NFT add flowtable t f { hook ingress priority $prioname \; devices = { lo }\; } -+ $NFT add flowtable inet t f { hook ingress priority $prioname \; devices = { lo }\; } - if (($? == 0)) - then - echo "E: $prioname should not be a valid priority name for flowtables" >&2 -diff --git a/tests/shell/testcases/flowtable/0009deleteafterflush_0 b/tests/shell/testcases/flowtable/0009deleteafterflush_0 -index 2cda563..061e22e 100755 ---- a/tests/shell/testcases/flowtable/0009deleteafterflush_0 -+++ b/tests/shell/testcases/flowtable/0009deleteafterflush_0 -@@ -1,9 +1,9 @@ - #!/bin/bash - - set -e --$NFT add table x --$NFT add chain x y --$NFT add flowtable x f { hook ingress priority 0\; devices = { lo }\;} --$NFT add rule x y flow add @f --$NFT flush chain x y --$NFT delete flowtable x f -+$NFT add table inet x -+$NFT add chain inet x y -+$NFT add flowtable inet x f { hook ingress priority 0\; devices = { lo }\;} -+$NFT add rule inet x y flow add @f -+$NFT flush chain inet x y -+$NFT delete flowtable inet x f -diff --git a/tests/shell/testcases/listing/0013objects_0 b/tests/shell/testcases/listing/0013objects_0 -index 4d39143..130d02c 100755 ---- a/tests/shell/testcases/listing/0013objects_0 -+++ b/tests/shell/testcases/listing/0013objects_0 -@@ -1,5 +1,7 @@ - #!/bin/bash - -+exit 0 -+ - # list table with all objects and chains - - EXPECTED="table ip test { -diff --git a/tests/shell/testcases/nft-f/0017ct_timeout_obj_0 b/tests/shell/testcases/nft-f/0017ct_timeout_obj_0 -index 4f40779..e0f9e44 100755 ---- a/tests/shell/testcases/nft-f/0017ct_timeout_obj_0 -+++ b/tests/shell/testcases/nft-f/0017ct_timeout_obj_0 -@@ -1,5 +1,7 @@ - #!/bin/bash - -+exit 0 -+ - EXPECTED='table ip filter { - ct timeout cttime{ - protocol tcp -diff --git a/tests/shell/testcases/nft-f/0018ct_expectation_obj_0 b/tests/shell/testcases/nft-f/0018ct_expectation_obj_0 -index 4f9872f..f518cf7 100755 ---- a/tests/shell/testcases/nft-f/0018ct_expectation_obj_0 -+++ b/tests/shell/testcases/nft-f/0018ct_expectation_obj_0 -@@ -1,5 +1,7 @@ - #!/bin/bash - -+exit 0 -+ - EXPECTED='table ip filter { - ct expectation ctexpect{ - protocol tcp -diff --git a/tests/shell/testcases/nft-f/dumps/0017ct_timeout_obj_0.nft b/tests/shell/testcases/nft-f/dumps/0017ct_timeout_obj_0.nft.disabled -similarity index 100% -rename from tests/shell/testcases/nft-f/dumps/0017ct_timeout_obj_0.nft -rename to tests/shell/testcases/nft-f/dumps/0017ct_timeout_obj_0.nft.disabled -diff --git a/tests/shell/testcases/optionals/update_object_handles_0 b/tests/shell/testcases/optionals/update_object_handles_0 -index 8b12b8c..e11b4e7 100755 ---- a/tests/shell/testcases/optionals/update_object_handles_0 -+++ b/tests/shell/testcases/optionals/update_object_handles_0 -@@ -1,5 +1,7 @@ - #!/bin/bash - -+exit 0 -+ - set -e - $NFT add table test-ip - $NFT add counter test-ip traffic-counter -diff --git a/tests/shell/testcases/sets/0036add_set_element_expiration_0 b/tests/shell/testcases/sets/0036add_set_element_expiration_0 -index 51ed0f2..043bb8f 100755 ---- 
a/tests/shell/testcases/sets/0036add_set_element_expiration_0 -+++ b/tests/shell/testcases/sets/0036add_set_element_expiration_0 -@@ -1,5 +1,7 @@ - #!/bin/bash - -+exit 0 -+ - set -e - - RULESET="add table ip x -diff --git a/tests/shell/testcases/transactions/0046set_0 b/tests/shell/testcases/transactions/0046set_0 -index 172e24d..1b24964 100755 ---- a/tests/shell/testcases/transactions/0046set_0 -+++ b/tests/shell/testcases/transactions/0046set_0 -@@ -1,5 +1,7 @@ - #!/bin/bash - -+exit 0 -+ - RULESET='add table ip filter - add chain ip filter group_7933 - add map ip filter group_7933 { type ipv4_addr : classid; flags interval; } --- -2.31.1 - diff --git a/SOURCES/0043-monitor-Fix-for-use-after-free-when-printing-map-ele.patch b/SOURCES/0043-monitor-Fix-for-use-after-free-when-printing-map-ele.patch deleted file mode 100644 index 2f86c7a..0000000 --- a/SOURCES/0043-monitor-Fix-for-use-after-free-when-printing-map-ele.patch +++ /dev/null @@ -1,41 +0,0 @@ -From 1490609a3d82e494168a390b34094bacc5e83c02 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Tue, 18 May 2021 18:06:50 +0200 -Subject: [PATCH] monitor: Fix for use after free when printing map elements - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1919203 -Upstream Status: nftables commit 02174ffad484d - -commit 02174ffad484d9711678e5d415c32307efc39857 -Author: Phil Sutter -Date: Thu Jan 9 17:43:11 2020 +0100 - - monitor: Fix for use after free when printing map elements - - When populating the dummy set, 'data' field must be cloned just like - 'key' field. - - Fixes: 343a51702656a ("src: store expr, not dtype to track data in sets") - Signed-off-by: Phil Sutter - Acked-by: Pablo Neira Ayuso ---- - src/monitor.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/src/monitor.c b/src/monitor.c -index 7927b6f..142cc92 100644 ---- a/src/monitor.c -+++ b/src/monitor.c -@@ -401,7 +401,8 @@ static int netlink_events_setelem_cb(const struct nlmsghdr *nlh, int type, - */ - dummyset = set_alloc(monh->loc); - dummyset->key = expr_clone(set->key); -- dummyset->data = set->data; -+ if (set->data) -+ dummyset->data = expr_clone(set->data); - dummyset->flags = set->flags; - dummyset->init = set_expr_alloc(monh->loc, set); - --- -2.31.1 - diff --git a/SOURCES/0044-tests-monitor-use-correct-nft-value-in-EXIT-trap.patch b/SOURCES/0044-tests-monitor-use-correct-nft-value-in-EXIT-trap.patch deleted file mode 100644 index cfb0df1..0000000 --- a/SOURCES/0044-tests-monitor-use-correct-nft-value-in-EXIT-trap.patch +++ /dev/null @@ -1,44 +0,0 @@ -From 4ee4ed8d54a8b9f0f0a2b195b3b95b892e4e79a3 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Tue, 18 May 2021 18:06:50 +0200 -Subject: [PATCH] tests: monitor: use correct $nft value in EXIT trap -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1919203 -Upstream Status: nftables commit 990cbbf75c40b - -commit 990cbbf75c40b92e6d6dc66721dfbedf33cacf8f -Author: Å tÄ›pán NÄ›mec -Date: Wed Jan 27 15:02:03 2021 +0100 - - tests: monitor: use correct $nft value in EXIT trap - - With double quotes, $nft was being expanded to the default value even - in presence of the -H option. 
- - Signed-off-by: Å tÄ›pán NÄ›mec - Helped-by: Tomáš Doležal - Acked-by: Phil Sutter - Signed-off-by: Phil Sutter ---- - tests/monitor/run-tests.sh | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/tests/monitor/run-tests.sh b/tests/monitor/run-tests.sh -index ffb833a..c1cacb4 100755 ---- a/tests/monitor/run-tests.sh -+++ b/tests/monitor/run-tests.sh -@@ -19,7 +19,7 @@ if [ ! -d $testdir ]; then - echo "Failed to create test directory" >&2 - exit 1 - fi --trap "rm -rf $testdir; $nft flush ruleset" EXIT -+trap 'rm -rf $testdir; $nft flush ruleset' EXIT - - command_file=$(mktemp -p $testdir) - output_file=$(mktemp -p $testdir) --- -2.31.1 - diff --git a/SOURCES/0045-evaluate-Reject-quoted-strings-containing-only-wildc.patch b/SOURCES/0045-evaluate-Reject-quoted-strings-containing-only-wildc.patch deleted file mode 100644 index 2178c15..0000000 --- a/SOURCES/0045-evaluate-Reject-quoted-strings-containing-only-wildc.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 805fe6f5c9c8f2af78d8e94bd6b5c33724df3c80 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Tue, 18 May 2021 18:16:21 +0200 -Subject: [PATCH] evaluate: Reject quoted strings containing only wildcard - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1818117 -Upstream Status: nftables commit 032c9f745c6da - -commit 032c9f745c6daab8c27176a95963b1c32b0a5d12 -Author: Phil Sutter -Date: Thu Sep 24 17:38:45 2020 +0200 - - evaluate: Reject quoted strings containing only wildcard - - Fix for an assertion fail when trying to match against an all-wildcard - interface name: - - | % nft add rule t c iifname '"*"' - | nft: expression.c:402: constant_expr_alloc: Assertion `(((len) + (8) - 1) / (8)) > 0' failed. - | zsh: abort nft add rule t c iifname '"*"' - - Fix this by detecting the string in expr_evaluate_string() and returning - an error message: - - | % nft add rule t c iifname '"*"' - | Error: All-wildcard strings are not supported - | add rule t c iifname "*" - | ^^^ - - While being at it, drop the 'datalen >= 1' clause from the following - conditional as together with the added check for 'datalen == 0', all - possible other values have been caught already. 
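A rough standalone sketch of the guard ordering described above (hypothetical code, not the real expr_evaluate_string(); the classify() helper and its return strings are made up for illustration). Once the trailing wildcard has been stripped, an all-wildcard string leaves zero bytes behind, so rejecting datalen == 0 first both yields the "All-wildcard strings are not supported" error and makes the later data[datalen - 1] access safe without a separate 'datalen >= 1' test:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical helper mimicking only the order of checks. */
    static const char *classify(const char *data)
    {
        size_t datalen = strlen(data);

        if (datalen && data[datalen - 1] == '*')
            datalen--;          /* wildcard suffix is handled separately */
        if (datalen == 0)
            return "All-wildcard strings are not supported";
        if (data[datalen - 1] == '\\')
            return "trailing escape, unescape before use";
        return "plain string";
    }

    int main(void)
    {
        printf("\"*\"    -> %s\n", classify("*"));
        printf("\"eth*\" -> %s\n", classify("eth*"));
        printf("\"eth0\" -> %s\n", classify("eth0"));
        return 0;
    }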
---- - src/evaluate.c | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) - -diff --git a/src/evaluate.c b/src/evaluate.c -index a966ed4..0181750 100644 ---- a/src/evaluate.c -+++ b/src/evaluate.c -@@ -321,8 +321,11 @@ static int expr_evaluate_string(struct eval_ctx *ctx, struct expr **exprp) - return 0; - } - -- if (datalen >= 1 && -- data[datalen - 1] == '\\') { -+ if (datalen == 0) -+ return expr_error(ctx->msgs, expr, -+ "All-wildcard strings are not supported"); -+ -+ if (data[datalen - 1] == '\\') { - char unescaped_str[data_len]; - - memset(unescaped_str, 0, sizeof(unescaped_str)); --- -2.31.1 - diff --git a/SOURCES/0046-src-Support-odd-sized-payload-matches.patch b/SOURCES/0046-src-Support-odd-sized-payload-matches.patch deleted file mode 100644 index 9b17f0c..0000000 --- a/SOURCES/0046-src-Support-odd-sized-payload-matches.patch +++ /dev/null @@ -1,64 +0,0 @@ -From 64f34f34acedad6cce70f2dd91c82a814d4ffe34 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 19 May 2021 18:03:43 +0200 -Subject: [PATCH] src: Support odd-sized payload matches - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1934926 -Upstream Status: nftables commit 8a927c56d83ed - -commit 8a927c56d83ed0f78785011bd92a53edc25a0ca0 -Author: Phil Sutter -Date: Tue Oct 27 17:05:25 2020 +0100 - - src: Support odd-sized payload matches - - When expanding a payload match, don't disregard oversized templates at - the right offset. A more flexible user may extract less bytes from the - packet if only parts of a field are interesting, e.g. only the prefix of - source/destination address. Support that by using the template, but fix - the length. Later when creating a relational expression for it, detect - the unusually small payload expression length and turn the RHS value - into a prefix expression. 
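As a standalone illustration of the prefix trick described above (not nftables code; the address and lengths are invented): a compare against only the first 24 bits of a 32-bit field can be presented to the user as value/24 by shifting the matched bits into the full field width and keeping the original compare length as the prefix length, the same idea as the mpz_lshift_ui()/prefix_expr_alloc() pair in the payload_match_expand() hunk that follows:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t loaded  = 0xc0a802;    /* the 24 bits actually compared */
        unsigned cmp_len = 24, field_len = 32;
        uint32_t widened = loaded << (field_len - cmp_len);   /* 0xc0a80200 */

        /* Prints 192.168.2.0/24 */
        printf("%u.%u.%u.%u/%u\n",
               (unsigned)(widened >> 24), (unsigned)((widened >> 16) & 0xff),
               (unsigned)((widened >> 8) & 0xff), (unsigned)(widened & 0xff),
               cmp_len);
        return 0;
    }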
- - Signed-off-by: Phil Sutter ---- - src/netlink_delinearize.c | 6 ++++++ - src/payload.c | 5 +++++ - 2 files changed, 11 insertions(+) - -diff --git a/src/netlink_delinearize.c b/src/netlink_delinearize.c -index 88dbd5a..8bdee12 100644 ---- a/src/netlink_delinearize.c -+++ b/src/netlink_delinearize.c -@@ -1577,6 +1577,12 @@ static void payload_match_expand(struct rule_pp_ctx *ctx, - tmp = constant_expr_splice(right, left->len); - expr_set_type(tmp, left->dtype, left->byteorder); - -+ if (left->payload.tmpl && (left->len < left->payload.tmpl->len)) { -+ mpz_lshift_ui(tmp->value, left->payload.tmpl->len - left->len); -+ tmp->len = left->payload.tmpl->len; -+ tmp = prefix_expr_alloc(&tmp->location, tmp, left->len); -+ } -+ - nexpr = relational_expr_alloc(&expr->location, expr->op, - left, tmp); - if (expr->op == OP_EQ) -diff --git a/src/payload.c b/src/payload.c -index 3576400..45280ef 100644 ---- a/src/payload.c -+++ b/src/payload.c -@@ -746,6 +746,11 @@ void payload_expr_expand(struct list_head *list, struct expr *expr, - expr->payload.offset += tmpl->len; - if (expr->len == 0) - return; -+ } else if (expr->len > 0) { -+ new = payload_expr_alloc(&expr->location, desc, i); -+ new->len = expr->len; -+ list_add_tail(&new->list, list); -+ return; - } else - break; - } --- -2.31.1 - diff --git a/SOURCES/0047-src-Optimize-prefix-matches-on-byte-boundaries.patch b/SOURCES/0047-src-Optimize-prefix-matches-on-byte-boundaries.patch deleted file mode 100644 index c6288ac..0000000 --- a/SOURCES/0047-src-Optimize-prefix-matches-on-byte-boundaries.patch +++ /dev/null @@ -1,241 +0,0 @@ -From 6fb6d8f15a82b3348184f6950a436becb06931cb Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 19 May 2021 18:03:43 +0200 -Subject: [PATCH] src: Optimize prefix matches on byte-boundaries - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1934926 -Upstream Status: nftables commit 25338cdb6c77a -Conflicts: There is a hidden dependency on commit ee4391d0ac1e7 ("nat: - transform range to prefix expression when possible"). - Backport only the single chunk required to keep prefix - parsing intact to avoid having to backport 9599d9d25a6b3 - ("src: NAT support for intervals in maps") as a dependency - which is clearly oversized for the sake of this purpose. - -commit 25338cdb6c77aa2f0977afbbb612571c9d325213 -Author: Phil Sutter -Date: Tue Oct 27 17:33:15 2020 +0100 - - src: Optimize prefix matches on byte-boundaries - - If a prefix expression's length is on a byte-boundary, it is sufficient - to just reduce the length passed to "cmp" expression. No need for - explicit bitwise modification of data on LHS. The relevant code is - already there, used for string prefix matches. There is one exception - though, namely zero-length prefixes: Kernel doesn't accept zero-length - "cmp" expressions, so keep them in the old code-path for now. - - This patch depends upon the previous one to correctly parse odd-sized - payload matches but has to extend support for non-payload LHS as well. - In practice, this is needed for "ct" expressions as they allow matching - against IP address prefixes, too. 
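To illustrate the equivalence this change relies on (a standalone sketch with made-up addresses and helper names, not nftables code): when the prefix length falls on a byte boundary, comparing just the leading bytes gives the same verdict as the older mask-and-compare form, which is why the explicit bitwise step disappears from the generated expressions in the test payload updates below:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Match 192.168.2.0/24 two ways; both must agree for every address. */
    static int match_mask(const uint8_t a[4])
    {
        static const uint8_t net[4]  = { 192, 168, 2, 0 };
        static const uint8_t mask[4] = { 0xff, 0xff, 0xff, 0x00 };

        for (int i = 0; i < 4; i++)
            if ((a[i] & mask[i]) != net[i])
                return 0;
        return 1;
    }

    static int match_short_cmp(const uint8_t a[4])
    {
        static const uint8_t net[3] = { 192, 168, 2 };

        return memcmp(a, net, 3) == 0;  /* shorter load + cmp, no bitwise */
    }

    int main(void)
    {
        const uint8_t in[2][4] = { { 192, 168, 2, 7 }, { 192, 168, 3, 7 } };

        for (int i = 0; i < 2; i++)
            printf("%d.%d.%d.%d: mask=%d short=%d\n",
                   in[i][0], in[i][1], in[i][2], in[i][3],
                   match_mask(in[i]), match_short_cmp(in[i]));
        return 0;
    }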
- - Signed-off-by: Phil Sutter ---- - src/netlink_delinearize.c | 8 ++++++-- - src/netlink_linearize.c | 4 +++- - tests/py/ip/ct.t.payload | 4 ---- - tests/py/ip/ip.t.payload | 6 ++---- - tests/py/ip/ip.t.payload.bridge | 6 ++---- - tests/py/ip/ip.t.payload.inet | 6 ++---- - tests/py/ip/ip.t.payload.netdev | 6 ++---- - tests/py/ip6/ip6.t.payload.inet | 5 ++--- - tests/py/ip6/ip6.t.payload.ip6 | 5 ++--- - 9 files changed, 21 insertions(+), 29 deletions(-) - -diff --git a/src/netlink_delinearize.c b/src/netlink_delinearize.c -index 8bdee12..157a473 100644 ---- a/src/netlink_delinearize.c -+++ b/src/netlink_delinearize.c -@@ -291,8 +291,9 @@ static void netlink_parse_cmp(struct netlink_parse_ctx *ctx, - - if (left->len > right->len && - expr_basetype(left) != &string_type) { -- netlink_error(ctx, loc, "Relational expression size mismatch"); -- goto err_free; -+ mpz_lshift_ui(right->value, left->len - right->len); -+ right = prefix_expr_alloc(loc, right, right->len); -+ right->prefix->len = left->len; - } else if (left->len > 0 && left->len < right->len) { - expr_free(left); - left = netlink_parse_concat_expr(ctx, loc, sreg, right->len); -@@ -2164,6 +2165,9 @@ static void expr_postprocess(struct rule_pp_ctx *ctx, struct expr **exprp) - expr_postprocess(ctx, &expr->left); - expr_postprocess(ctx, &expr->right); - break; -+ case EXPR_PREFIX: -+ expr_postprocess(ctx, &expr->prefix); -+ break; - case EXPR_SET_ELEM: - expr_postprocess(ctx, &expr->key); - break; -diff --git a/src/netlink_linearize.c b/src/netlink_linearize.c -index 606d97a..25be634 100644 ---- a/src/netlink_linearize.c -+++ b/src/netlink_linearize.c -@@ -501,7 +501,9 @@ static void netlink_gen_relational(struct netlink_linearize_ctx *ctx, - return netlink_gen_flagcmp(ctx, expr, dreg); - case EXPR_PREFIX: - sreg = get_register(ctx, expr->left); -- if (expr_basetype(expr->left)->type != TYPE_STRING) { -+ if (expr_basetype(expr->left)->type != TYPE_STRING && -+ (!expr->right->prefix_len || -+ expr->right->prefix_len % BITS_PER_BYTE)) { - len = div_round_up(expr->right->len, BITS_PER_BYTE); - netlink_gen_expr(ctx, expr->left, sreg); - right = netlink_gen_prefix(ctx, expr, sreg); -diff --git a/tests/py/ip/ct.t.payload b/tests/py/ip/ct.t.payload -index d5faed4..a7e08f9 100644 ---- a/tests/py/ip/ct.t.payload -+++ b/tests/py/ip/ct.t.payload -@@ -21,25 +21,21 @@ ip test-ip4 output - # ct original ip saddr 192.168.1.0/24 - ip test-ip4 output - [ ct load src_ip => reg 1 , dir original ] -- [ bitwise reg 1 = (reg=1 & 0x00ffffff ) ^ 0x00000000 ] - [ cmp eq reg 1 0x0001a8c0 ] - - # ct reply ip saddr 192.168.1.0/24 - ip test-ip4 output - [ ct load src_ip => reg 1 , dir reply ] -- [ bitwise reg 1 = (reg=1 & 0x00ffffff ) ^ 0x00000000 ] - [ cmp eq reg 1 0x0001a8c0 ] - - # ct original ip daddr 192.168.1.0/24 - ip test-ip4 output - [ ct load dst_ip => reg 1 , dir original ] -- [ bitwise reg 1 = (reg=1 & 0x00ffffff ) ^ 0x00000000 ] - [ cmp eq reg 1 0x0001a8c0 ] - - # ct reply ip daddr 192.168.1.0/24 - ip test-ip4 output - [ ct load dst_ip => reg 1 , dir reply ] -- [ bitwise reg 1 = (reg=1 & 0x00ffffff ) ^ 0x00000000 ] - [ cmp eq reg 1 0x0001a8c0 ] - - # ct l3proto ipv4 -diff --git a/tests/py/ip/ip.t.payload b/tests/py/ip/ip.t.payload -index d627b22..825c0f0 100644 ---- a/tests/py/ip/ip.t.payload -+++ b/tests/py/ip/ip.t.payload -@@ -358,14 +358,12 @@ ip test-ip4 input - - # ip saddr 192.168.2.0/24 - ip test-ip4 input -- [ payload load 4b @ network header + 12 => reg 1 ] -- [ bitwise reg 1 = (reg=1 & 0x00ffffff ) ^ 0x00000000 ] -+ [ payload load 3b @ 
network header + 12 => reg 1 ] - [ cmp eq reg 1 0x0002a8c0 ] - - # ip saddr != 192.168.2.0/24 - ip test-ip4 input -- [ payload load 4b @ network header + 12 => reg 1 ] -- [ bitwise reg 1 = (reg=1 & 0x00ffffff ) ^ 0x00000000 ] -+ [ payload load 3b @ network header + 12 => reg 1 ] - [ cmp neq reg 1 0x0002a8c0 ] - - # ip saddr 192.168.3.1 ip daddr 192.168.3.100 -diff --git a/tests/py/ip/ip.t.payload.bridge b/tests/py/ip/ip.t.payload.bridge -index 91a4fde..e958a5b 100644 ---- a/tests/py/ip/ip.t.payload.bridge -+++ b/tests/py/ip/ip.t.payload.bridge -@@ -466,16 +466,14 @@ bridge test-bridge input - bridge test-bridge input - [ meta load protocol => reg 1 ] - [ cmp eq reg 1 0x00000008 ] -- [ payload load 4b @ network header + 12 => reg 1 ] -- [ bitwise reg 1 = (reg=1 & 0x00ffffff ) ^ 0x00000000 ] -+ [ payload load 3b @ network header + 12 => reg 1 ] - [ cmp eq reg 1 0x0002a8c0 ] - - # ip saddr != 192.168.2.0/24 - bridge test-bridge input - [ meta load protocol => reg 1 ] - [ cmp eq reg 1 0x00000008 ] -- [ payload load 4b @ network header + 12 => reg 1 ] -- [ bitwise reg 1 = (reg=1 & 0x00ffffff ) ^ 0x00000000 ] -+ [ payload load 3b @ network header + 12 => reg 1 ] - [ cmp neq reg 1 0x0002a8c0 ] - - # ip saddr 192.168.3.1 ip daddr 192.168.3.100 -diff --git a/tests/py/ip/ip.t.payload.inet b/tests/py/ip/ip.t.payload.inet -index b9cb28a..6501473 100644 ---- a/tests/py/ip/ip.t.payload.inet -+++ b/tests/py/ip/ip.t.payload.inet -@@ -466,16 +466,14 @@ inet test-inet input - inet test-inet input - [ meta load nfproto => reg 1 ] - [ cmp eq reg 1 0x00000002 ] -- [ payload load 4b @ network header + 12 => reg 1 ] -- [ bitwise reg 1 = (reg=1 & 0x00ffffff ) ^ 0x00000000 ] -+ [ payload load 3b @ network header + 12 => reg 1 ] - [ cmp eq reg 1 0x0002a8c0 ] - - # ip saddr != 192.168.2.0/24 - inet test-inet input - [ meta load nfproto => reg 1 ] - [ cmp eq reg 1 0x00000002 ] -- [ payload load 4b @ network header + 12 => reg 1 ] -- [ bitwise reg 1 = (reg=1 & 0x00ffffff ) ^ 0x00000000 ] -+ [ payload load 3b @ network header + 12 => reg 1 ] - [ cmp neq reg 1 0x0002a8c0 ] - - # ip saddr 192.168.3.1 ip daddr 192.168.3.100 -diff --git a/tests/py/ip/ip.t.payload.netdev b/tests/py/ip/ip.t.payload.netdev -index 588e5ca..58ae358 100644 ---- a/tests/py/ip/ip.t.payload.netdev -+++ b/tests/py/ip/ip.t.payload.netdev -@@ -379,16 +379,14 @@ netdev test-netdev ingress - netdev test-netdev ingress - [ meta load protocol => reg 1 ] - [ cmp eq reg 1 0x00000008 ] -- [ payload load 4b @ network header + 12 => reg 1 ] -- [ bitwise reg 1 = (reg=1 & 0x00ffffff ) ^ 0x00000000 ] -+ [ payload load 3b @ network header + 12 => reg 1 ] - [ cmp eq reg 1 0x0002a8c0 ] - - # ip saddr != 192.168.2.0/24 - netdev test-netdev ingress - [ meta load protocol => reg 1 ] - [ cmp eq reg 1 0x00000008 ] -- [ payload load 4b @ network header + 12 => reg 1 ] -- [ bitwise reg 1 = (reg=1 & 0x00ffffff ) ^ 0x00000000 ] -+ [ payload load 3b @ network header + 12 => reg 1 ] - [ cmp neq reg 1 0x0002a8c0 ] - - # ip saddr 192.168.3.1 ip daddr 192.168.3.100 -diff --git a/tests/py/ip6/ip6.t.payload.inet b/tests/py/ip6/ip6.t.payload.inet -index d015c8e..ffc9b9f 100644 ---- a/tests/py/ip6/ip6.t.payload.inet -+++ b/tests/py/ip6/ip6.t.payload.inet -@@ -604,9 +604,8 @@ inet test-inet input - inet test-inet input - [ meta load nfproto => reg 1 ] - [ cmp eq reg 1 0x0000000a ] -- [ payload load 16b @ network header + 8 => reg 1 ] -- [ bitwise reg 1 = (reg=1 & 0xffffffff 0xffffffff 0x00000000 0x00000000 ) ^ 0x00000000 0x00000000 0x00000000 0x00000000 ] -- [ cmp eq reg 1 0x00000000 
0x00000000 0x00000000 0x00000000 ] -+ [ payload load 8b @ network header + 8 => reg 1 ] -+ [ cmp eq reg 1 0x00000000 0x00000000 ] - - # ip6 saddr ::1 ip6 daddr ::2 - inet test-inet input -diff --git a/tests/py/ip6/ip6.t.payload.ip6 b/tests/py/ip6/ip6.t.payload.ip6 -index b2e8363..18b8bcb 100644 ---- a/tests/py/ip6/ip6.t.payload.ip6 -+++ b/tests/py/ip6/ip6.t.payload.ip6 -@@ -452,9 +452,8 @@ ip6 test-ip6 input - - # ip6 saddr ::/64 - ip6 test-ip6 input -- [ payload load 16b @ network header + 8 => reg 1 ] -- [ bitwise reg 1 = (reg=1 & 0xffffffff 0xffffffff 0x00000000 0x00000000 ) ^ 0x00000000 0x00000000 0x00000000 0x00000000 ] -- [ cmp eq reg 1 0x00000000 0x00000000 0x00000000 0x00000000 ] -+ [ payload load 8b @ network header + 8 => reg 1 ] -+ [ cmp eq reg 1 0x00000000 0x00000000 ] - - # ip6 saddr ::1 ip6 daddr ::2 - ip6 test-ip6 input --- -2.31.1 - diff --git a/SOURCES/0048-tests-py-Move-tcpopt.t-to-any-directory.patch b/SOURCES/0048-tests-py-Move-tcpopt.t-to-any-directory.patch deleted file mode 100644 index f366a09..0000000 --- a/SOURCES/0048-tests-py-Move-tcpopt.t-to-any-directory.patch +++ /dev/null @@ -1,2507 +0,0 @@ -From c925727c50aa0c916105deaca95cb2f7292ea906 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 12 Jul 2021 17:44:08 +0200 -Subject: [PATCH] tests/py: Move tcpopt.t to any/ directory - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1979334 -Upstream Status: nftables commit d566fecdfcc37 - -commit d566fecdfcc37b721729d26a90af01fc87e09c89 -Author: Phil Sutter -Date: Tue Mar 10 12:56:18 2020 +0100 - - tests/py: Move tcpopt.t to any/ directory - - Merge tcpopt.t files in ip, ip6 and inet into a common one, they were - just marignally different. - - Signed-off-by: Phil Sutter ---- - tests/py/{inet => any}/tcpopt.t | 3 + - tests/py/{inet => any}/tcpopt.t.json | 70 ++- - tests/py/{inet => any}/tcpopt.t.json.output | 0 - tests/py/any/tcpopt.t.payload | 603 ++++++++++++++++++++ - tests/py/inet/tcpopt.t.payload | 200 ------- - tests/py/ip/tcpopt.t | 38 -- - tests/py/ip/tcpopt.t.json | 416 -------------- - tests/py/ip/tcpopt.t.json.output | 16 - - tests/py/ip/tcpopt.t.payload | 181 ------ - tests/py/ip6/tcpopt.t | 37 -- - tests/py/ip6/tcpopt.t.json | 416 -------------- - tests/py/ip6/tcpopt.t.json.output | 16 - - tests/py/ip6/tcpopt.t.payload | 181 ------ - 13 files changed, 649 insertions(+), 1528 deletions(-) - rename tests/py/{inet => any}/tcpopt.t (94%) - rename tests/py/{inet => any}/tcpopt.t.json (88%) - rename tests/py/{inet => any}/tcpopt.t.json.output (100%) - create mode 100644 tests/py/any/tcpopt.t.payload - delete mode 100644 tests/py/inet/tcpopt.t.payload - delete mode 100644 tests/py/ip/tcpopt.t - delete mode 100644 tests/py/ip/tcpopt.t.json - delete mode 100644 tests/py/ip/tcpopt.t.json.output - delete mode 100644 tests/py/ip/tcpopt.t.payload - delete mode 100644 tests/py/ip6/tcpopt.t - delete mode 100644 tests/py/ip6/tcpopt.t.json - delete mode 100644 tests/py/ip6/tcpopt.t.json.output - delete mode 100644 tests/py/ip6/tcpopt.t.payload - -diff --git a/tests/py/inet/tcpopt.t b/tests/py/any/tcpopt.t -similarity index 94% -rename from tests/py/inet/tcpopt.t -rename to tests/py/any/tcpopt.t -index b457691..08b1dcb 100644 ---- a/tests/py/inet/tcpopt.t -+++ b/tests/py/any/tcpopt.t -@@ -1,5 +1,7 @@ - :input;type filter hook input priority 0 - -+*ip;test-ip4;input -+*ip6;test-ip6;input - *inet;test-inet;input - - tcp option eol kind 1;ok -@@ -19,6 +21,7 @@ tcp option sack0 left 1;ok;tcp option sack left 1 - tcp option sack1 left 1;ok - tcp option sack2 left 
1;ok - tcp option sack3 left 1;ok -+tcp option sack right 1;ok - tcp option sack0 right 1;ok;tcp option sack right 1 - tcp option sack1 right 1;ok - tcp option sack2 right 1;ok -diff --git a/tests/py/inet/tcpopt.t.json b/tests/py/any/tcpopt.t.json -similarity index 88% -rename from tests/py/inet/tcpopt.t.json -rename to tests/py/any/tcpopt.t.json -index 45e9c29..48eb339 100644 ---- a/tests/py/inet/tcpopt.t.json -+++ b/tests/py/any/tcpopt.t.json -@@ -8,7 +8,7 @@ - "name": "eol" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -24,7 +24,7 @@ - "name": "noop" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -40,7 +40,7 @@ - "name": "maxseg" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -56,7 +56,7 @@ - "name": "maxseg" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -72,7 +72,7 @@ - "name": "maxseg" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -88,7 +88,7 @@ - "name": "window" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -104,7 +104,7 @@ - "name": "window" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -120,7 +120,7 @@ - "name": "window" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -136,7 +136,7 @@ - "name": "sack-permitted" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -152,7 +152,7 @@ - "name": "sack-permitted" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -168,7 +168,7 @@ - "name": "sack" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -184,7 +184,7 @@ - "name": "sack" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -200,7 +200,7 @@ - "name": "sack" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -216,7 +216,7 @@ - "name": "sack0" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -232,7 +232,7 @@ - "name": "sack1" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -248,7 +248,7 @@ - "name": "sack2" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -264,7 +264,23 @@ - "name": "sack3" - } - }, -- "op": "==", -+ "op": "==", -+ "right": 1 -+ } -+ } -+] -+ -+# tcp option sack right 1 -+[ -+ { -+ "match": { -+ "left": { -+ "tcp option": { -+ "field": "right", -+ "name": "sack" -+ } -+ }, -+ "op": "==", - "right": 1 - } - } -@@ -280,7 +296,7 @@ - "name": "sack0" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -296,7 +312,7 @@ - "name": "sack1" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -312,7 +328,7 @@ - "name": "sack2" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -328,7 +344,7 @@ - "name": "sack3" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -344,7 +360,7 @@ - "name": "timestamp" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -360,7 +376,7 @@ - "name": "timestamp" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -376,7 +392,7 @@ - "name": "timestamp" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -392,7 +408,7 @@ - "name": "timestamp" - } - }, -- "op": "==", -+ "op": "==", - "right": 1 - } - } -@@ -407,7 +423,7 @@ - "name": "window" - } - }, -- "op": "==", -+ "op": "==", - "right": true - } - } -@@ -422,7 +438,7 @@ - "name": "window" - } - }, -- "op": "==", -+ "op": "==", - "right": false - } - } -diff --git a/tests/py/inet/tcpopt.t.json.output b/tests/py/any/tcpopt.t.json.output -similarity index 100% -rename from tests/py/inet/tcpopt.t.json.output -rename to tests/py/any/tcpopt.t.json.output -diff --git 
a/tests/py/any/tcpopt.t.payload b/tests/py/any/tcpopt.t.payload -new file mode 100644 -index 0000000..63751cf ---- /dev/null -+++ b/tests/py/any/tcpopt.t.payload -@@ -0,0 +1,603 @@ -+# tcp option eol kind 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 0 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option eol kind 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 0 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option eol kind 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 0 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option noop kind 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 1 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option noop kind 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 1 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option noop kind 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 1 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option maxseg kind 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 2 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option maxseg kind 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 2 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option maxseg kind 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 2 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option maxseg length 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 2 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option maxseg length 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 2 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option maxseg length 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 2 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option maxseg size 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 2b @ 2 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x00000100 ] -+ -+# tcp option maxseg size 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 2b @ 2 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x00000100 ] -+ -+# tcp option maxseg size 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 2b @ 2 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x00000100 ] -+ -+# tcp option window kind 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 3 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option window kind 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 3 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option window kind 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 3 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option window length 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 3 + 1 => reg 
1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option window length 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 3 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option window length 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 3 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option window count 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 3 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option window count 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 3 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option window count 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 3 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option sack-permitted kind 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 4 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option sack-permitted kind 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 4 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option sack-permitted kind 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 4 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option sack-permitted length 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 4 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option sack-permitted length 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 4 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option sack-permitted length 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 4 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option sack kind 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 5 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option sack kind 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 5 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option sack kind 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 5 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option sack length 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 5 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option sack length 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 5 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option sack length 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 5 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option sack left 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack left 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack left 1 -+inet -+ [ meta 
load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack0 left 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack0 left 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack0 left 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack1 left 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 10 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack1 left 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 10 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack1 left 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 10 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack2 left 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 18 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack2 left 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 18 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack2 left 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 18 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack3 left 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 26 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack3 left 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 26 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack3 left 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 26 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack right 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 6 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack right 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 6 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack right 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 6 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack0 right 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 6 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack0 right 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 6 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack0 right 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 6 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack1 right 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 14 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# 
tcp option sack1 right 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 14 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack1 right 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 14 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack2 right 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 22 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack2 right 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 22 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack2 right 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 22 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack3 right 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 30 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack3 right 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 30 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option sack3 right 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 5 + 30 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option timestamp kind 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 8 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option timestamp kind 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 8 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option timestamp kind 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 8 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option timestamp length 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 8 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option timestamp length 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 8 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option timestamp length 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 8 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option timestamp tsval 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 8 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option timestamp tsval 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 8 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option timestamp tsval 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 8 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option timestamp tsecr 1 -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 8 + 6 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option timestamp tsecr 1 -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 8 + 6 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option timestamp tsecr 1 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq 
reg 1 0x00000006 ] -+ [ exthdr load tcpopt 4b @ 8 + 6 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# tcp option window exists -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 3 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option window exists -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 3 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option window exists -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 3 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# tcp option window missing -+ip -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 3 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000000 ] -+ -+# tcp option window missing -+ip6 -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 3 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000000 ] -+ -+# tcp option window missing -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 3 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000000 ] -+ -+# tcp option maxseg size set 1360 -+ip -+ [ immediate reg 1 0x00005005 ] -+ [ exthdr write tcpopt reg 1 => 2b @ 2 + 2 ] -+ -+# tcp option maxseg size set 1360 -+ip6 -+ [ immediate reg 1 0x00005005 ] -+ [ exthdr write tcpopt reg 1 => 2b @ 2 + 2 ] -+ -+# tcp option maxseg size set 1360 -+inet -+ [ immediate reg 1 0x00005005 ] -+ [ exthdr write tcpopt reg 1 => 2b @ 2 + 2 ] -+ -diff --git a/tests/py/inet/tcpopt.t.payload b/tests/py/inet/tcpopt.t.payload -deleted file mode 100644 -index 7e254ed..0000000 ---- a/tests/py/inet/tcpopt.t.payload -+++ /dev/null -@@ -1,200 +0,0 @@ --# tcp option eol kind 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 0 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option noop kind 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 1 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option maxseg kind 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 2 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option maxseg length 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 2 + 1 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option maxseg size 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 2b @ 2 + 2 => reg 1 ] -- [ cmp eq reg 1 0x00000100 ] -- --# tcp option window kind 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 3 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option window length 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 3 + 1 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option window count 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 3 + 2 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option sack-permitted kind 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 4 + 0 => reg 1 ] 
-- [ cmp eq reg 1 0x00000001 ] -- --# tcp option sack-permitted length 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 4 + 1 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option sack kind 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 5 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option sack length 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 5 + 1 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option sack left 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 2 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack0 left 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 2 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack1 left 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 10 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack2 left 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 18 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack3 left 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 26 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack right 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 6 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack0 right 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 6 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack1 right 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 14 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack2 right 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 22 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack3 right 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 30 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option timestamp kind 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 8 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option timestamp length 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 8 + 1 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option timestamp tsval 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 8 + 2 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option timestamp tsecr 1 --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 8 + 6 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option window exists --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 3 
+ 0 present => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option window missing --inet test-inet input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 3 + 0 present => reg 1 ] -- [ cmp eq reg 1 0x00000000 ] -- --# tcp option maxseg size set 1360 --inet test-inet input -- [ immediate reg 1 0x00005005 ] -- [ exthdr write tcpopt reg 1 => 2b @ 2 + 2 ] -diff --git a/tests/py/ip/tcpopt.t b/tests/py/ip/tcpopt.t -deleted file mode 100644 -index 7ee50a8..0000000 ---- a/tests/py/ip/tcpopt.t -+++ /dev/null -@@ -1,38 +0,0 @@ --:input;type filter hook input priority 0 -- --*ip;test-ip;input -- --tcp option eol kind 1;ok --tcp option noop kind 1;ok --tcp option maxseg kind 1;ok --tcp option maxseg length 1;ok --tcp option maxseg size 1;ok --tcp option window kind 1;ok --tcp option window length 1;ok --tcp option window count 1;ok --tcp option sack-permitted kind 1;ok --tcp option sack-permitted length 1;ok --tcp option sack kind 1;ok --tcp option sack length 1;ok --tcp option sack left 1;ok --tcp option sack0 left 1;ok;tcp option sack left 1 --tcp option sack1 left 1;ok --tcp option sack2 left 1;ok --tcp option sack3 left 1;ok --tcp option sack right 1;ok --tcp option sack0 right 1;ok;tcp option sack right 1 --tcp option sack1 right 1;ok --tcp option sack2 right 1;ok --tcp option sack3 right 1;ok --tcp option timestamp kind 1;ok --tcp option timestamp length 1;ok --tcp option timestamp tsval 1;ok --tcp option timestamp tsecr 1;ok -- --tcp option foobar;fail --tcp option foo bar;fail --tcp option eol left;fail --tcp option eol left 1;fail --tcp option eol left 1;fail --tcp option sack window;fail --tcp option sack window 1;fail -diff --git a/tests/py/ip/tcpopt.t.json b/tests/py/ip/tcpopt.t.json -deleted file mode 100644 -index d573dd1..0000000 ---- a/tests/py/ip/tcpopt.t.json -+++ /dev/null -@@ -1,416 +0,0 @@ --# tcp option eol kind 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "kind", -- "name": "eol" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option noop kind 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "kind", -- "name": "noop" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option maxseg kind 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "kind", -- "name": "maxseg" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option maxseg length 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "length", -- "name": "maxseg" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option maxseg size 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "size", -- "name": "maxseg" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option window kind 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "kind", -- "name": "window" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option window length 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "length", -- "name": "window" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option window count 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "count", -- "name": "window" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack-permitted kind 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "kind", -- "name": "sack-permitted" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack-permitted 
length 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "length", -- "name": "sack-permitted" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack kind 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "kind", -- "name": "sack" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack length 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "length", -- "name": "sack" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack left 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "left", -- "name": "sack" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack0 left 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "left", -- "name": "sack" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack1 left 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "left", -- "name": "sack1" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack2 left 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "left", -- "name": "sack2" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack3 left 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "left", -- "name": "sack3" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack right 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "right", -- "name": "sack" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack0 right 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "right", -- "name": "sack0" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack1 right 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "right", -- "name": "sack1" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack2 right 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "right", -- "name": "sack2" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack3 right 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "right", -- "name": "sack3" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option timestamp kind 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "kind", -- "name": "timestamp" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option timestamp length 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "length", -- "name": "timestamp" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option timestamp tsval 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "tsval", -- "name": "timestamp" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option timestamp tsecr 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "tsecr", -- "name": "timestamp" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- -diff --git a/tests/py/ip/tcpopt.t.json.output b/tests/py/ip/tcpopt.t.json.output -deleted file mode 100644 -index 81dd8ad..0000000 ---- a/tests/py/ip/tcpopt.t.json.output -+++ /dev/null -@@ -1,16 +0,0 @@ --# tcp option sack0 right 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "right", -- "name": "sack" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- -diff --git a/tests/py/ip/tcpopt.t.payload b/tests/py/ip/tcpopt.t.payload -deleted file 
mode 100644 -index b2e5bdb..0000000 ---- a/tests/py/ip/tcpopt.t.payload -+++ /dev/null -@@ -1,181 +0,0 @@ --# tcp option eol kind 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 0 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option noop kind 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 1 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option maxseg kind 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 2 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option maxseg length 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 2 + 1 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option maxseg size 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 2b @ 2 + 2 => reg 1 ] -- [ cmp eq reg 1 0x00000100 ] -- --# tcp option window kind 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 3 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option window length 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 3 + 1 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option window count 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 3 + 2 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option sack-permitted kind 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 4 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option sack-permitted length 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 4 + 1 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option sack kind 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 5 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option sack length 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 5 + 1 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option sack left 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 2 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack0 left 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 2 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack1 left 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 10 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack2 left 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 18 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack3 left 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 26 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack right 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 6 => reg 
1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack0 right 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 6 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack1 right 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 14 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack2 right 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 22 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack3 right 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 30 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option timestamp kind 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 8 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option timestamp length 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 8 + 1 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option timestamp tsval 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 8 + 2 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option timestamp tsecr 1 --ip test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 8 + 6 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -diff --git a/tests/py/ip6/tcpopt.t b/tests/py/ip6/tcpopt.t -deleted file mode 100644 -index 497f69f..0000000 ---- a/tests/py/ip6/tcpopt.t -+++ /dev/null -@@ -1,37 +0,0 @@ --:input;type filter hook input priority 0 --*ip6;test-ip6;input -- --tcp option eol kind 1;ok --tcp option noop kind 1;ok --tcp option maxseg kind 1;ok --tcp option maxseg length 1;ok --tcp option maxseg size 1;ok --tcp option window kind 1;ok --tcp option window length 1;ok --tcp option window count 1;ok --tcp option sack-permitted kind 1;ok --tcp option sack-permitted length 1;ok --tcp option sack kind 1;ok --tcp option sack length 1;ok --tcp option sack left 1;ok --tcp option sack0 left 1;ok;tcp option sack left 1 --tcp option sack1 left 1;ok --tcp option sack2 left 1;ok --tcp option sack3 left 1;ok --tcp option sack right 1;ok --tcp option sack0 right 1;ok;tcp option sack right 1 --tcp option sack1 right 1;ok --tcp option sack2 right 1;ok --tcp option sack3 right 1;ok --tcp option timestamp kind 1;ok --tcp option timestamp length 1;ok --tcp option timestamp tsval 1;ok --tcp option timestamp tsecr 1;ok -- --tcp option foobar;fail --tcp option foo bar;fail --tcp option eol left;fail --tcp option eol left 1;fail --tcp option eol left 1;fail --tcp option sack window;fail --tcp option sack window 1;fail -diff --git a/tests/py/ip6/tcpopt.t.json b/tests/py/ip6/tcpopt.t.json -deleted file mode 100644 -index d573dd1..0000000 ---- a/tests/py/ip6/tcpopt.t.json -+++ /dev/null -@@ -1,416 +0,0 @@ --# tcp option eol kind 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "kind", -- "name": "eol" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option noop kind 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "kind", -- "name": "noop" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option maxseg kind 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "kind", -- "name": "maxseg" 
-- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option maxseg length 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "length", -- "name": "maxseg" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option maxseg size 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "size", -- "name": "maxseg" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option window kind 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "kind", -- "name": "window" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option window length 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "length", -- "name": "window" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option window count 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "count", -- "name": "window" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack-permitted kind 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "kind", -- "name": "sack-permitted" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack-permitted length 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "length", -- "name": "sack-permitted" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack kind 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "kind", -- "name": "sack" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack length 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "length", -- "name": "sack" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack left 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "left", -- "name": "sack" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack0 left 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "left", -- "name": "sack" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack1 left 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "left", -- "name": "sack1" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack2 left 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "left", -- "name": "sack2" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack3 left 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "left", -- "name": "sack3" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack right 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "right", -- "name": "sack" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack0 right 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "right", -- "name": "sack0" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack1 right 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "right", -- "name": "sack1" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack2 right 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "right", -- "name": "sack2" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option sack3 right 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "right", -- "name": "sack3" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option timestamp kind 1 
--[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "kind", -- "name": "timestamp" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option timestamp length 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "length", -- "name": "timestamp" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option timestamp tsval 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "tsval", -- "name": "timestamp" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- --# tcp option timestamp tsecr 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "tsecr", -- "name": "timestamp" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- -diff --git a/tests/py/ip6/tcpopt.t.json.output b/tests/py/ip6/tcpopt.t.json.output -deleted file mode 100644 -index 81dd8ad..0000000 ---- a/tests/py/ip6/tcpopt.t.json.output -+++ /dev/null -@@ -1,16 +0,0 @@ --# tcp option sack0 right 1 --[ -- { -- "match": { -- "left": { -- "tcp option": { -- "field": "right", -- "name": "sack" -- } -- }, -- "op": "==", -- "right": 1 -- } -- } --] -- -diff --git a/tests/py/ip6/tcpopt.t.payload b/tests/py/ip6/tcpopt.t.payload -deleted file mode 100644 -index 4b18919..0000000 ---- a/tests/py/ip6/tcpopt.t.payload -+++ /dev/null -@@ -1,181 +0,0 @@ --# tcp option eol kind 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 0 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option noop kind 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 1 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option maxseg kind 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 2 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option maxseg length 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 2 + 1 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option maxseg size 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 2b @ 2 + 2 => reg 1 ] -- [ cmp eq reg 1 0x00000100 ] -- --# tcp option window kind 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 3 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option window length 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 3 + 1 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option window count 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 3 + 2 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option sack-permitted kind 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 4 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option sack-permitted length 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 4 + 1 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option sack kind 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 5 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option sack length 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq 
reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 5 + 1 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option sack left 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 2 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack0 left 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 2 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack1 left 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 10 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack2 left 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 18 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack3 left 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 26 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack right 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 6 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack0 right 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 6 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack1 right 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 14 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack2 right 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 22 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option sack3 right 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 5 + 30 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option timestamp kind 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 8 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option timestamp length 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 8 + 1 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option timestamp tsval 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 8 + 2 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option timestamp tsecr 1 --ip6 test-ip input -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 8 + 6 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] --- -2.31.1 - diff --git a/SOURCES/0049-parser-merge-sack-perm-sack-permitted-and-maxseg-mss.patch b/SOURCES/0049-parser-merge-sack-perm-sack-permitted-and-maxseg-mss.patch deleted file mode 100644 index 6c23314..0000000 --- a/SOURCES/0049-parser-merge-sack-perm-sack-permitted-and-maxseg-mss.patch +++ /dev/null @@ -1,294 +0,0 @@ -From f87960ecc2ed04c803b27bb6a9c42ecd0ba0bc96 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 12 Jul 2021 17:44:08 +0200 -Subject: [PATCH] parser: merge sack-perm/sack-permitted and maxseg/mss - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1979334 -Upstream Status: nftables commit 2a9aea6f2dfb6 - -commit 2a9aea6f2dfb6ee61528809af98860e06b38762b -Author: Florian Westphal 
-Date: Mon Nov 2 00:27:04 2020 +0100 - - parser: merge sack-perm/sack-permitted and maxseg/mss - - One was added by the tcp option parsing ocde, the other by synproxy. - - So we have: - synproxy ... sack-perm - synproxy ... mss - - and - - tcp option maxseg - tcp option sack-permitted - - This kills the extra tokens on the scanner/parser side, - so sack-perm and sack-permitted can both be used. - - Likewise, 'synproxy maxseg' and 'tcp option mss size 42' will work too. - On the output side, the shorter form is now preferred, i.e. sack-perm - and mss. - - Signed-off-by: Florian Westphal ---- - doc/payload-expression.txt | 8 ++++---- - src/parser_bison.y | 12 +++++------- - src/scanner.l | 8 ++++---- - src/tcpopt.c | 2 +- - tests/py/any/tcpopt.t | 4 ++-- - tests/py/any/tcpopt.t.json | 8 ++++---- - tests/py/any/tcpopt.t.payload | 12 ++++++------ - 7 files changed, 26 insertions(+), 28 deletions(-) - -diff --git a/doc/payload-expression.txt b/doc/payload-expression.txt -index dba42fd..3d7057c 100644 ---- a/doc/payload-expression.txt -+++ b/doc/payload-expression.txt -@@ -525,13 +525,13 @@ nftables currently supports matching (finding) a given ipv6 extension header, TC - *dst* {*nexthdr* | *hdrlength*} - *mh* {*nexthdr* | *hdrlength* | *checksum* | *type*} - *srh* {*flags* | *tag* | *sid* | *seg-left*} --*tcp option* {*eol* | *noop* | *maxseg* | *window* | *sack-permitted* | *sack* | *sack0* | *sack1* | *sack2* | *sack3* | *timestamp*} 'tcp_option_field' -+*tcp option* {*eol* | *noop* | *maxseg* | *window* | *sack-perm* | *sack* | *sack0* | *sack1* | *sack2* | *sack3* | *timestamp*} 'tcp_option_field' - *ip option* { lsrr | ra | rr | ssrr } 'ip_option_field' - - The following syntaxes are valid only in a relational expression with boolean type on right-hand side for checking header existence only: - [verse] - *exthdr* {*hbh* | *frag* | *rt* | *dst* | *mh*} --*tcp option* {*eol* | *noop* | *maxseg* | *window* | *sack-permitted* | *sack* | *sack0* | *sack1* | *sack2* | *sack3* | *timestamp*} -+*tcp option* {*eol* | *noop* | *maxseg* | *window* | *sack-perm* | *sack* | *sack0* | *sack1* | *sack2* | *sack3* | *timestamp*} - *ip option* { lsrr | ra | rr | ssrr } - - .IPv6 extension headers -@@ -568,7 +568,7 @@ kind, length, size - |window| - TCP Window Scaling | - kind, length, count --|sack-permitted| -+|sack-perm | - TCP SACK permitted | - kind, length - |sack| -@@ -611,7 +611,7 @@ type, length, ptr, addr - - .finding TCP options - -------------------- --filter input tcp option sack-permitted kind 1 counter -+filter input tcp option sack-perm kind 1 counter - -------------------- - - .matching IPv6 exthdr -diff --git a/src/parser_bison.y b/src/parser_bison.y -index 4cca31b..56d26e3 100644 ---- a/src/parser_bison.y -+++ b/src/parser_bison.y -@@ -221,7 +221,6 @@ int nft_lex(void *, void *, void *); - %token SYNPROXY "synproxy" - %token MSS "mss" - %token WSCALE "wscale" --%token SACKPERM "sack-perm" - - %token HOOK "hook" - %token DEVICE "device" -@@ -385,14 +384,13 @@ int nft_lex(void *, void *, void *); - %token OPTION "option" - %token ECHO "echo" - %token EOL "eol" --%token MAXSEG "maxseg" - %token NOOP "noop" - %token SACK "sack" - %token SACK0 "sack0" - %token SACK1 "sack1" - %token SACK2 "sack2" - %token SACK3 "sack3" --%token SACK_PERMITTED "sack-permitted" -+%token SACK_PERM "sack-permitted" - %token TIMESTAMP "timestamp" - %token KIND "kind" - %token COUNT "count" -@@ -2889,7 +2887,7 @@ synproxy_arg : MSS NUM - { - $0->synproxy.flags |= NF_SYNPROXY_OPT_TIMESTAMP; - } -- | SACKPERM -+ | 
SACK_PERM - { - $0->synproxy.flags |= NF_SYNPROXY_OPT_SACK_PERM; - } -@@ -2944,7 +2942,7 @@ synproxy_ts : /* empty */ { $$ = 0; } - ; - - synproxy_sack : /* empty */ { $$ = 0; } -- | SACKPERM -+ | SACK_PERM - { - $$ = NF_SYNPROXY_OPT_SACK_PERM; - } -@@ -4736,9 +4734,9 @@ tcp_hdr_field : SPORT { $$ = TCPHDR_SPORT; } - - tcp_hdr_option_type : EOL { $$ = TCPOPTHDR_EOL; } - | NOOP { $$ = TCPOPTHDR_NOOP; } -- | MAXSEG { $$ = TCPOPTHDR_MAXSEG; } -+ | MSS { $$ = TCPOPTHDR_MAXSEG; } - | WINDOW { $$ = TCPOPTHDR_WINDOW; } -- | SACK_PERMITTED { $$ = TCPOPTHDR_SACK_PERMITTED; } -+ | SACK_PERM { $$ = TCPOPTHDR_SACK_PERMITTED; } - | SACK { $$ = TCPOPTHDR_SACK0; } - | SACK0 { $$ = TCPOPTHDR_SACK0; } - | SACK1 { $$ = TCPOPTHDR_SACK1; } -diff --git a/src/scanner.l b/src/scanner.l -index 7daf5c1..a369802 100644 ---- a/src/scanner.l -+++ b/src/scanner.l -@@ -419,14 +419,16 @@ addrstring ({macaddr}|{ip4addr}|{ip6addr}) - - "echo" { return ECHO; } - "eol" { return EOL; } --"maxseg" { return MAXSEG; } -+"maxseg" { return MSS; } -+"mss" { return MSS; } - "noop" { return NOOP; } - "sack" { return SACK; } - "sack0" { return SACK0; } - "sack1" { return SACK1; } - "sack2" { return SACK2; } - "sack3" { return SACK3; } --"sack-permitted" { return SACK_PERMITTED; } -+"sack-permitted" { return SACK_PERM; } -+"sack-perm" { return SACK_PERM; } - "timestamp" { return TIMESTAMP; } - "time" { return TIME; } - -@@ -562,9 +564,7 @@ addrstring ({macaddr}|{ip4addr}|{ip6addr}) - "osf" { return OSF; } - - "synproxy" { return SYNPROXY; } --"mss" { return MSS; } - "wscale" { return WSCALE; } --"sack-perm" { return SACKPERM; } - - "notrack" { return NOTRACK; } - -diff --git a/src/tcpopt.c b/src/tcpopt.c -index ec305d9..6dbaa9e 100644 ---- a/src/tcpopt.c -+++ b/src/tcpopt.c -@@ -55,7 +55,7 @@ static const struct exthdr_desc tcpopt_window = { - }; - - static const struct exthdr_desc tcpopt_sack_permitted = { -- .name = "sack-permitted", -+ .name = "sack-perm", - .type = TCPOPT_SACK_PERMITTED, - .templates = { - [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), -diff --git a/tests/py/any/tcpopt.t b/tests/py/any/tcpopt.t -index 08b1dcb..5f21d49 100644 ---- a/tests/py/any/tcpopt.t -+++ b/tests/py/any/tcpopt.t -@@ -12,8 +12,8 @@ tcp option maxseg size 1;ok - tcp option window kind 1;ok - tcp option window length 1;ok - tcp option window count 1;ok --tcp option sack-permitted kind 1;ok --tcp option sack-permitted length 1;ok -+tcp option sack-perm kind 1;ok -+tcp option sack-perm length 1;ok - tcp option sack kind 1;ok - tcp option sack length 1;ok - tcp option sack left 1;ok -diff --git a/tests/py/any/tcpopt.t.json b/tests/py/any/tcpopt.t.json -index 48eb339..2c6236a 100644 ---- a/tests/py/any/tcpopt.t.json -+++ b/tests/py/any/tcpopt.t.json -@@ -126,14 +126,14 @@ - } - ] - --# tcp option sack-permitted kind 1 -+# tcp option sack-perm kind 1 - [ - { - "match": { - "left": { - "tcp option": { - "field": "kind", -- "name": "sack-permitted" -+ "name": "sack-perm" - } - }, - "op": "==", -@@ -142,14 +142,14 @@ - } - ] - --# tcp option sack-permitted length 1 -+# tcp option sack-perm length 1 - [ - { - "match": { - "left": { - "tcp option": { - "field": "length", -- "name": "sack-permitted" -+ "name": "sack-perm" - } - }, - "op": "==", -diff --git a/tests/py/any/tcpopt.t.payload b/tests/py/any/tcpopt.t.payload -index 63751cf..f63076a 100644 ---- a/tests/py/any/tcpopt.t.payload -+++ b/tests/py/any/tcpopt.t.payload -@@ -166,42 +166,42 @@ inet - [ exthdr load tcpopt 1b @ 3 + 2 => reg 1 ] - [ cmp eq reg 1 0x00000001 ] - --# tcp option sack-permitted kind 1 
-+# tcp option sack-perm kind 1 - ip - [ meta load l4proto => reg 1 ] - [ cmp eq reg 1 0x00000006 ] - [ exthdr load tcpopt 1b @ 4 + 0 => reg 1 ] - [ cmp eq reg 1 0x00000001 ] - --# tcp option sack-permitted kind 1 -+# tcp option sack-perm kind 1 - ip6 - [ meta load l4proto => reg 1 ] - [ cmp eq reg 1 0x00000006 ] - [ exthdr load tcpopt 1b @ 4 + 0 => reg 1 ] - [ cmp eq reg 1 0x00000001 ] - --# tcp option sack-permitted kind 1 -+# tcp option sack-perm kind 1 - inet - [ meta load l4proto => reg 1 ] - [ cmp eq reg 1 0x00000006 ] - [ exthdr load tcpopt 1b @ 4 + 0 => reg 1 ] - [ cmp eq reg 1 0x00000001 ] - --# tcp option sack-permitted length 1 -+# tcp option sack-perm length 1 - ip - [ meta load l4proto => reg 1 ] - [ cmp eq reg 1 0x00000006 ] - [ exthdr load tcpopt 1b @ 4 + 1 => reg 1 ] - [ cmp eq reg 1 0x00000001 ] - --# tcp option sack-permitted length 1 -+# tcp option sack-perm length 1 - ip6 - [ meta load l4proto => reg 1 ] - [ cmp eq reg 1 0x00000006 ] - [ exthdr load tcpopt 1b @ 4 + 1 => reg 1 ] - [ cmp eq reg 1 0x00000001 ] - --# tcp option sack-permitted length 1 -+# tcp option sack-perm length 1 - inet - [ meta load l4proto => reg 1 ] - [ cmp eq reg 1 0x00000006 ] --- -2.31.1 - diff --git a/SOURCES/0050-tcpopts-clean-up-parser-tcpopt.c-plumbing.patch b/SOURCES/0050-tcpopts-clean-up-parser-tcpopt.c-plumbing.patch deleted file mode 100644 index 5598b1a..0000000 --- a/SOURCES/0050-tcpopts-clean-up-parser-tcpopt.c-plumbing.patch +++ /dev/null @@ -1,387 +0,0 @@ -From 0aa694acf7c233f9426e48d0644b29ddec4fb16d Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 12 Jul 2021 17:44:08 +0200 -Subject: [PATCH] tcpopts: clean up parser -> tcpopt.c plumbing - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1979334 -Upstream Status: nftables commit 41158e0388ac5 - -commit 41158e0388ac56380fc0ee301f0d43f95ec43fab -Author: Florian Westphal -Date: Mon Nov 2 14:53:26 2020 +0100 - - tcpopts: clean up parser -> tcpopt.c plumbing - - tcpopt template mapping is asymmetric: - one mapping is to match dumped netlink exthdr expression to the original - tcp option template. - - This struct is indexed by the raw, on-write kind/type number. - - The other mapping maps parsed options to the tcp option template. - Remove the latter. The parser is changed to translate the textual - option name, e.g. "maxseg" to the on-wire number. - - This avoids the second mapping, it will also allow to more easily - support raw option matching in a followup patch. 
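(Orientation note, not part of the quoted patch.) After this cleanup the parser hands tcpopt.c the on-wire TCP option kind directly, and that same kind number is what appears as the byte offset in the payload expectations quoted above. A minimal sketch of the correspondence, using rules taken from the test files; the annotations are derived from the .t.payload output and the enum tcpopt_kind values introduced by this patch:

    tcp option maxseg size 1        # maxseg is kind 2     -> exthdr load tcpopt 2b @ 2 + 2
    tcp option sack-perm kind 1     # sack-permitted is 4  -> exthdr load tcpopt 1b @ 4 + 0
    tcp option timestamp tsval 1    # timestamp is kind 8  -> exthdr load tcpopt 4b @ 8 + 2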
- - Signed-off-by: Florian Westphal ---- - doc/payload-expression.txt | 4 +- - include/tcpopt.h | 35 ++++++++------- - src/parser_bison.y | 26 +++++------ - src/parser_json.c | 10 ++--- - src/scanner.l | 3 +- - src/tcpopt.c | 92 +++++++++++++++----------------------- - 6 files changed, 75 insertions(+), 95 deletions(-) - -diff --git a/doc/payload-expression.txt b/doc/payload-expression.txt -index 3d7057c..27145c3 100644 ---- a/doc/payload-expression.txt -+++ b/doc/payload-expression.txt -@@ -525,13 +525,13 @@ nftables currently supports matching (finding) a given ipv6 extension header, TC - *dst* {*nexthdr* | *hdrlength*} - *mh* {*nexthdr* | *hdrlength* | *checksum* | *type*} - *srh* {*flags* | *tag* | *sid* | *seg-left*} --*tcp option* {*eol* | *noop* | *maxseg* | *window* | *sack-perm* | *sack* | *sack0* | *sack1* | *sack2* | *sack3* | *timestamp*} 'tcp_option_field' -+*tcp option* {*eol* | *nop* | *maxseg* | *window* | *sack-perm* | *sack* | *sack0* | *sack1* | *sack2* | *sack3* | *timestamp*} 'tcp_option_field' - *ip option* { lsrr | ra | rr | ssrr } 'ip_option_field' - - The following syntaxes are valid only in a relational expression with boolean type on right-hand side for checking header existence only: - [verse] - *exthdr* {*hbh* | *frag* | *rt* | *dst* | *mh*} --*tcp option* {*eol* | *noop* | *maxseg* | *window* | *sack-perm* | *sack* | *sack0* | *sack1* | *sack2* | *sack3* | *timestamp*} -+*tcp option* {*eol* | *nop* | *maxseg* | *window* | *sack-perm* | *sack* | *sack0* | *sack1* | *sack2* | *sack3* | *timestamp*} - *ip option* { lsrr | ra | rr | ssrr } - - .IPv6 extension headers -diff --git a/include/tcpopt.h b/include/tcpopt.h -index ffdbcb0..7f3fbb8 100644 ---- a/include/tcpopt.h -+++ b/include/tcpopt.h -@@ -6,7 +6,7 @@ - #include - - extern struct expr *tcpopt_expr_alloc(const struct location *loc, -- uint8_t type, uint8_t field); -+ unsigned int kind, unsigned int field); - - extern void tcpopt_init_raw(struct expr *expr, uint8_t type, - unsigned int offset, unsigned int len, -@@ -15,21 +15,22 @@ extern void tcpopt_init_raw(struct expr *expr, uint8_t type, - extern bool tcpopt_find_template(struct expr *expr, const struct expr *mask, - unsigned int *shift); - --enum tcpopt_hdr_types { -- TCPOPTHDR_INVALID, -- TCPOPTHDR_EOL, -- TCPOPTHDR_NOOP, -- TCPOPTHDR_MAXSEG, -- TCPOPTHDR_WINDOW, -- TCPOPTHDR_SACK_PERMITTED, -- TCPOPTHDR_SACK0, -- TCPOPTHDR_SACK1, -- TCPOPTHDR_SACK2, -- TCPOPTHDR_SACK3, -- TCPOPTHDR_TIMESTAMP, -- TCPOPTHDR_ECHO, -- TCPOPTHDR_ECHO_REPLY, -- __TCPOPTHDR_MAX -+/* TCP option numbers used on wire */ -+enum tcpopt_kind { -+ TCPOPT_KIND_EOL = 0, -+ TCPOPT_KIND_NOP = 1, -+ TCPOPT_KIND_MAXSEG = 2, -+ TCPOPT_KIND_WINDOW = 3, -+ TCPOPT_KIND_SACK_PERMITTED = 4, -+ TCPOPT_KIND_SACK = 5, -+ TCPOPT_KIND_TIMESTAMP = 8, -+ TCPOPT_KIND_ECHO = 8, -+ __TCPOPT_KIND_MAX, -+ -+ /* extra oob info, internal to nft */ -+ TCPOPT_KIND_SACK1 = 256, -+ TCPOPT_KIND_SACK2 = 257, -+ TCPOPT_KIND_SACK3 = 258, - }; - - enum tcpopt_hdr_fields { -@@ -44,6 +45,6 @@ enum tcpopt_hdr_fields { - TCPOPTHDR_FIELD_TSECR, - }; - --extern const struct exthdr_desc *tcpopthdr_protocols[__TCPOPTHDR_MAX]; -+extern const struct exthdr_desc *tcpopt_protocols[__TCPOPT_KIND_MAX]; - - #endif /* NFTABLES_TCPOPT_H */ -diff --git a/src/parser_bison.y b/src/parser_bison.y -index 56d26e3..8f77766 100644 ---- a/src/parser_bison.y -+++ b/src/parser_bison.y -@@ -384,7 +384,7 @@ int nft_lex(void *, void *, void *); - %token OPTION "option" - %token ECHO "echo" - %token EOL "eol" --%token NOOP "noop" -+%token NOP 
"nop" - %token SACK "sack" - %token SACK0 "sack0" - %token SACK1 "sack1" -@@ -4732,18 +4732,18 @@ tcp_hdr_field : SPORT { $$ = TCPHDR_SPORT; } - | URGPTR { $$ = TCPHDR_URGPTR; } - ; - --tcp_hdr_option_type : EOL { $$ = TCPOPTHDR_EOL; } -- | NOOP { $$ = TCPOPTHDR_NOOP; } -- | MSS { $$ = TCPOPTHDR_MAXSEG; } -- | WINDOW { $$ = TCPOPTHDR_WINDOW; } -- | SACK_PERM { $$ = TCPOPTHDR_SACK_PERMITTED; } -- | SACK { $$ = TCPOPTHDR_SACK0; } -- | SACK0 { $$ = TCPOPTHDR_SACK0; } -- | SACK1 { $$ = TCPOPTHDR_SACK1; } -- | SACK2 { $$ = TCPOPTHDR_SACK2; } -- | SACK3 { $$ = TCPOPTHDR_SACK3; } -- | ECHO { $$ = TCPOPTHDR_ECHO; } -- | TIMESTAMP { $$ = TCPOPTHDR_TIMESTAMP; } -+tcp_hdr_option_type : EOL { $$ = TCPOPT_KIND_EOL; } -+ | NOP { $$ = TCPOPT_KIND_NOP; } -+ | MSS { $$ = TCPOPT_KIND_MAXSEG; } -+ | WINDOW { $$ = TCPOPT_KIND_WINDOW; } -+ | SACK_PERM { $$ = TCPOPT_KIND_SACK_PERMITTED; } -+ | SACK { $$ = TCPOPT_KIND_SACK; } -+ | SACK0 { $$ = TCPOPT_KIND_SACK; } -+ | SACK1 { $$ = TCPOPT_KIND_SACK1; } -+ | SACK2 { $$ = TCPOPT_KIND_SACK2; } -+ | SACK3 { $$ = TCPOPT_KIND_SACK3; } -+ | ECHO { $$ = TCPOPT_KIND_ECHO; } -+ | TIMESTAMP { $$ = TCPOPT_KIND_TIMESTAMP; } - ; - - tcp_hdr_option_field : KIND { $$ = TCPOPTHDR_FIELD_KIND; } -diff --git a/src/parser_json.c b/src/parser_json.c -index 662bb4b..44b58a0 100644 ---- a/src/parser_json.c -+++ b/src/parser_json.c -@@ -456,9 +456,9 @@ static int json_parse_tcp_option_type(const char *name, int *val) - { - unsigned int i; - -- for (i = 0; i < array_size(tcpopthdr_protocols); i++) { -- if (tcpopthdr_protocols[i] && -- !strcmp(tcpopthdr_protocols[i]->name, name)) { -+ for (i = 0; i < array_size(tcpopt_protocols); i++) { -+ if (tcpopt_protocols[i] && -+ !strcmp(tcpopt_protocols[i]->name, name)) { - if (val) - *val = i; - return 0; -@@ -467,7 +467,7 @@ static int json_parse_tcp_option_type(const char *name, int *val) - /* special case for sack0 - sack3 */ - if (sscanf(name, "sack%u", &i) == 1 && i < 4) { - if (val) -- *val = TCPOPTHDR_SACK0 + i; -+ *val = TCPOPT_KIND_SACK + i; - return 0; - } - return 1; -@@ -476,7 +476,7 @@ static int json_parse_tcp_option_type(const char *name, int *val) - static int json_parse_tcp_option_field(int type, const char *name, int *val) - { - unsigned int i; -- const struct exthdr_desc *desc = tcpopthdr_protocols[type]; -+ const struct exthdr_desc *desc = tcpopt_protocols[type]; - - for (i = 0; i < array_size(desc->templates); i++) { - if (desc->templates[i].token && -diff --git a/src/scanner.l b/src/scanner.l -index a369802..20b1b2d 100644 ---- a/src/scanner.l -+++ b/src/scanner.l -@@ -421,7 +421,8 @@ addrstring ({macaddr}|{ip4addr}|{ip6addr}) - "eol" { return EOL; } - "maxseg" { return MSS; } - "mss" { return MSS; } --"noop" { return NOOP; } -+"nop" { return NOP; } -+"noop" { return NOP; } - "sack" { return SACK; } - "sack0" { return SACK0; } - "sack1" { return SACK1; } -diff --git a/src/tcpopt.c b/src/tcpopt.c -index 6dbaa9e..8d5bdec 100644 ---- a/src/tcpopt.c -+++ b/src/tcpopt.c -@@ -20,7 +20,7 @@ static const struct proto_hdr_template tcpopt_unknown_template = - __offset, __len) - static const struct exthdr_desc tcpopt_eol = { - .name = "eol", -- .type = TCPOPT_EOL, -+ .type = TCPOPT_KIND_EOL, - .templates = { - [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), - }, -@@ -28,7 +28,7 @@ static const struct exthdr_desc tcpopt_eol = { - - static const struct exthdr_desc tcpopt_nop = { - .name = "noop", -- .type = TCPOPT_NOP, -+ .type = TCPOPT_KIND_NOP, - .templates = { - [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), - }, -@@ -36,7 +36,7 @@ static 
const struct exthdr_desc tcpopt_nop = { - - static const struct exthdr_desc tcptopt_maxseg = { - .name = "maxseg", -- .type = TCPOPT_MAXSEG, -+ .type = TCPOPT_KIND_MAXSEG, - .templates = { - [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), - [TCPOPTHDR_FIELD_LENGTH] = PHT("length", 8, 8), -@@ -46,7 +46,7 @@ static const struct exthdr_desc tcptopt_maxseg = { - - static const struct exthdr_desc tcpopt_window = { - .name = "window", -- .type = TCPOPT_WINDOW, -+ .type = TCPOPT_KIND_WINDOW, - .templates = { - [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), - [TCPOPTHDR_FIELD_LENGTH] = PHT("length", 8, 8), -@@ -56,7 +56,7 @@ static const struct exthdr_desc tcpopt_window = { - - static const struct exthdr_desc tcpopt_sack_permitted = { - .name = "sack-perm", -- .type = TCPOPT_SACK_PERMITTED, -+ .type = TCPOPT_KIND_SACK_PERMITTED, - .templates = { - [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), - [TCPOPTHDR_FIELD_LENGTH] = PHT("length", 8, 8), -@@ -65,7 +65,7 @@ static const struct exthdr_desc tcpopt_sack_permitted = { - - static const struct exthdr_desc tcpopt_sack = { - .name = "sack", -- .type = TCPOPT_SACK, -+ .type = TCPOPT_KIND_SACK, - .templates = { - [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), - [TCPOPTHDR_FIELD_LENGTH] = PHT("length", 8, 8), -@@ -76,7 +76,7 @@ static const struct exthdr_desc tcpopt_sack = { - - static const struct exthdr_desc tcpopt_timestamp = { - .name = "timestamp", -- .type = TCPOPT_TIMESTAMP, -+ .type = TCPOPT_KIND_TIMESTAMP, - .templates = { - [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), - [TCPOPTHDR_FIELD_LENGTH] = PHT("length", 8, 8), -@@ -86,19 +86,14 @@ static const struct exthdr_desc tcpopt_timestamp = { - }; - #undef PHT - --#define TCPOPT_OBSOLETE ((struct exthdr_desc *)NULL) --#define TCPOPT_ECHO 6 --#define TCPOPT_ECHO_REPLY 7 --static const struct exthdr_desc *tcpopt_protocols[] = { -- [TCPOPT_EOL] = &tcpopt_eol, -- [TCPOPT_NOP] = &tcpopt_nop, -- [TCPOPT_MAXSEG] = &tcptopt_maxseg, -- [TCPOPT_WINDOW] = &tcpopt_window, -- [TCPOPT_SACK_PERMITTED] = &tcpopt_sack_permitted, -- [TCPOPT_SACK] = &tcpopt_sack, -- [TCPOPT_ECHO] = TCPOPT_OBSOLETE, -- [TCPOPT_ECHO_REPLY] = TCPOPT_OBSOLETE, -- [TCPOPT_TIMESTAMP] = &tcpopt_timestamp, -+const struct exthdr_desc *tcpopt_protocols[] = { -+ [TCPOPT_KIND_EOL] = &tcpopt_eol, -+ [TCPOPT_KIND_NOP] = &tcpopt_nop, -+ [TCPOPT_KIND_MAXSEG] = &tcptopt_maxseg, -+ [TCPOPT_KIND_WINDOW] = &tcpopt_window, -+ [TCPOPT_KIND_SACK_PERMITTED] = &tcpopt_sack_permitted, -+ [TCPOPT_KIND_SACK] = &tcpopt_sack, -+ [TCPOPT_KIND_TIMESTAMP] = &tcpopt_timestamp, - }; - - static unsigned int calc_offset(const struct exthdr_desc *desc, -@@ -136,51 +131,34 @@ static unsigned int calc_offset_reverse(const struct exthdr_desc *desc, - } - } - --const struct exthdr_desc *tcpopthdr_protocols[__TCPOPTHDR_MAX] = { -- [TCPOPTHDR_EOL] = &tcpopt_eol, -- [TCPOPTHDR_NOOP] = &tcpopt_nop, -- [TCPOPTHDR_MAXSEG] = &tcptopt_maxseg, -- [TCPOPTHDR_WINDOW] = &tcpopt_window, -- [TCPOPTHDR_SACK_PERMITTED] = &tcpopt_sack_permitted, -- [TCPOPTHDR_SACK0] = &tcpopt_sack, -- [TCPOPTHDR_SACK1] = &tcpopt_sack, -- [TCPOPTHDR_SACK2] = &tcpopt_sack, -- [TCPOPTHDR_SACK3] = &tcpopt_sack, -- [TCPOPTHDR_ECHO] = TCPOPT_OBSOLETE, -- [TCPOPTHDR_ECHO_REPLY] = TCPOPT_OBSOLETE, -- [TCPOPTHDR_TIMESTAMP] = &tcpopt_timestamp, --}; -- --static uint8_t tcpopt_optnum[] = { -- [TCPOPTHDR_SACK0] = 0, -- [TCPOPTHDR_SACK1] = 1, -- [TCPOPTHDR_SACK2] = 2, -- [TCPOPTHDR_SACK3] = 3, --}; -- --static uint8_t tcpopt_find_optnum(uint8_t optnum) --{ -- if (optnum > TCPOPTHDR_SACK3) -- return 0; -- -- return 
tcpopt_optnum[optnum]; --} -- --struct expr *tcpopt_expr_alloc(const struct location *loc, uint8_t type, -- uint8_t field) -+struct expr *tcpopt_expr_alloc(const struct location *loc, -+ unsigned int kind, -+ unsigned int field) - { - const struct proto_hdr_template *tmpl; - const struct exthdr_desc *desc; -+ uint8_t optnum = 0; - struct expr *expr; -- uint8_t optnum; - -- desc = tcpopthdr_protocols[type]; -+ switch (kind) { -+ case TCPOPT_KIND_SACK1: -+ kind = TCPOPT_KIND_SACK; -+ optnum = 1; -+ break; -+ case TCPOPT_KIND_SACK2: -+ kind = TCPOPT_KIND_SACK; -+ optnum = 2; -+ break; -+ case TCPOPT_KIND_SACK3: -+ kind = TCPOPT_KIND_SACK; -+ optnum = 3; -+ } -+ -+ desc = tcpopt_protocols[kind]; - tmpl = &desc->templates[field]; - if (!tmpl) - return NULL; - -- optnum = tcpopt_find_optnum(type); -- - expr = expr_alloc(loc, EXPR_EXTHDR, tmpl->dtype, - BYTEORDER_BIG_ENDIAN, tmpl->len); - expr->exthdr.desc = desc; -@@ -206,7 +184,7 @@ void tcpopt_init_raw(struct expr *expr, uint8_t type, unsigned int offset, - assert(type < array_size(tcpopt_protocols)); - expr->exthdr.desc = tcpopt_protocols[type]; - expr->exthdr.flags = flags; -- assert(expr->exthdr.desc != TCPOPT_OBSOLETE); -+ assert(expr->exthdr.desc != NULL); - - for (i = 0; i < array_size(expr->exthdr.desc->templates); ++i) { - tmpl = &expr->exthdr.desc->templates[i]; --- -2.31.1 - diff --git a/SOURCES/0051-tcpopt-rename-noop-to-nop.patch b/SOURCES/0051-tcpopt-rename-noop-to-nop.patch deleted file mode 100644 index 8ca855a..0000000 --- a/SOURCES/0051-tcpopt-rename-noop-to-nop.patch +++ /dev/null @@ -1,118 +0,0 @@ -From f4476f9428a79c5d6d8fe284f0da91c2d4177e66 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 12 Jul 2021 17:44:08 +0200 -Subject: [PATCH] tcpopt: rename noop to nop - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1979334 -Upstream Status: nftables commit 8f591eba561ac - -commit 8f591eba561aceeef605283c693b659a708d1cd3 -Author: Florian Westphal -Date: Mon Nov 2 14:58:41 2020 +0100 - - tcpopt: rename noop to nop - - 'nop' is the tcp padding "option". "noop" is retained for compatibility - on parser side. 
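A brief usage sketch, not part of the quoted patch (the "filter"/"input" table and chain names, the value 1460 and the counter statement are made up for illustration): together with the aliases kept by the scanner in the previous patches, both the old and the new spellings keep parsing, for example

    nft add rule inet filter input tcp option nop kind 1 counter      # "noop" is still accepted on input
    nft add rule inet filter input tcp option mss size 1460 counter   # "maxseg" works too, per the 0049 change

Listings use the renamed form ("nop"), which is why the tcpopt.t, .t.json and .t.payload expectations below change from "noop" to "nop".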
- - Signed-off-by: Florian Westphal ---- - doc/payload-expression.txt | 4 ++-- - src/tcpopt.c | 2 +- - tests/py/any/tcpopt.t | 2 +- - tests/py/any/tcpopt.t.json | 4 ++-- - tests/py/any/tcpopt.t.payload | 16 +--------------- - 5 files changed, 7 insertions(+), 21 deletions(-) - -diff --git a/doc/payload-expression.txt b/doc/payload-expression.txt -index 27145c3..3a07321 100644 ---- a/doc/payload-expression.txt -+++ b/doc/payload-expression.txt -@@ -559,8 +559,8 @@ Segment Routing Header - |eol| - End if option list| - kind --|noop| --1 Byte TCP No-op options | -+|nop| -+1 Byte TCP Nop padding option | - kind - |maxseg| - TCP Maximum Segment Size| -diff --git a/src/tcpopt.c b/src/tcpopt.c -index 8d5bdec..17cb580 100644 ---- a/src/tcpopt.c -+++ b/src/tcpopt.c -@@ -27,7 +27,7 @@ static const struct exthdr_desc tcpopt_eol = { - }; - - static const struct exthdr_desc tcpopt_nop = { -- .name = "noop", -+ .name = "nop", - .type = TCPOPT_KIND_NOP, - .templates = { - [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), -diff --git a/tests/py/any/tcpopt.t b/tests/py/any/tcpopt.t -index 5f21d49..1d42de8 100644 ---- a/tests/py/any/tcpopt.t -+++ b/tests/py/any/tcpopt.t -@@ -5,7 +5,7 @@ - *inet;test-inet;input - - tcp option eol kind 1;ok --tcp option noop kind 1;ok -+tcp option nop kind 1;ok - tcp option maxseg kind 1;ok - tcp option maxseg length 1;ok - tcp option maxseg size 1;ok -diff --git a/tests/py/any/tcpopt.t.json b/tests/py/any/tcpopt.t.json -index 2c6236a..b15e36e 100644 ---- a/tests/py/any/tcpopt.t.json -+++ b/tests/py/any/tcpopt.t.json -@@ -14,14 +14,14 @@ - } - ] - --# tcp option noop kind 1 -+# tcp option nop kind 1 - [ - { - "match": { - "left": { - "tcp option": { - "field": "kind", -- "name": "noop" -+ "name": "nop" - } - }, - "op": "==", -diff --git a/tests/py/any/tcpopt.t.payload b/tests/py/any/tcpopt.t.payload -index f63076a..9c480c8 100644 ---- a/tests/py/any/tcpopt.t.payload -+++ b/tests/py/any/tcpopt.t.payload -@@ -19,21 +19,7 @@ inet - [ exthdr load tcpopt 1b @ 0 + 0 => reg 1 ] - [ cmp eq reg 1 0x00000001 ] - --# tcp option noop kind 1 --ip -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 1 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option noop kind 1 --ip6 -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 1 + 0 => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option noop kind 1 -+# tcp option nop kind 1 - inet - [ meta load l4proto => reg 1 ] - [ cmp eq reg 1 0x00000006 ] --- -2.31.1 - diff --git a/SOURCES/0052-tcpopt-split-tcpopt_hdr_fields-into-per-option-enum.patch b/SOURCES/0052-tcpopt-split-tcpopt_hdr_fields-into-per-option-enum.patch deleted file mode 100644 index 22dd1c1..0000000 --- a/SOURCES/0052-tcpopt-split-tcpopt_hdr_fields-into-per-option-enum.patch +++ /dev/null @@ -1,538 +0,0 @@ -From 9697436145bf374093dc61e3ad857f7122de08ee Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 12 Jul 2021 17:44:08 +0200 -Subject: [PATCH] tcpopt: split tcpopt_hdr_fields into per-option enum - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1979334 -Upstream Status: nftables commit 2e1f821d713aa - -commit 2e1f821d713aa44717b38901ee80cac8e2aa0335 -Author: Florian Westphal -Date: Mon Nov 2 15:22:40 2020 +0100 - - tcpopt: split tcpopt_hdr_fields into per-option enum - - Currently we're limited to ten template fields in exthdr_desc struct. 
- Using a single enum for all tpc option fields thus won't work - indefinitely (TCPOPTHDR_FIELD_TSECR is 9) when new option templates get - added. - - Fortunately we can just use one enum per tcp option to avoid this. - As a side effect this also allows to simplify the sack offset - calculations. Rather than computing that on-the-fly, just add extra - fields to the SACK template. - - expr->exthdr.offset now holds the 'raw' value, filled in from the option - template. This would ease implementation of 'raw option matching' - using offset and length to load from the option. - - Signed-off-by: Florian Westphal ---- - include/tcpopt.h | 46 +++++++++++---- - src/evaluate.c | 16 ++--- - src/exthdr.c | 1 + - src/ipopt.c | 2 +- - src/netlink_delinearize.c | 2 +- - src/netlink_linearize.c | 4 +- - src/parser_bison.y | 18 +++--- - src/parser_json.c | 36 ++++++++++-- - src/tcpopt.c | 119 ++++++++++++++++---------------------- - 9 files changed, 139 insertions(+), 105 deletions(-) - -diff --git a/include/tcpopt.h b/include/tcpopt.h -index 7f3fbb8..667c8a7 100644 ---- a/include/tcpopt.h -+++ b/include/tcpopt.h -@@ -33,16 +33,42 @@ enum tcpopt_kind { - TCPOPT_KIND_SACK3 = 258, - }; - --enum tcpopt_hdr_fields { -- TCPOPTHDR_FIELD_INVALID, -- TCPOPTHDR_FIELD_KIND, -- TCPOPTHDR_FIELD_LENGTH, -- TCPOPTHDR_FIELD_SIZE, -- TCPOPTHDR_FIELD_COUNT, -- TCPOPTHDR_FIELD_LEFT, -- TCPOPTHDR_FIELD_RIGHT, -- TCPOPTHDR_FIELD_TSVAL, -- TCPOPTHDR_FIELD_TSECR, -+/* Internal identifiers */ -+enum tcpopt_common { -+ TCPOPT_COMMON_KIND, -+ TCPOPT_COMMON_LENGTH, -+}; -+ -+enum tcpopt_maxseg { -+ TCPOPT_MAXSEG_KIND, -+ TCPOPT_MAXSEG_LENGTH, -+ TCPOPT_MAXSEG_SIZE, -+}; -+ -+enum tcpopt_timestamp { -+ TCPOPT_TS_KIND, -+ TCPOPT_TS_LENGTH, -+ TCPOPT_TS_TSVAL, -+ TCPOPT_TS_TSECR, -+}; -+ -+enum tcpopt_windowscale { -+ TCPOPT_WINDOW_KIND, -+ TCPOPT_WINDOW_LENGTH, -+ TCPOPT_WINDOW_COUNT, -+}; -+ -+enum tcpopt_hdr_field_sack { -+ TCPOPT_SACK_KIND, -+ TCPOPT_SACK_LENGTH, -+ TCPOPT_SACK_LEFT, -+ TCPOPT_SACK_RIGHT, -+ TCPOPT_SACK_LEFT1, -+ TCPOPT_SACK_RIGHT1, -+ TCPOPT_SACK_LEFT2, -+ TCPOPT_SACK_RIGHT2, -+ TCPOPT_SACK_LEFT3, -+ TCPOPT_SACK_RIGHT3, - }; - - extern const struct exthdr_desc *tcpopt_protocols[__TCPOPT_KIND_MAX]; -diff --git a/src/evaluate.c b/src/evaluate.c -index 0181750..99a66c2 100644 ---- a/src/evaluate.c -+++ b/src/evaluate.c -@@ -474,7 +474,7 @@ static void expr_evaluate_bits(struct eval_ctx *ctx, struct expr **exprp) - &extra_len); - break; - case EXPR_EXTHDR: -- shift = expr_offset_shift(expr, expr->exthdr.tmpl->offset, -+ shift = expr_offset_shift(expr, expr->exthdr.offset, - &extra_len); - break; - default: -@@ -526,18 +526,16 @@ static int __expr_evaluate_exthdr(struct eval_ctx *ctx, struct expr **exprp) - if (expr_evaluate_primary(ctx, exprp) < 0) - return -1; - -- if (expr->exthdr.tmpl->offset % BITS_PER_BYTE != 0 || -+ if (expr->exthdr.offset % BITS_PER_BYTE != 0 || - expr->len % BITS_PER_BYTE != 0) - expr_evaluate_bits(ctx, exprp); - - switch (expr->exthdr.op) { - case NFT_EXTHDR_OP_TCPOPT: { - static const unsigned int max_tcpoptlen = (15 * 4 - 20) * BITS_PER_BYTE; -- unsigned int totlen = 0; -+ unsigned int totlen; - -- totlen += expr->exthdr.tmpl->offset; -- totlen += expr->exthdr.tmpl->len; -- totlen += expr->exthdr.offset; -+ totlen = expr->exthdr.tmpl->len + expr->exthdr.offset; - - if (totlen > max_tcpoptlen) - return expr_error(ctx->msgs, expr, -@@ -547,11 +545,9 @@ static int __expr_evaluate_exthdr(struct eval_ctx *ctx, struct expr **exprp) - } - case NFT_EXTHDR_OP_IPV4: { - static const unsigned int 
max_ipoptlen = 40 * BITS_PER_BYTE; -- unsigned int totlen = 0; -+ unsigned int totlen; - -- totlen += expr->exthdr.tmpl->offset; -- totlen += expr->exthdr.tmpl->len; -- totlen += expr->exthdr.offset; -+ totlen = expr->exthdr.offset + expr->exthdr.tmpl->len; - - if (totlen > max_ipoptlen) - return expr_error(ctx->msgs, expr, -diff --git a/src/exthdr.c b/src/exthdr.c -index e1ec6f3..c28213f 100644 ---- a/src/exthdr.c -+++ b/src/exthdr.c -@@ -99,6 +99,7 @@ struct expr *exthdr_expr_alloc(const struct location *loc, - BYTEORDER_BIG_ENDIAN, tmpl->len); - expr->exthdr.desc = desc; - expr->exthdr.tmpl = tmpl; -+ expr->exthdr.offset = tmpl->offset; - return expr; - } - -diff --git a/src/ipopt.c b/src/ipopt.c -index b3d0279..7ecb8b9 100644 ---- a/src/ipopt.c -+++ b/src/ipopt.c -@@ -102,7 +102,7 @@ struct expr *ipopt_expr_alloc(const struct location *loc, uint8_t type, - expr->exthdr.desc = desc; - expr->exthdr.tmpl = tmpl; - expr->exthdr.op = NFT_EXTHDR_OP_IPV4; -- expr->exthdr.offset = calc_offset(desc, tmpl, ptr); -+ expr->exthdr.offset = tmpl->offset + calc_offset(desc, tmpl, ptr); - - return expr; - } -diff --git a/src/netlink_delinearize.c b/src/netlink_delinearize.c -index 157a473..790336a 100644 ---- a/src/netlink_delinearize.c -+++ b/src/netlink_delinearize.c -@@ -727,8 +727,8 @@ static void netlink_parse_numgen(struct netlink_parse_ctx *ctx, - const struct location *loc, - const struct nftnl_expr *nle) - { -- enum nft_registers dreg; - uint32_t type, until, offset; -+ enum nft_registers dreg; - struct expr *expr; - - type = nftnl_expr_get_u32(nle, NFTNL_EXPR_NG_TYPE); -diff --git a/src/netlink_linearize.c b/src/netlink_linearize.c -index 25be634..9d1a064 100644 ---- a/src/netlink_linearize.c -+++ b/src/netlink_linearize.c -@@ -168,7 +168,7 @@ static void netlink_gen_exthdr(struct netlink_linearize_ctx *ctx, - const struct expr *expr, - enum nft_registers dreg) - { -- unsigned int offset = expr->exthdr.tmpl->offset + expr->exthdr.offset; -+ unsigned int offset = expr->exthdr.offset; - struct nftnl_expr *nle; - - nle = alloc_nft_expr("exthdr"); -@@ -896,7 +896,7 @@ static void netlink_gen_exthdr_stmt(struct netlink_linearize_ctx *ctx, - - expr = stmt->exthdr.expr; - -- offset = expr->exthdr.tmpl->offset + expr->exthdr.offset; -+ offset = expr->exthdr.offset; - - nle = alloc_nft_expr("exthdr"); - netlink_put_register(nle, NFTNL_EXPR_EXTHDR_SREG, sreg); -diff --git a/src/parser_bison.y b/src/parser_bison.y -index 8f77766..114b289 100644 ---- a/src/parser_bison.y -+++ b/src/parser_bison.y -@@ -4715,7 +4715,7 @@ tcp_hdr_expr : TCP tcp_hdr_field - } - | TCP OPTION tcp_hdr_option_type - { -- $$ = tcpopt_expr_alloc(&@$, $3, TCPOPTHDR_FIELD_KIND); -+ $$ = tcpopt_expr_alloc(&@$, $3, TCPOPT_COMMON_KIND); - $$->exthdr.flags = NFT_EXTHDR_F_PRESENT; - } - ; -@@ -4746,14 +4746,14 @@ tcp_hdr_option_type : EOL { $$ = TCPOPT_KIND_EOL; } - | TIMESTAMP { $$ = TCPOPT_KIND_TIMESTAMP; } - ; - --tcp_hdr_option_field : KIND { $$ = TCPOPTHDR_FIELD_KIND; } -- | LENGTH { $$ = TCPOPTHDR_FIELD_LENGTH; } -- | SIZE { $$ = TCPOPTHDR_FIELD_SIZE; } -- | COUNT { $$ = TCPOPTHDR_FIELD_COUNT; } -- | LEFT { $$ = TCPOPTHDR_FIELD_LEFT; } -- | RIGHT { $$ = TCPOPTHDR_FIELD_RIGHT; } -- | TSVAL { $$ = TCPOPTHDR_FIELD_TSVAL; } -- | TSECR { $$ = TCPOPTHDR_FIELD_TSECR; } -+tcp_hdr_option_field : KIND { $$ = TCPOPT_COMMON_KIND; } -+ | LENGTH { $$ = TCPOPT_COMMON_LENGTH; } -+ | SIZE { $$ = TCPOPT_MAXSEG_SIZE; } -+ | COUNT { $$ = TCPOPT_WINDOW_COUNT; } -+ | LEFT { $$ = TCPOPT_SACK_LEFT; } -+ | RIGHT { $$ = TCPOPT_SACK_RIGHT; } -+ | TSVAL { $$ 
= TCPOPT_TS_TSVAL; } -+ | TSECR { $$ = TCPOPT_TS_TSECR; } - ; - - dccp_hdr_expr : DCCP dccp_hdr_field -diff --git a/src/parser_json.c b/src/parser_json.c -index 44b58a0..ab2375f 100644 ---- a/src/parser_json.c -+++ b/src/parser_json.c -@@ -466,8 +466,10 @@ static int json_parse_tcp_option_type(const char *name, int *val) - } - /* special case for sack0 - sack3 */ - if (sscanf(name, "sack%u", &i) == 1 && i < 4) { -- if (val) -- *val = TCPOPT_KIND_SACK + i; -+ if (val && i == 0) -+ *val = TCPOPT_KIND_SACK; -+ else if (val && i > 0) -+ *val = TCPOPT_KIND_SACK1 + i - 1; - return 0; - } - return 1; -@@ -475,12 +477,38 @@ static int json_parse_tcp_option_type(const char *name, int *val) - - static int json_parse_tcp_option_field(int type, const char *name, int *val) - { -+ const struct exthdr_desc *desc; -+ unsigned int block = 0; - unsigned int i; -- const struct exthdr_desc *desc = tcpopt_protocols[type]; -+ -+ switch (type) { -+ case TCPOPT_KIND_SACK1: -+ type = TCPOPT_KIND_SACK; -+ block = 1; -+ break; -+ case TCPOPT_KIND_SACK2: -+ type = TCPOPT_KIND_SACK; -+ block = 2; -+ break; -+ case TCPOPT_KIND_SACK3: -+ type = TCPOPT_KIND_SACK; -+ block = 3; -+ break; -+ } -+ -+ if (type < 0 || type >= (int)array_size(tcpopt_protocols)) -+ return 1; -+ -+ desc = tcpopt_protocols[type]; - - for (i = 0; i < array_size(desc->templates); i++) { - if (desc->templates[i].token && - !strcmp(desc->templates[i].token, name)) { -+ if (block) { -+ block--; -+ continue; -+ } -+ - if (val) - *val = i; - return 0; -@@ -585,7 +613,7 @@ static struct expr *json_parse_tcp_option_expr(struct json_ctx *ctx, - - if (json_unpack(root, "{s:s}", "field", &field)) { - expr = tcpopt_expr_alloc(int_loc, descval, -- TCPOPTHDR_FIELD_KIND); -+ TCPOPT_COMMON_KIND); - expr->exthdr.flags = NFT_EXTHDR_F_PRESENT; - - return expr; -diff --git a/src/tcpopt.c b/src/tcpopt.c -index 17cb580..d1dd13b 100644 ---- a/src/tcpopt.c -+++ b/src/tcpopt.c -@@ -22,7 +22,7 @@ static const struct exthdr_desc tcpopt_eol = { - .name = "eol", - .type = TCPOPT_KIND_EOL, - .templates = { -- [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), -+ [TCPOPT_COMMON_KIND] = PHT("kind", 0, 8), - }, - }; - -@@ -30,7 +30,7 @@ static const struct exthdr_desc tcpopt_nop = { - .name = "nop", - .type = TCPOPT_KIND_NOP, - .templates = { -- [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), -+ [TCPOPT_COMMON_KIND] = PHT("kind", 0, 8), - }, - }; - -@@ -38,9 +38,9 @@ static const struct exthdr_desc tcptopt_maxseg = { - .name = "maxseg", - .type = TCPOPT_KIND_MAXSEG, - .templates = { -- [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), -- [TCPOPTHDR_FIELD_LENGTH] = PHT("length", 8, 8), -- [TCPOPTHDR_FIELD_SIZE] = PHT("size", 16, 16), -+ [TCPOPT_MAXSEG_KIND] = PHT("kind", 0, 8), -+ [TCPOPT_MAXSEG_LENGTH] = PHT("length", 8, 8), -+ [TCPOPT_MAXSEG_SIZE] = PHT("size", 16, 16), - }, - }; - -@@ -48,9 +48,9 @@ static const struct exthdr_desc tcpopt_window = { - .name = "window", - .type = TCPOPT_KIND_WINDOW, - .templates = { -- [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), -- [TCPOPTHDR_FIELD_LENGTH] = PHT("length", 8, 8), -- [TCPOPTHDR_FIELD_COUNT] = PHT("count", 16, 8), -+ [TCPOPT_WINDOW_KIND] = PHT("kind", 0, 8), -+ [TCPOPT_WINDOW_LENGTH] = PHT("length", 8, 8), -+ [TCPOPT_WINDOW_COUNT] = PHT("count", 16, 8), - }, - }; - -@@ -58,8 +58,8 @@ static const struct exthdr_desc tcpopt_sack_permitted = { - .name = "sack-perm", - .type = TCPOPT_KIND_SACK_PERMITTED, - .templates = { -- [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), -- [TCPOPTHDR_FIELD_LENGTH] = PHT("length", 8, 8), -+ [TCPOPT_COMMON_KIND] = 
PHT("kind", 0, 8), -+ [TCPOPT_COMMON_LENGTH] = PHT("length", 8, 8), - }, - }; - -@@ -67,10 +67,16 @@ static const struct exthdr_desc tcpopt_sack = { - .name = "sack", - .type = TCPOPT_KIND_SACK, - .templates = { -- [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), -- [TCPOPTHDR_FIELD_LENGTH] = PHT("length", 8, 8), -- [TCPOPTHDR_FIELD_LEFT] = PHT("left", 16, 32), -- [TCPOPTHDR_FIELD_RIGHT] = PHT("right", 48, 32), -+ [TCPOPT_SACK_KIND] = PHT("kind", 0, 8), -+ [TCPOPT_SACK_LENGTH] = PHT("length", 8, 8), -+ [TCPOPT_SACK_LEFT] = PHT("left", 16, 32), -+ [TCPOPT_SACK_RIGHT] = PHT("right", 48, 32), -+ [TCPOPT_SACK_LEFT1] = PHT("left", 80, 32), -+ [TCPOPT_SACK_RIGHT1] = PHT("right", 112, 32), -+ [TCPOPT_SACK_LEFT2] = PHT("left", 144, 32), -+ [TCPOPT_SACK_RIGHT2] = PHT("right", 176, 32), -+ [TCPOPT_SACK_LEFT3] = PHT("left", 208, 32), -+ [TCPOPT_SACK_RIGHT3] = PHT("right", 240, 32), - }, - }; - -@@ -78,12 +84,13 @@ static const struct exthdr_desc tcpopt_timestamp = { - .name = "timestamp", - .type = TCPOPT_KIND_TIMESTAMP, - .templates = { -- [TCPOPTHDR_FIELD_KIND] = PHT("kind", 0, 8), -- [TCPOPTHDR_FIELD_LENGTH] = PHT("length", 8, 8), -- [TCPOPTHDR_FIELD_TSVAL] = PHT("tsval", 16, 32), -- [TCPOPTHDR_FIELD_TSECR] = PHT("tsecr", 48, 32), -+ [TCPOPT_TS_KIND] = PHT("kind", 0, 8), -+ [TCPOPT_TS_LENGTH] = PHT("length", 8, 8), -+ [TCPOPT_TS_TSVAL] = PHT("tsval", 16, 32), -+ [TCPOPT_TS_TSECR] = PHT("tsecr", 48, 32), - }, - }; -+ - #undef PHT - - const struct exthdr_desc *tcpopt_protocols[] = { -@@ -96,65 +103,43 @@ const struct exthdr_desc *tcpopt_protocols[] = { - [TCPOPT_KIND_TIMESTAMP] = &tcpopt_timestamp, - }; - --static unsigned int calc_offset(const struct exthdr_desc *desc, -- const struct proto_hdr_template *tmpl, -- unsigned int num) --{ -- if (!desc || tmpl == &tcpopt_unknown_template) -- return 0; -- -- switch (desc->type) { -- case TCPOPT_SACK: -- /* Make sure, offset calculations only apply to left and right -- * fields -- */ -- return (tmpl->offset < 16) ? 0 : num * 64; -- default: -- return 0; -- } --} -- -- --static unsigned int calc_offset_reverse(const struct exthdr_desc *desc, -- const struct proto_hdr_template *tmpl, -- unsigned int offset) --{ -- if (!desc || tmpl == &tcpopt_unknown_template) -- return offset; -- -- switch (desc->type) { -- case TCPOPT_SACK: -- /* We can safely ignore the first left/right field */ -- return offset < 80 ? 
offset : (offset % 64); -- default: -- return offset; -- } --} -- - struct expr *tcpopt_expr_alloc(const struct location *loc, - unsigned int kind, - unsigned int field) - { - const struct proto_hdr_template *tmpl; -- const struct exthdr_desc *desc; -- uint8_t optnum = 0; -+ const struct exthdr_desc *desc = NULL; - struct expr *expr; - - switch (kind) { - case TCPOPT_KIND_SACK1: - kind = TCPOPT_KIND_SACK; -- optnum = 1; -+ if (field == TCPOPT_SACK_LEFT) -+ field = TCPOPT_SACK_LEFT1; -+ else if (field == TCPOPT_SACK_RIGHT) -+ field = TCPOPT_SACK_RIGHT1; - break; - case TCPOPT_KIND_SACK2: - kind = TCPOPT_KIND_SACK; -- optnum = 2; -+ if (field == TCPOPT_SACK_LEFT) -+ field = TCPOPT_SACK_LEFT2; -+ else if (field == TCPOPT_SACK_RIGHT) -+ field = TCPOPT_SACK_RIGHT2; - break; - case TCPOPT_KIND_SACK3: - kind = TCPOPT_KIND_SACK; -- optnum = 3; -+ if (field == TCPOPT_SACK_LEFT) -+ field = TCPOPT_SACK_LEFT3; -+ else if (field == TCPOPT_SACK_RIGHT) -+ field = TCPOPT_SACK_RIGHT3; -+ break; - } - -- desc = tcpopt_protocols[kind]; -+ if (kind < array_size(tcpopt_protocols)) -+ desc = tcpopt_protocols[kind]; -+ -+ if (!desc) -+ return NULL; - tmpl = &desc->templates[field]; - if (!tmpl) - return NULL; -@@ -164,34 +149,32 @@ struct expr *tcpopt_expr_alloc(const struct location *loc, - expr->exthdr.desc = desc; - expr->exthdr.tmpl = tmpl; - expr->exthdr.op = NFT_EXTHDR_OP_TCPOPT; -- expr->exthdr.offset = calc_offset(desc, tmpl, optnum); -+ expr->exthdr.offset = tmpl->offset; - - return expr; - } - --void tcpopt_init_raw(struct expr *expr, uint8_t type, unsigned int offset, -+void tcpopt_init_raw(struct expr *expr, uint8_t type, unsigned int off, - unsigned int len, uint32_t flags) - { - const struct proto_hdr_template *tmpl; -- unsigned int i, off; -+ unsigned int i; - - assert(expr->etype == EXPR_EXTHDR); - - expr->len = len; - expr->exthdr.flags = flags; -- expr->exthdr.offset = offset; -+ expr->exthdr.offset = off; -+ -+ if (type >= array_size(tcpopt_protocols)) -+ return; - -- assert(type < array_size(tcpopt_protocols)); - expr->exthdr.desc = tcpopt_protocols[type]; - expr->exthdr.flags = flags; - assert(expr->exthdr.desc != NULL); - - for (i = 0; i < array_size(expr->exthdr.desc->templates); ++i) { - tmpl = &expr->exthdr.desc->templates[i]; -- /* We have to reverse calculate the offset for the sack options -- * at this point -- */ -- off = calc_offset_reverse(expr->exthdr.desc, tmpl, offset); - if (tmpl->offset != off || tmpl->len != len) - continue; - --- -2.31.1 - diff --git a/SOURCES/0053-tcpopt-allow-to-check-for-presence-of-any-tcp-option.patch b/SOURCES/0053-tcpopt-allow-to-check-for-presence-of-any-tcp-option.patch deleted file mode 100644 index 6c7f7ed..0000000 --- a/SOURCES/0053-tcpopt-allow-to-check-for-presence-of-any-tcp-option.patch +++ /dev/null @@ -1,336 +0,0 @@ -From 8a4b6cbf58e965d67b0337ba1736bd3691a49890 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 12 Jul 2021 17:44:08 +0200 -Subject: [PATCH] tcpopt: allow to check for presence of any tcp option - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1979334 -Upstream Status: nftables commit 24d8da3083422 - -commit 24d8da3083422da8336eeed2ee23b2ccf598ba5a -Author: Florian Westphal -Date: Wed Oct 21 23:54:17 2020 +0200 - - tcpopt: allow to check for presence of any tcp option - - nft currently doesn't allow to check for presence of arbitrary tcp options. - Only known options where nft provides a template can be tested for. - - This allows to test for presence of raw protocol values as well. 
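A minimal sketch of how this reads as complete rules (assuming an inet filter table with an input chain already exists; the option kinds are arbitrary placeholders):

    nft add rule inet filter input tcp option 42 exists counter
    nft add rule inet filter input tcp option 255 missing counter

The second form corresponds to the "tcp option 255 missing;ok" test case added further down in this patch.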
- - Example: - - tcp option 42 exists - - Signed-off-by: Florian Westphal ---- - include/expression.h | 3 +- - src/exthdr.c | 12 ++++++++ - src/ipopt.c | 1 + - src/netlink_linearize.c | 2 +- - src/parser_bison.y | 7 +++++ - src/tcpopt.c | 42 +++++++++++++++++++++++---- - tests/py/any/tcpopt.t | 2 ++ - tests/py/any/tcpopt.t.payload | 53 +++-------------------------------- - 8 files changed, 65 insertions(+), 57 deletions(-) - -diff --git a/include/expression.h b/include/expression.h -index 2e41aa0..b50183d 100644 ---- a/include/expression.h -+++ b/include/expression.h -@@ -299,7 +299,8 @@ struct expr { - /* EXPR_EXTHDR */ - const struct exthdr_desc *desc; - const struct proto_hdr_template *tmpl; -- unsigned int offset; -+ uint16_t offset; -+ uint8_t raw_type; - enum nft_exthdr_op op; - unsigned int flags; - } exthdr; -diff --git a/src/exthdr.c b/src/exthdr.c -index c28213f..68d5aa5 100644 ---- a/src/exthdr.c -+++ b/src/exthdr.c -@@ -32,6 +32,13 @@ static void exthdr_expr_print(const struct expr *expr, struct output_ctx *octx) - */ - unsigned int offset = expr->exthdr.offset / 64; - -+ if (expr->exthdr.desc == NULL && -+ expr->exthdr.offset == 0 && -+ expr->exthdr.flags & NFT_EXTHDR_F_PRESENT) { -+ nft_print(octx, "tcp option %d", expr->exthdr.raw_type); -+ return; -+ } -+ - nft_print(octx, "tcp option %s", expr->exthdr.desc->name); - if (expr->exthdr.flags & NFT_EXTHDR_F_PRESENT) - return; -@@ -59,6 +66,7 @@ static bool exthdr_expr_cmp(const struct expr *e1, const struct expr *e2) - return e1->exthdr.desc == e2->exthdr.desc && - e1->exthdr.tmpl == e2->exthdr.tmpl && - e1->exthdr.op == e2->exthdr.op && -+ e1->exthdr.raw_type == e2->exthdr.raw_type && - e1->exthdr.flags == e2->exthdr.flags; - } - -@@ -69,6 +77,7 @@ static void exthdr_expr_clone(struct expr *new, const struct expr *expr) - new->exthdr.offset = expr->exthdr.offset; - new->exthdr.op = expr->exthdr.op; - new->exthdr.flags = expr->exthdr.flags; -+ new->exthdr.raw_type = expr->exthdr.raw_type; - } - - const struct expr_ops exthdr_expr_ops = { -@@ -98,6 +107,7 @@ struct expr *exthdr_expr_alloc(const struct location *loc, - expr = expr_alloc(loc, EXPR_EXTHDR, tmpl->dtype, - BYTEORDER_BIG_ENDIAN, tmpl->len); - expr->exthdr.desc = desc; -+ expr->exthdr.raw_type = desc ? 
desc->type : 0; - expr->exthdr.tmpl = tmpl; - expr->exthdr.offset = tmpl->offset; - return expr; -@@ -176,6 +186,8 @@ void exthdr_init_raw(struct expr *expr, uint8_t type, - unsigned int i; - - assert(expr->etype == EXPR_EXTHDR); -+ expr->exthdr.raw_type = type; -+ - if (op == NFT_EXTHDR_OP_TCPOPT) - return tcpopt_init_raw(expr, type, offset, len, flags); - if (op == NFT_EXTHDR_OP_IPV4) -diff --git a/src/ipopt.c b/src/ipopt.c -index 7ecb8b9..5f9f908 100644 ---- a/src/ipopt.c -+++ b/src/ipopt.c -@@ -103,6 +103,7 @@ struct expr *ipopt_expr_alloc(const struct location *loc, uint8_t type, - expr->exthdr.tmpl = tmpl; - expr->exthdr.op = NFT_EXTHDR_OP_IPV4; - expr->exthdr.offset = tmpl->offset + calc_offset(desc, tmpl, ptr); -+ expr->exthdr.raw_type = desc->type; - - return expr; - } -diff --git a/src/netlink_linearize.c b/src/netlink_linearize.c -index 9d1a064..28b0e6a 100644 ---- a/src/netlink_linearize.c -+++ b/src/netlink_linearize.c -@@ -174,7 +174,7 @@ static void netlink_gen_exthdr(struct netlink_linearize_ctx *ctx, - nle = alloc_nft_expr("exthdr"); - netlink_put_register(nle, NFTNL_EXPR_EXTHDR_DREG, dreg); - nftnl_expr_set_u8(nle, NFTNL_EXPR_EXTHDR_TYPE, -- expr->exthdr.desc->type); -+ expr->exthdr.raw_type); - nftnl_expr_set_u32(nle, NFTNL_EXPR_EXTHDR_OFFSET, offset / BITS_PER_BYTE); - nftnl_expr_set_u32(nle, NFTNL_EXPR_EXTHDR_LEN, - div_round_up(expr->len, BITS_PER_BYTE)); -diff --git a/src/parser_bison.y b/src/parser_bison.y -index 114b289..4ea9364 100644 ---- a/src/parser_bison.y -+++ b/src/parser_bison.y -@@ -4744,6 +4744,13 @@ tcp_hdr_option_type : EOL { $$ = TCPOPT_KIND_EOL; } - | SACK3 { $$ = TCPOPT_KIND_SACK3; } - | ECHO { $$ = TCPOPT_KIND_ECHO; } - | TIMESTAMP { $$ = TCPOPT_KIND_TIMESTAMP; } -+ | NUM { -+ if ($1 > 255) { -+ erec_queue(error(&@1, "value too large"), state->msgs); -+ YYERROR; -+ } -+ $$ = $1; -+ } - ; - - tcp_hdr_option_field : KIND { $$ = TCPOPT_COMMON_KIND; } -diff --git a/src/tcpopt.c b/src/tcpopt.c -index d1dd13b..1cf97a5 100644 ---- a/src/tcpopt.c -+++ b/src/tcpopt.c -@@ -103,6 +103,19 @@ const struct exthdr_desc *tcpopt_protocols[] = { - [TCPOPT_KIND_TIMESTAMP] = &tcpopt_timestamp, - }; - -+/** -+ * tcpopt_expr_alloc - allocate tcp option extension expression -+ * -+ * @loc: location from parser -+ * @kind: raw tcp option value to find in packet -+ * @field: highlevel field to find in the option if @kind is present in packet -+ * -+ * Allocate a new tcp option expression. -+ * @kind is the raw option value to find in the packet. -+ * Exception: SACK may use extra OOB data that is mangled here. -+ * -+ * @field is the optional field to extract from the @type option. 
-+ */ - struct expr *tcpopt_expr_alloc(const struct location *loc, - unsigned int kind, - unsigned int field) -@@ -138,8 +151,22 @@ struct expr *tcpopt_expr_alloc(const struct location *loc, - if (kind < array_size(tcpopt_protocols)) - desc = tcpopt_protocols[kind]; - -- if (!desc) -- return NULL; -+ if (!desc) { -+ if (field != TCPOPT_COMMON_KIND || kind > 255) -+ return NULL; -+ -+ expr = expr_alloc(loc, EXPR_EXTHDR, &integer_type, -+ BYTEORDER_BIG_ENDIAN, 8); -+ -+ desc = tcpopt_protocols[TCPOPT_NOP]; -+ tmpl = &desc->templates[field]; -+ expr->exthdr.desc = desc; -+ expr->exthdr.tmpl = tmpl; -+ expr->exthdr.op = NFT_EXTHDR_OP_TCPOPT; -+ expr->exthdr.raw_type = kind; -+ return expr; -+ } -+ - tmpl = &desc->templates[field]; - if (!tmpl) - return NULL; -@@ -149,6 +176,7 @@ struct expr *tcpopt_expr_alloc(const struct location *loc, - expr->exthdr.desc = desc; - expr->exthdr.tmpl = tmpl; - expr->exthdr.op = NFT_EXTHDR_OP_TCPOPT; -+ expr->exthdr.raw_type = desc->type; - expr->exthdr.offset = tmpl->offset; - - return expr; -@@ -165,6 +193,10 @@ void tcpopt_init_raw(struct expr *expr, uint8_t type, unsigned int off, - expr->len = len; - expr->exthdr.flags = flags; - expr->exthdr.offset = off; -+ expr->exthdr.op = NFT_EXTHDR_OP_TCPOPT; -+ -+ if (flags & NFT_EXTHDR_F_PRESENT) -+ datatype_set(expr, &boolean_type); - - if (type >= array_size(tcpopt_protocols)) - return; -@@ -178,12 +210,10 @@ void tcpopt_init_raw(struct expr *expr, uint8_t type, unsigned int off, - if (tmpl->offset != off || tmpl->len != len) - continue; - -- if (flags & NFT_EXTHDR_F_PRESENT) -- datatype_set(expr, &boolean_type); -- else -+ if ((flags & NFT_EXTHDR_F_PRESENT) == 0) - datatype_set(expr, tmpl->dtype); -+ - expr->exthdr.tmpl = tmpl; -- expr->exthdr.op = NFT_EXTHDR_OP_TCPOPT; - break; - } - } -diff --git a/tests/py/any/tcpopt.t b/tests/py/any/tcpopt.t -index 1d42de8..7b17014 100644 ---- a/tests/py/any/tcpopt.t -+++ b/tests/py/any/tcpopt.t -@@ -30,6 +30,7 @@ tcp option timestamp kind 1;ok - tcp option timestamp length 1;ok - tcp option timestamp tsval 1;ok - tcp option timestamp tsecr 1;ok -+tcp option 255 missing;ok - - tcp option foobar;fail - tcp option foo bar;fail -@@ -38,6 +39,7 @@ tcp option eol left 1;fail - tcp option eol left 1;fail - tcp option sack window;fail - tcp option sack window 1;fail -+tcp option 256 exists;fail - - tcp option window exists;ok - tcp option window missing;ok -diff --git a/tests/py/any/tcpopt.t.payload b/tests/py/any/tcpopt.t.payload -index 9c480c8..34f8e26 100644 ---- a/tests/py/any/tcpopt.t.payload -+++ b/tests/py/any/tcpopt.t.payload -@@ -509,20 +509,6 @@ inet - [ exthdr load tcpopt 4b @ 8 + 2 => reg 1 ] - [ cmp eq reg 1 0x01000000 ] - --# tcp option timestamp tsecr 1 --ip -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 8 + 6 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- --# tcp option timestamp tsecr 1 --ip6 -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 4b @ 8 + 6 => reg 1 ] -- [ cmp eq reg 1 0x01000000 ] -- - # tcp option timestamp tsecr 1 - inet - [ meta load l4proto => reg 1 ] -@@ -530,19 +516,12 @@ inet - [ exthdr load tcpopt 4b @ 8 + 6 => reg 1 ] - [ cmp eq reg 1 0x01000000 ] - --# tcp option window exists --ip -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 3 + 0 present => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -- --# tcp option window exists --ip6 -+# tcp option 255 missing -+inet - [ meta load l4proto => reg 1 ] - [ cmp eq reg 1 0x00000006 
] -- [ exthdr load tcpopt 1b @ 3 + 0 present => reg 1 ] -- [ cmp eq reg 1 0x00000001 ] -+ [ exthdr load tcpopt 1b @ 255 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000000 ] - - # tcp option window exists - inet -@@ -551,20 +530,6 @@ inet - [ exthdr load tcpopt 1b @ 3 + 0 present => reg 1 ] - [ cmp eq reg 1 0x00000001 ] - --# tcp option window missing --ip -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 3 + 0 present => reg 1 ] -- [ cmp eq reg 1 0x00000000 ] -- --# tcp option window missing --ip6 -- [ meta load l4proto => reg 1 ] -- [ cmp eq reg 1 0x00000006 ] -- [ exthdr load tcpopt 1b @ 3 + 0 present => reg 1 ] -- [ cmp eq reg 1 0x00000000 ] -- - # tcp option window missing - inet - [ meta load l4proto => reg 1 ] -@@ -572,16 +537,6 @@ inet - [ exthdr load tcpopt 1b @ 3 + 0 present => reg 1 ] - [ cmp eq reg 1 0x00000000 ] - --# tcp option maxseg size set 1360 --ip -- [ immediate reg 1 0x00005005 ] -- [ exthdr write tcpopt reg 1 => 2b @ 2 + 2 ] -- --# tcp option maxseg size set 1360 --ip6 -- [ immediate reg 1 0x00005005 ] -- [ exthdr write tcpopt reg 1 => 2b @ 2 + 2 ] -- - # tcp option maxseg size set 1360 - inet - [ immediate reg 1 0x00005005 ] --- -2.31.1 - diff --git a/SOURCES/0054-tcp-add-raw-tcp-option-match-support.patch b/SOURCES/0054-tcp-add-raw-tcp-option-match-support.patch deleted file mode 100644 index e069466..0000000 --- a/SOURCES/0054-tcp-add-raw-tcp-option-match-support.patch +++ /dev/null @@ -1,137 +0,0 @@ -From 267d86b62132a009badd57b2ffcffed6ae682a1e Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 12 Jul 2021 17:44:08 +0200 -Subject: [PATCH] tcp: add raw tcp option match support - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1979334 -Upstream Status: nftables commit 881d8cb21c0b9 - -commit 881d8cb21c0b9168787b932f41b801593bde2216 -Author: Florian Westphal -Date: Mon Nov 2 20:10:25 2020 +0100 - - tcp: add raw tcp option match support - - tcp option @42,16,4 (@kind,offset,length). 
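As a concrete sketch (assuming an inet filter table with an input chain; the offset and length are given in bits), the raw form can address a field of a well-known option just like the named templates do:

    nft add rule inet filter input tcp option @2,16,16 1460 counter

Kind 2 is maxseg, so this matches the same packets as "tcp option maxseg size 1460"; the "tcp option @255,8,8 255" test case added below exercises the same syntax with an option kind nft has no template for.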
- - Signed-off-by: Florian Westphal ---- - doc/payload-expression.txt | 6 ++++++ - src/exthdr.c | 13 +++++++++---- - src/parser_bison.y | 5 +++++ - src/tcpopt.c | 2 ++ - tests/py/any/tcpopt.t | 2 ++ - tests/py/any/tcpopt.t.payload | 7 +++++++ - 6 files changed, 31 insertions(+), 4 deletions(-) - -diff --git a/doc/payload-expression.txt b/doc/payload-expression.txt -index 3a07321..b6d2a28 100644 ---- a/doc/payload-expression.txt -+++ b/doc/payload-expression.txt -@@ -591,6 +591,12 @@ TCP Timestamps | - kind, length, tsval, tsecr - |============================ - -+TCP option matching also supports raw expression syntax to access arbitrary options: -+[verse] -+*tcp option* -+[verse] -+*tcp option* *@*'number'*,*'offset'*,*'length' -+ - .IP Options - [options="header"] - |================== -diff --git a/src/exthdr.c b/src/exthdr.c -index 68d5aa5..5c75720 100644 ---- a/src/exthdr.c -+++ b/src/exthdr.c -@@ -32,10 +32,15 @@ static void exthdr_expr_print(const struct expr *expr, struct output_ctx *octx) - */ - unsigned int offset = expr->exthdr.offset / 64; - -- if (expr->exthdr.desc == NULL && -- expr->exthdr.offset == 0 && -- expr->exthdr.flags & NFT_EXTHDR_F_PRESENT) { -- nft_print(octx, "tcp option %d", expr->exthdr.raw_type); -+ if (expr->exthdr.desc == NULL) { -+ if (expr->exthdr.offset == 0 && -+ expr->exthdr.flags & NFT_EXTHDR_F_PRESENT) { -+ nft_print(octx, "tcp option %d", expr->exthdr.raw_type); -+ return; -+ } -+ -+ nft_print(octx, "tcp option @%u,%u,%u", expr->exthdr.raw_type, -+ expr->exthdr.offset, expr->len); - return; - } - -diff --git a/src/parser_bison.y b/src/parser_bison.y -index 4ea9364..5aedc55 100644 ---- a/src/parser_bison.y -+++ b/src/parser_bison.y -@@ -4718,6 +4718,11 @@ tcp_hdr_expr : TCP tcp_hdr_field - $$ = tcpopt_expr_alloc(&@$, $3, TCPOPT_COMMON_KIND); - $$->exthdr.flags = NFT_EXTHDR_F_PRESENT; - } -+ | TCP OPTION AT tcp_hdr_option_type COMMA NUM COMMA NUM -+ { -+ $$ = tcpopt_expr_alloc(&@$, $4, 0); -+ tcpopt_init_raw($$, $4, $6, $8, 0); -+ } - ; - - tcp_hdr_field : SPORT { $$ = TCPHDR_SPORT; } -diff --git a/src/tcpopt.c b/src/tcpopt.c -index 1cf97a5..05b5ee6 100644 ---- a/src/tcpopt.c -+++ b/src/tcpopt.c -@@ -197,6 +197,8 @@ void tcpopt_init_raw(struct expr *expr, uint8_t type, unsigned int off, - - if (flags & NFT_EXTHDR_F_PRESENT) - datatype_set(expr, &boolean_type); -+ else -+ datatype_set(expr, &integer_type); - - if (type >= array_size(tcpopt_protocols)) - return; -diff --git a/tests/py/any/tcpopt.t b/tests/py/any/tcpopt.t -index 7b17014..e759ac6 100644 ---- a/tests/py/any/tcpopt.t -+++ b/tests/py/any/tcpopt.t -@@ -31,6 +31,7 @@ tcp option timestamp length 1;ok - tcp option timestamp tsval 1;ok - tcp option timestamp tsecr 1;ok - tcp option 255 missing;ok -+tcp option @255,8,8 255;ok - - tcp option foobar;fail - tcp option foo bar;fail -@@ -40,6 +41,7 @@ tcp option eol left 1;fail - tcp option sack window;fail - tcp option sack window 1;fail - tcp option 256 exists;fail -+tcp option @255,8,8 256;fail - - tcp option window exists;ok - tcp option window missing;ok -diff --git a/tests/py/any/tcpopt.t.payload b/tests/py/any/tcpopt.t.payload -index 34f8e26..cddba61 100644 ---- a/tests/py/any/tcpopt.t.payload -+++ b/tests/py/any/tcpopt.t.payload -@@ -523,6 +523,13 @@ inet - [ exthdr load tcpopt 1b @ 255 + 0 present => reg 1 ] - [ cmp eq reg 1 0x00000000 ] - -+# tcp option @255,8,8 255 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ exthdr load tcpopt 1b @ 255 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x000000ff ] -+ - # tcp option window 
exists - inet - [ meta load l4proto => reg 1 ] --- -2.31.1 - diff --git a/SOURCES/0055-json-tcp-add-raw-tcp-option-match-support.patch b/SOURCES/0055-json-tcp-add-raw-tcp-option-match-support.patch deleted file mode 100644 index 324b5be..0000000 --- a/SOURCES/0055-json-tcp-add-raw-tcp-option-match-support.patch +++ /dev/null @@ -1,199 +0,0 @@ -From ad566e27398e81ed803c4225179bb8df4718a2e9 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 12 Jul 2021 17:44:08 +0200 -Subject: [PATCH] json: tcp: add raw tcp option match support - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1979334 -Upstream Status: nftables commit cb21869649208 - -commit cb21869649208118ed61354e2674858e4ff6c23c -Author: Florian Westphal -Date: Tue Nov 3 12:04:20 2020 +0100 - - json: tcp: add raw tcp option match support - - To similar change as in previous one, this time for the - jason (de)serialization. - - Re-uses the raw payload match syntax, i.e. base,offset,length. - - Signed-off-by: Florian Westphal ---- - src/json.c | 22 ++++++++-------- - src/parser_json.c | 52 ++++++++++++++++++++++++++------------ - tests/py/any/tcpopt.t.json | 34 +++++++++++++++++++++++++ - 3 files changed, 82 insertions(+), 26 deletions(-) - -diff --git a/src/json.c b/src/json.c -index 1906e7d..b77c6d2 100644 ---- a/src/json.c -+++ b/src/json.c -@@ -656,30 +656,32 @@ json_t *map_expr_json(const struct expr *expr, struct output_ctx *octx) - json_t *exthdr_expr_json(const struct expr *expr, struct output_ctx *octx) - { - const char *desc = expr->exthdr.desc ? -- expr->exthdr.desc->name : -- "unknown-exthdr"; -+ expr->exthdr.desc->name : NULL; - const char *field = expr->exthdr.tmpl->token; - json_t *root; - bool is_exists = expr->exthdr.flags & NFT_EXTHDR_F_PRESENT; - - if (expr->exthdr.op == NFT_EXTHDR_OP_TCPOPT) { -+ static const char *offstrs[] = { "", "1", "2", "3" }; - unsigned int offset = expr->exthdr.offset / 64; -+ const char *offstr = ""; - -- if (offset) { -- const char *offstrs[] = { "0", "1", "2", "3" }; -- const char *offstr = ""; -- -+ if (desc) { - if (offset < 4) - offstr = offstrs[offset]; - - root = json_pack("{s:s+}", "name", desc, offstr); -+ -+ if (!is_exists) -+ json_object_set_new(root, "field", json_string(field)); - } else { -- root = json_pack("{s:s}", "name", desc); -+ root = json_pack("{s:i, s:i, s:i}", -+ "base", expr->exthdr.raw_type, -+ "offset", expr->exthdr.offset, -+ "len", expr->len); -+ is_exists = false; - } - -- if (!is_exists) -- json_object_set_new(root, "field", json_string(field)); -- - return json_pack("{s:o}", "tcp option", root); - } - if (expr->exthdr.op == NFT_EXTHDR_OP_IPV4) { -diff --git a/src/parser_json.c b/src/parser_json.c -index ab2375f..fbf7db5 100644 ---- a/src/parser_json.c -+++ b/src/parser_json.c -@@ -500,6 +500,8 @@ static int json_parse_tcp_option_field(int type, const char *name, int *val) - return 1; - - desc = tcpopt_protocols[type]; -+ if (!desc) -+ return 1; - - for (i = 0; i < array_size(desc->templates); i++) { - if (desc->templates[i].token && -@@ -599,30 +601,48 @@ static struct expr *json_parse_payload_expr(struct json_ctx *ctx, - static struct expr *json_parse_tcp_option_expr(struct json_ctx *ctx, - const char *type, json_t *root) - { -+ int fieldval, kind, offset, len; - const char *desc, *field; -- int descval, fieldval; - struct expr *expr; - -- if (json_unpack_err(ctx, root, "{s:s}", "name", &desc)) -- return NULL; -- -- if (json_parse_tcp_option_type(desc, &descval)) { -- json_error(ctx, "Unknown tcp option name '%s'.", desc); -- return NULL; -- } -+ if 
(!json_unpack(root, "{s:i, s:i, s:i}", -+ "base", &kind, "offset", &offset, "len", &len)) { -+ uint32_t flag = 0; - -- if (json_unpack(root, "{s:s}", "field", &field)) { -- expr = tcpopt_expr_alloc(int_loc, descval, -+ expr = tcpopt_expr_alloc(int_loc, kind, - TCPOPT_COMMON_KIND); -- expr->exthdr.flags = NFT_EXTHDR_F_PRESENT; - -+ if (kind < 0 || kind > 255) -+ return NULL; -+ -+ if (offset == TCPOPT_COMMON_KIND && len == 8) -+ flag = NFT_EXTHDR_F_PRESENT; -+ -+ tcpopt_init_raw(expr, kind, offset, len, flag); - return expr; -+ } else if (!json_unpack(root, "{s:s}", "name", &desc)) { -+ if (json_parse_tcp_option_type(desc, &kind)) { -+ json_error(ctx, "Unknown tcp option name '%s'.", desc); -+ return NULL; -+ } -+ -+ if (json_unpack(root, "{s:s}", "field", &field)) { -+ expr = tcpopt_expr_alloc(int_loc, kind, -+ TCPOPT_COMMON_KIND); -+ expr->exthdr.flags = NFT_EXTHDR_F_PRESENT; -+ return expr; -+ } -+ -+ if (json_parse_tcp_option_field(kind, field, &fieldval)) { -+ json_error(ctx, "Unknown tcp option field '%s'.", field); -+ return NULL; -+ } -+ -+ return tcpopt_expr_alloc(int_loc, kind, fieldval); - } -- if (json_parse_tcp_option_field(descval, field, &fieldval)) { -- json_error(ctx, "Unknown tcp option field '%s'.", field); -- return NULL; -- } -- return tcpopt_expr_alloc(int_loc, descval, fieldval); -+ -+ json_error(ctx, "Invalid tcp option expression properties."); -+ return NULL; - } - - static int json_parse_ip_option_type(const char *name, int *val) -diff --git a/tests/py/any/tcpopt.t.json b/tests/py/any/tcpopt.t.json -index b15e36e..139e97d 100644 ---- a/tests/py/any/tcpopt.t.json -+++ b/tests/py/any/tcpopt.t.json -@@ -414,6 +414,40 @@ - } - ] - -+# tcp option 255 missing -+[ -+ { -+ "match": { -+ "left": { -+ "tcp option": { -+ "base": 255, -+ "len": 8, -+ "offset": 0 -+ } -+ }, -+ "op": "==", -+ "right": false -+ } -+ } -+] -+ -+# tcp option @255,8,8 255 -+[ -+ { -+ "match": { -+ "left": { -+ "tcp option": { -+ "base": 255, -+ "len": 8, -+ "offset": 8 -+ } -+ }, -+ "op": "==", -+ "right": 255 -+ } -+ } -+] -+ - # tcp option window exists - [ - { --- -2.31.1 - diff --git a/SOURCES/0056-json-Simplify-non-tcpopt-exthdr-printing-a-bit.patch b/SOURCES/0056-json-Simplify-non-tcpopt-exthdr-printing-a-bit.patch deleted file mode 100644 index 1c21c46..0000000 --- a/SOURCES/0056-json-Simplify-non-tcpopt-exthdr-printing-a-bit.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 2026f7d056679508f8506fbba7f578aa15af7c05 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 12 Jul 2021 16:32:27 +0200 -Subject: [PATCH] json: Simplify non-tcpopt exthdr printing a bit - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1979334 -Upstream Status: nftables commit fd81d3ec3ae8b - -commit fd81d3ec3ae8b8d1d54a708d63b2dab2c8508c90 -Author: Phil Sutter -Date: Tue May 4 13:18:11 2021 +0200 - - json: Simplify non-tcpopt exthdr printing a bit - - This was just duplicate code apart from the object's name. 
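Either way the inner object keeps the same shape and only the outer key differs, for example (illustrative field names):

    { "ip option": { "name": "rr" } }
    { "exthdr": { "name": "frag", "field": "id" } }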
- - Signed-off-by: Phil Sutter ---- - src/json.c | 18 +++++++----------- - 1 file changed, 7 insertions(+), 11 deletions(-) - -diff --git a/src/json.c b/src/json.c -index b77c6d2..a6d0716 100644 ---- a/src/json.c -+++ b/src/json.c -@@ -684,21 +684,17 @@ json_t *exthdr_expr_json(const struct expr *expr, struct output_ctx *octx) - - return json_pack("{s:o}", "tcp option", root); - } -- if (expr->exthdr.op == NFT_EXTHDR_OP_IPV4) { -- root = json_pack("{s:s}", "name", desc); - -- if (!is_exists) -- json_object_set_new(root, "field", json_string(field)); -- -- return json_pack("{s:o}", "ip option", root); -- } -- -- root = json_pack("{s:s}", -- "name", desc); -+ root = json_pack("{s:s}", "name", desc); - if (!is_exists) - json_object_set_new(root, "field", json_string(field)); - -- return json_pack("{s:o}", "exthdr", root); -+ switch (expr->exthdr.op) { -+ case NFT_EXTHDR_OP_IPV4: -+ return json_pack("{s:o}", "ip option", root); -+ default: -+ return json_pack("{s:o}", "exthdr", root); -+ } - } - - json_t *verdict_expr_json(const struct expr *expr, struct output_ctx *octx) --- -2.31.1 - diff --git a/SOURCES/0057-scanner-introduce-start-condition-stack.patch b/SOURCES/0057-scanner-introduce-start-condition-stack.patch deleted file mode 100644 index 4a40b1d..0000000 --- a/SOURCES/0057-scanner-introduce-start-condition-stack.patch +++ /dev/null @@ -1,175 +0,0 @@ -From c724812d9561021fb6a80c817d411d9ba2de5dbd Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Tue, 13 Jul 2021 13:54:12 +0200 -Subject: [PATCH] scanner: introduce start condition stack - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1979334 -Upstream Status: nftables commit 5896772fe3c5f - -commit 5896772fe3c5f01696188ea04957a825ee601b12 -Author: Florian Westphal -Date: Mon Mar 8 18:18:33 2021 +0100 - - scanner: introduce start condition stack - - Add a small initial chunk of flex start conditionals. - - This starts with two low-hanging fruits, numgen and j/symhash. - - NUMGEN and HASH start conditions are entered from flex when - the corresponding expression token is encountered. - - Flex returns to the INIT condition when the bison parser - has seen a complete numgen/hash statement. - - This intentionally uses a stack rather than BEGIN() - to eventually support nested states. - - The scanner_pop_start_cond() function argument is not used yet, but - will need to be used later to deal with nesting. 
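At the rule level, the expressions gaining their own scanner scope here are used like this (a sketch assuming an inet nat table with a prerouting chain):

    nft add rule inet nat prerouting meta mark set numgen inc mod 2 offset 100
    nft add rule inet nat prerouting meta mark set jhash ip saddr mod 4 seed 0xdeadbeef

With the scoped start conditions, sub-keywords such as "inc", "mod", "offset" and "seed" only need to be recognized while one of these expressions is being scanned.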
- - Signed-off-by: Florian Westphal ---- - include/parser.h | 8 ++++++++ - src/parser_bison.y | 11 +++++++---- - src/scanner.l | 36 +++++++++++++++++++++++++++++------- - 3 files changed, 44 insertions(+), 11 deletions(-) - -diff --git a/include/parser.h b/include/parser.h -index 949284d..1d293f5 100644 ---- a/include/parser.h -+++ b/include/parser.h -@@ -28,6 +28,12 @@ struct parser_state { - struct list_head *cmds; - }; - -+enum startcond_type { -+ PARSER_SC_BEGIN, -+ PARSER_SC_EXPR_HASH, -+ PARSER_SC_EXPR_NUMGEN, -+}; -+ - struct mnl_socket; - - extern void parser_init(struct nft_ctx *nft, struct parser_state *state, -@@ -47,4 +53,6 @@ extern void scanner_push_buffer(void *scanner, - const struct input_descriptor *indesc, - const char *buffer); - -+extern void scanner_pop_start_cond(void *scanner, enum startcond_type sc); -+ - #endif /* NFTABLES_PARSER_H */ -diff --git a/src/parser_bison.y b/src/parser_bison.y -index 5aedc55..9a9447f 100644 ---- a/src/parser_bison.y -+++ b/src/parser_bison.y -@@ -840,6 +840,9 @@ opt_newline : NEWLINE - | /* empty */ - ; - -+close_scope_hash : { scanner_pop_start_cond(nft->scanner, PARSER_SC_EXPR_HASH); }; -+close_scope_numgen : { scanner_pop_start_cond(nft->scanner, PARSER_SC_EXPR_NUMGEN); }; -+ - common_block : INCLUDE QUOTED_STRING stmt_separator - { - if (scanner_include_file(nft, scanner, $2, &@$) < 0) { -@@ -4249,7 +4252,7 @@ numgen_type : INC { $$ = NFT_NG_INCREMENTAL; } - | RANDOM { $$ = NFT_NG_RANDOM; } - ; - --numgen_expr : NUMGEN numgen_type MOD NUM offset_opt -+numgen_expr : NUMGEN numgen_type MOD NUM offset_opt close_scope_numgen - { - $$ = numgen_expr_alloc(&@$, $2, $4, $5); - } -@@ -4306,17 +4309,17 @@ xfrm_expr : IPSEC xfrm_dir xfrm_spnum xfrm_state_key - } - ; - --hash_expr : JHASH expr MOD NUM SEED NUM offset_opt -+hash_expr : JHASH expr MOD NUM SEED NUM offset_opt close_scope_hash - { - $$ = hash_expr_alloc(&@$, $4, true, $6, $7, NFT_HASH_JENKINS); - $$->hash.expr = $2; - } -- | JHASH expr MOD NUM offset_opt -+ | JHASH expr MOD NUM offset_opt close_scope_hash - { - $$ = hash_expr_alloc(&@$, $4, false, 0, $5, NFT_HASH_JENKINS); - $$->hash.expr = $2; - } -- | SYMHASH MOD NUM offset_opt -+ | SYMHASH MOD NUM offset_opt close_scope_hash - { - $$ = hash_expr_alloc(&@$, $3, false, 0, $4, NFT_HASH_SYM); - } -diff --git a/src/scanner.l b/src/scanner.l -index 20b1b2d..68fe988 100644 ---- a/src/scanner.l -+++ b/src/scanner.l -@@ -98,6 +98,8 @@ static void reset_pos(struct parser_state *state, struct location *loc) - state->indesc->column = 1; - } - -+static void scanner_push_start_cond(void *scanner, enum startcond_type type); -+ - #define YY_USER_ACTION { \ - update_pos(yyget_extra(yyscanner), yylloc, yyleng); \ - update_offset(yyget_extra(yyscanner), yylloc, yyleng); \ -@@ -193,6 +195,9 @@ addrstring ({macaddr}|{ip4addr}|{ip6addr}) - %option yylineno - %option nodefault - %option warn -+%option stack -+%s SCANSTATE_EXPR_HASH -+%s SCANSTATE_EXPR_NUMGEN - - %% - -@@ -548,15 +553,21 @@ addrstring ({macaddr}|{ip4addr}|{ip6addr}) - "state" { return STATE; } - "status" { return STATUS; } - --"numgen" { return NUMGEN; } --"inc" { return INC; } --"mod" { return MOD; } --"offset" { return OFFSET; } -+"numgen" { scanner_push_start_cond(yyscanner, SCANSTATE_EXPR_NUMGEN); return NUMGEN; } -+{ -+ "inc" { return INC; } -+} - --"jhash" { return JHASH; } --"symhash" { return SYMHASH; } --"seed" { return SEED; } -+"jhash" { scanner_push_start_cond(yyscanner, SCANSTATE_EXPR_HASH); return JHASH; } -+"symhash" { scanner_push_start_cond(yyscanner, 
SCANSTATE_EXPR_HASH); return SYMHASH; } - -+{ -+ "seed" { return SEED; } -+} -+{ -+ "mod" { return MOD; } -+ "offset" { return OFFSET; } -+} - "dup" { return DUP; } - "fwd" { return FWD; } - -@@ -949,3 +960,14 @@ void scanner_destroy(struct nft_ctx *nft) - input_descriptor_list_destroy(state); - yylex_destroy(nft->scanner); - } -+ -+static void scanner_push_start_cond(void *scanner, enum startcond_type type) -+{ -+ yy_push_state((int)type, scanner); -+} -+ -+void scanner_pop_start_cond(void *scanner, enum startcond_type t) -+{ -+ yy_pop_state(scanner); -+ (void)yy_top_state(scanner); /* suppress gcc warning wrt. unused function */ -+} --- -2.31.1 - diff --git a/SOURCES/0058-scanner-sctp-Move-to-own-scope.patch b/SOURCES/0058-scanner-sctp-Move-to-own-scope.patch deleted file mode 100644 index 9576e93..0000000 --- a/SOURCES/0058-scanner-sctp-Move-to-own-scope.patch +++ /dev/null @@ -1,96 +0,0 @@ -From 595e79b1ccdfa6b11cd6c2b1c8eda0161b58d22a Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Tue, 13 Jul 2021 13:54:12 +0200 -Subject: [PATCH] scanner: sctp: Move to own scope - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1979334 -Upstream Status: nftables commit 0925d7e214825 -Conflicts: Context change due to missing other scopes. - -commit 0925d7e214825628e7db4a86d5ebbad578ab0777 -Author: Phil Sutter -Date: Tue May 4 13:06:32 2021 +0200 - - scanner: sctp: Move to own scope - - This isolates only "vtag" token for now. - - Signed-off-by: Phil Sutter - Reviewed-by: Florian Westphal ---- - include/parser.h | 1 + - src/parser_bison.y | 5 +++-- - src/scanner.l | 8 ++++++-- - 3 files changed, 10 insertions(+), 4 deletions(-) - -diff --git a/include/parser.h b/include/parser.h -index 1d293f5..2e6ef4d 100644 ---- a/include/parser.h -+++ b/include/parser.h -@@ -30,6 +30,7 @@ struct parser_state { - - enum startcond_type { - PARSER_SC_BEGIN, -+ PARSER_SC_SCTP, - PARSER_SC_EXPR_HASH, - PARSER_SC_EXPR_NUMGEN, - }; -diff --git a/src/parser_bison.y b/src/parser_bison.y -index 9a9447f..beb5995 100644 ---- a/src/parser_bison.y -+++ b/src/parser_bison.y -@@ -842,6 +842,7 @@ opt_newline : NEWLINE - - close_scope_hash : { scanner_pop_start_cond(nft->scanner, PARSER_SC_EXPR_HASH); }; - close_scope_numgen : { scanner_pop_start_cond(nft->scanner, PARSER_SC_EXPR_NUMGEN); }; -+close_scope_sctp : { scanner_pop_start_cond(nft->scanner, PARSER_SC_SCTP); }; - - common_block : INCLUDE QUOTED_STRING stmt_separator - { -@@ -4059,7 +4060,7 @@ primary_rhs_expr : symbol_expr { $$ = $1; } - BYTEORDER_HOST_ENDIAN, - sizeof(data) * BITS_PER_BYTE, &data); - } -- | SCTP -+ | SCTP close_scope_sctp - { - uint8_t data = IPPROTO_SCTP; - $$ = constant_expr_alloc(&@$, &inet_protocol_type, -@@ -4782,7 +4783,7 @@ dccp_hdr_field : SPORT { $$ = DCCPHDR_SPORT; } - | TYPE { $$ = DCCPHDR_TYPE; } - ; - --sctp_hdr_expr : SCTP sctp_hdr_field -+sctp_hdr_expr : SCTP sctp_hdr_field close_scope_sctp - { - $$ = payload_expr_alloc(&@$, &proto_sctp, $2); - } -diff --git a/src/scanner.l b/src/scanner.l -index 68fe988..b79ae55 100644 ---- a/src/scanner.l -+++ b/src/scanner.l -@@ -196,6 +196,7 @@ addrstring ({macaddr}|{ip4addr}|{ip6addr}) - %option nodefault - %option warn - %option stack -+%s SCANSTATE_SCTP - %s SCANSTATE_EXPR_HASH - %s SCANSTATE_EXPR_NUMGEN - -@@ -488,8 +489,11 @@ addrstring ({macaddr}|{ip4addr}|{ip6addr}) - - "dccp" { return DCCP; } - --"sctp" { return SCTP; } --"vtag" { return VTAG; } -+"sctp" { scanner_push_start_cond(yyscanner, SCANSTATE_SCTP); return SCTP; } -+ -+{ -+ "vtag" { return VTAG; } -+} - - "rt" { return RT; } - 
"rt0" { return RT0; } --- -2.31.1 - diff --git a/SOURCES/0059-exthdr-Implement-SCTP-Chunk-matching.patch b/SOURCES/0059-exthdr-Implement-SCTP-Chunk-matching.patch deleted file mode 100644 index e095e66..0000000 --- a/SOURCES/0059-exthdr-Implement-SCTP-Chunk-matching.patch +++ /dev/null @@ -1,1625 +0,0 @@ -From 5a8d6197929e30520bb3839c9165d89930888daf Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Tue, 13 Jul 2021 13:54:42 +0200 -Subject: [PATCH] exthdr: Implement SCTP Chunk matching - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1979334 -Upstream Status: nftables commit 0e3871cfd9a1e -Conflicts: - * Context changes due to missing other scopes. - * Context change due to missing commit 6e6ef00028f1c - ("exthdr: remove tcp dependency for tcp option matching"). - -commit 0e3871cfd9a1e32a4ac041ce87a8057b11a89924 -Author: Phil Sutter -Date: Tue May 4 13:41:38 2021 +0200 - - exthdr: Implement SCTP Chunk matching - - Extend exthdr expression to support scanning through SCTP packet chunks - and matching on fixed fields' values. - - Signed-off-by: Phil Sutter - Acked-by: Florian Westphal ---- - doc/libnftables-json.adoc | 13 + - doc/payload-expression.txt | 53 +++ - include/linux/netfilter/nf_tables.h | 2 + - include/parser.h | 1 + - include/sctp_chunk.h | 87 +++++ - src/Makefile.am | 1 + - src/evaluate.c | 2 + - src/exthdr.c | 8 + - src/json.c | 2 + - src/parser_bison.y | 148 ++++++++- - src/parser_json.c | 49 +++ - src/scanner.l | 38 +++ - src/sctp_chunk.c | 261 +++++++++++++++ - tests/py/inet/sctp.t | 37 +++ - tests/py/inet/sctp.t.json | 478 ++++++++++++++++++++++++++++ - tests/py/inet/sctp.t.payload | 155 +++++++++ - 16 files changed, 1333 insertions(+), 2 deletions(-) - create mode 100644 include/sctp_chunk.h - create mode 100644 src/sctp_chunk.c - -diff --git a/doc/libnftables-json.adoc b/doc/libnftables-json.adoc -index 858abbf..fba4cb0 100644 ---- a/doc/libnftables-json.adoc -+++ b/doc/libnftables-json.adoc -@@ -1200,6 +1200,19 @@ Create a reference to a field (*field*) of a TCP option header (*name*). - If the *field* property is not given, the expression is to be used as a TCP option - existence check in a *match* statement with a boolean on the right hand side. - -+=== SCTP CHUNK -+[verse] -+*{ "sctp chunk": { -+ "name":* 'STRING'*, -+ "field":* 'STRING' -+*}}* -+ -+Create a reference to a field (*field*) of an SCTP chunk (*name*). -+ -+If the *field* property is not given, the expression is to be used as an SCTP -+chunk existence check in a *match* statement with a boolean on the right hand -+side. 
-+ - === META - [verse] - ____ -diff --git a/doc/payload-expression.txt b/doc/payload-expression.txt -index b6d2a28..bd03ca2 100644 ---- a/doc/payload-expression.txt -+++ b/doc/payload-expression.txt -@@ -369,7 +369,33 @@ integer (16 bit) - SCTP HEADER EXPRESSION - ~~~~~~~~~~~~~~~~~~~~~~~ - [verse] -+____ - *sctp* {*sport* | *dport* | *vtag* | *checksum*} -+*sctp chunk* 'CHUNK' [ 'FIELD' ] -+ -+'CHUNK' := *data* | *init* | *init-ack* | *sack* | *heartbeat* | -+ *heartbeat-ack* | *abort* | *shutdown* | *shutdown-ack* | *error* | -+ *cookie-echo* | *cookie-ack* | *ecne* | *cwr* | *shutdown-complete* -+ | *asconf-ack* | *forward-tsn* | *asconf* -+ -+'FIELD' := 'COMMON_FIELD' | 'DATA_FIELD' | 'INIT_FIELD' | 'INIT_ACK_FIELD' | -+ 'SACK_FIELD' | 'SHUTDOWN_FIELD' | 'ECNE_FIELD' | 'CWR_FIELD' | -+ 'ASCONF_ACK_FIELD' | 'FORWARD_TSN_FIELD' | 'ASCONF_FIELD' -+ -+'COMMON_FIELD' := *type* | *flags* | *length* -+'DATA_FIELD' := *tsn* | *stream* | *ssn* | *ppid* -+'INIT_FIELD' := *init-tag* | *a-rwnd* | *num-outbound-streams* | -+ *num-inbound-streams* | *initial-tsn* -+'INIT_ACK_FIELD' := 'INIT_FIELD' -+'SACK_FIELD' := *cum-tsn-ack* | *a-rwnd* | *num-gap-ack-blocks* | -+ *num-dup-tsns* -+'SHUTDOWN_FIELD' := *cum-tsn-ack* -+'ECNE_FIELD' := *lowest-tsn* -+'CWR_FIELD' := *lowest-tsn* -+'ASCONF_ACK_FIELD' := *seqno* -+'FORWARD_TSN_FIELD' := *new-cum-tsn* -+'ASCONF_FIELD' := *seqno* -+____ - - .SCTP header expression - [options="header"] -@@ -387,8 +413,35 @@ integer (32 bit) - |checksum| - Checksum| - integer (32 bit) -+|chunk| -+Search chunk in packet| -+without 'FIELD', boolean indicating existence - |================ - -+.SCTP chunk fields -+[options="header"] -+|================== -+|Name| Width in bits | Chunk | Notes -+|type| 8 | all | not useful, defined by chunk type -+|flags| 8 | all | semantics defined on per-chunk basis -+|length| 16 | all | length of this chunk in bytes excluding padding -+|tsn| 32 | data | transmission sequence number -+|stream| 16 | data | stream identifier -+|ssn| 16 | data | stream sequence number -+|ppid| 32 | data | payload protocol identifier -+|init-tag| 32 | init, init-ack | initiate tag -+|a-rwnd| 32 | init, init-ack, sack | advertised receiver window credit -+|num-outbound-streams| 16 | init, init-ack | number of outbound streams -+|num-inbound-streams| 16 | init, init-ack | number of inbound streams -+|initial-tsn| 32 | init, init-ack | initial transmit sequence number -+|cum-tsn-ack| 32 | sack, shutdown | cumulative transmission sequence number acknowledged -+|num-gap-ack-blocks| 16 | sack | number of Gap Ack Blocks included -+|num-dup-tsns| 16 | sack | number of duplicate transmission sequence numbers received -+|lowest-tsn| 32 | ecne, cwr | lowest transmission sequence number -+|seqno| 32 | asconf-ack, asconf | sequence number -+|new-cum-tsn| 32 | forward-tsn | new cumulative transmission sequence number -+|================== -+ - DCCP HEADER EXPRESSION - ~~~~~~~~~~~~~~~~~~~~~~ - [verse] -diff --git a/include/linux/netfilter/nf_tables.h b/include/linux/netfilter/nf_tables.h -index 1328b8e..960a5b4 100644 ---- a/include/linux/netfilter/nf_tables.h -+++ b/include/linux/netfilter/nf_tables.h -@@ -755,11 +755,13 @@ enum nft_exthdr_flags { - * @NFT_EXTHDR_OP_IPV6: match against ipv6 extension headers - * @NFT_EXTHDR_OP_TCP: match against tcp options - * @NFT_EXTHDR_OP_IPV4: match against ipv4 options -+ * @NFT_EXTHDR_OP_SCTP: match against sctp chunks - */ - enum nft_exthdr_op { - NFT_EXTHDR_OP_IPV6, - NFT_EXTHDR_OP_TCPOPT, - NFT_EXTHDR_OP_IPV4, -+ NFT_EXTHDR_OP_SCTP, 
- __NFT_EXTHDR_OP_MAX - }; - #define NFT_EXTHDR_OP_MAX (__NFT_EXTHDR_OP_MAX - 1) -diff --git a/include/parser.h b/include/parser.h -index 2e6ef4d..99bed3c 100644 ---- a/include/parser.h -+++ b/include/parser.h -@@ -33,6 +33,7 @@ enum startcond_type { - PARSER_SC_SCTP, - PARSER_SC_EXPR_HASH, - PARSER_SC_EXPR_NUMGEN, -+ PARSER_SC_EXPR_SCTP_CHUNK, - }; - - struct mnl_socket; -diff --git a/include/sctp_chunk.h b/include/sctp_chunk.h -new file mode 100644 -index 0000000..3819200 ---- /dev/null -+++ b/include/sctp_chunk.h -@@ -0,0 +1,87 @@ -+/* -+ * Copyright Red Hat -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 (or any -+ * later) as published by the Free Software Foundation. -+ */ -+ -+#ifndef NFTABLES_SCTP_CHUNK_H -+#define NFTABLES_SCTP_CHUNK_H -+ -+/* SCTP chunk types used on wire */ -+enum sctp_hdr_chunk_types { -+ SCTP_CHUNK_TYPE_DATA = 0, -+ SCTP_CHUNK_TYPE_INIT = 1, -+ SCTP_CHUNK_TYPE_INIT_ACK = 2, -+ SCTP_CHUNK_TYPE_SACK = 3, -+ SCTP_CHUNK_TYPE_HEARTBEAT = 4, -+ SCTP_CHUNK_TYPE_HEARTBEAT_ACK = 5, -+ SCTP_CHUNK_TYPE_ABORT = 6, -+ SCTP_CHUNK_TYPE_SHUTDOWN = 7, -+ SCTP_CHUNK_TYPE_SHUTDOWN_ACK = 8, -+ SCTP_CHUNK_TYPE_ERROR = 9, -+ SCTP_CHUNK_TYPE_COOKIE_ECHO = 10, -+ SCTP_CHUNK_TYPE_COOKIE_ACK = 11, -+ SCTP_CHUNK_TYPE_ECNE = 12, -+ SCTP_CHUNK_TYPE_CWR = 13, -+ SCTP_CHUNK_TYPE_SHUTDOWN_COMPLETE = 14, -+ SCTP_CHUNK_TYPE_ASCONF_ACK = 128, -+ SCTP_CHUNK_TYPE_FORWARD_TSN = 192, -+ SCTP_CHUNK_TYPE_ASCONF = 193, -+}; -+ -+enum sctp_hdr_chunk_common_fields { -+ SCTP_CHUNK_COMMON_TYPE, -+ SCTP_CHUNK_COMMON_FLAGS, -+ SCTP_CHUNK_COMMON_LENGTH, -+ __SCTP_CHUNK_COMMON_MAX, -+}; -+ -+#define SCTP_CHUNK_START_INDEX __SCTP_CHUNK_COMMON_MAX -+ -+enum sctp_hdr_chunk_data_fields { -+ SCTP_CHUNK_DATA_TSN = SCTP_CHUNK_START_INDEX, -+ SCTP_CHUNK_DATA_STREAM, -+ SCTP_CHUNK_DATA_SSN, -+ SCTP_CHUNK_DATA_PPID, -+}; -+ -+enum sctp_hdr_chunk_init_fields { -+ SCTP_CHUNK_INIT_TAG = SCTP_CHUNK_START_INDEX, -+ SCTP_CHUNK_INIT_RWND, -+ SCTP_CHUNK_INIT_OSTREAMS, -+ SCTP_CHUNK_INIT_ISTREAMS, -+ SCTP_CHUNK_INIT_TSN, -+}; -+ -+enum sctp_hdr_chunk_sack_fields { -+ SCTP_CHUNK_SACK_CTSN_ACK = SCTP_CHUNK_START_INDEX, -+ SCTP_CHUNK_SACK_RWND, -+ SCTP_CHUNK_SACK_GACK_BLOCKS, -+ SCTP_CHUNK_SACK_DUP_TSNS, -+}; -+ -+enum sctp_hdr_chunk_shutdown_fields { -+ SCTP_CHUNK_SHUTDOWN_CTSN_ACK = SCTP_CHUNK_START_INDEX, -+}; -+ -+enum sctp_hdr_chunk_ecne_cwr_fields { -+ SCTP_CHUNK_ECNE_CWR_MIN_TSN = SCTP_CHUNK_START_INDEX, -+}; -+ -+enum sctp_hdr_chunk_asconf_fields { -+ SCTP_CHUNK_ASCONF_SEQNO = SCTP_CHUNK_START_INDEX, -+}; -+ -+enum sctp_hdr_chunk_fwd_tsn_fields { -+ SCTP_CHUNK_FORWARD_TSN_NCTSN = SCTP_CHUNK_START_INDEX, -+}; -+ -+struct expr *sctp_chunk_expr_alloc(const struct location *loc, -+ unsigned int type, unsigned int field); -+void sctp_chunk_init_raw(struct expr *expr, uint8_t type, unsigned int off, -+ unsigned int len, uint32_t flags); -+const struct exthdr_desc *sctp_chunk_protocol_find(const char *name); -+ -+#endif /* NFTABLES_SCTP_CHUNK_H */ -diff --git a/src/Makefile.am b/src/Makefile.am -index 740c21f..366820b 100644 ---- a/src/Makefile.am -+++ b/src/Makefile.am -@@ -64,6 +64,7 @@ libnftables_la_SOURCES = \ - tcpopt.c \ - socket.c \ - print.c \ -+ sctp_chunk.c \ - libnftables.c \ - libnftables.map - -diff --git a/src/evaluate.c b/src/evaluate.c -index 99a66c2..00ec20b 100644 ---- a/src/evaluate.c -+++ b/src/evaluate.c -@@ -579,6 +579,8 @@ static int expr_evaluate_exthdr(struct eval_ctx *ctx, struct expr **exprp) - dependency 
= &proto_tcp; - pb = PROTO_BASE_TRANSPORT_HDR; - break; -+ case NFT_EXTHDR_OP_SCTP: -+ return __expr_evaluate_exthdr(ctx, exprp); - case NFT_EXTHDR_OP_IPV4: - dependency = &proto_ip; - break; -diff --git a/src/exthdr.c b/src/exthdr.c -index 5c75720..f5689e7 100644 ---- a/src/exthdr.c -+++ b/src/exthdr.c -@@ -22,6 +22,7 @@ - #include - #include - #include -+#include - - static void exthdr_expr_print(const struct expr *expr, struct output_ctx *octx) - { -@@ -55,6 +56,11 @@ static void exthdr_expr_print(const struct expr *expr, struct output_ctx *octx) - if (expr->exthdr.flags & NFT_EXTHDR_F_PRESENT) - return; - nft_print(octx, " %s", expr->exthdr.tmpl->token); -+ } else if (expr->exthdr.op == NFT_EXTHDR_OP_SCTP) { -+ nft_print(octx, "sctp chunk %s", expr->exthdr.desc->name); -+ if (expr->exthdr.flags & NFT_EXTHDR_F_PRESENT) -+ return; -+ nft_print(octx, " %s", expr->exthdr.tmpl->token); - } else { - if (expr->exthdr.flags & NFT_EXTHDR_F_PRESENT) - nft_print(octx, "exthdr %s", expr->exthdr.desc->name); -@@ -197,6 +203,8 @@ void exthdr_init_raw(struct expr *expr, uint8_t type, - return tcpopt_init_raw(expr, type, offset, len, flags); - if (op == NFT_EXTHDR_OP_IPV4) - return ipopt_init_raw(expr, type, offset, len, flags, true); -+ if (op == NFT_EXTHDR_OP_SCTP) -+ return sctp_chunk_init_raw(expr, type, offset, len, flags); - - expr->len = len; - expr->exthdr.flags = flags; -diff --git a/src/json.c b/src/json.c -index a6d0716..dfc9031 100644 ---- a/src/json.c -+++ b/src/json.c -@@ -692,6 +692,8 @@ json_t *exthdr_expr_json(const struct expr *expr, struct output_ctx *octx) - switch (expr->exthdr.op) { - case NFT_EXTHDR_OP_IPV4: - return json_pack("{s:o}", "ip option", root); -+ case NFT_EXTHDR_OP_SCTP: -+ return json_pack("{s:o}", "sctp chunk", root); - default: - return json_pack("{s:o}", "exthdr", root); - } -diff --git a/src/parser_bison.y b/src/parser_bison.y -index beb5995..5ab5744 100644 ---- a/src/parser_bison.y -+++ b/src/parser_bison.y -@@ -38,6 +38,7 @@ - #include - #include - #include -+#include - - #include "parser_bison.h" - -@@ -402,6 +403,40 @@ int nft_lex(void *, void *, void *); - %token DCCP "dccp" - - %token SCTP "sctp" -+%token CHUNK "chunk" -+%token DATA "data" -+%token INIT "init" -+%token INIT_ACK "init-ack" -+%token HEARTBEAT "heartbeat" -+%token HEARTBEAT_ACK "heartbeat-ack" -+%token ABORT "abort" -+%token SHUTDOWN "shutdown" -+%token SHUTDOWN_ACK "shutdown-ack" -+%token ERROR "error" -+%token COOKIE_ECHO "cookie-echo" -+%token COOKIE_ACK "cookie-ack" -+%token ECNE "ecne" -+%token CWR "cwr" -+%token SHUTDOWN_COMPLETE "shutdown-complete" -+%token ASCONF_ACK "asconf-ack" -+%token FORWARD_TSN "forward-tsn" -+%token ASCONF "asconf" -+%token TSN "tsn" -+%token STREAM "stream" -+%token SSN "ssn" -+%token PPID "ppid" -+%token INIT_TAG "init-tag" -+%token A_RWND "a-rwnd" -+%token NUM_OSTREAMS "num-outbound-streams" -+%token NUM_ISTREAMS "num-inbound-streams" -+%token INIT_TSN "initial-tsn" -+%token CUM_TSN_ACK "cum-tsn-ack" -+%token NUM_GACK_BLOCKS "num-gap-ack-blocks" -+%token NUM_DUP_TSNS "num-dup-tsns" -+%token LOWEST_TSN "lowest-tsn" -+%token SEQNO "seqno" -+%token NEW_CUM_TSN "new-cum-tsn" -+ - %token VTAG "vtag" - - %token RT "rt" -@@ -746,9 +781,12 @@ int nft_lex(void *, void *, void *); - %type udp_hdr_expr udplite_hdr_expr - %destructor { expr_free($$); } udp_hdr_expr udplite_hdr_expr - %type udp_hdr_field udplite_hdr_field --%type dccp_hdr_expr sctp_hdr_expr --%destructor { expr_free($$); } dccp_hdr_expr sctp_hdr_expr -+%type dccp_hdr_expr sctp_hdr_expr sctp_chunk_alloc 
-+%destructor { expr_free($$); } dccp_hdr_expr sctp_hdr_expr sctp_chunk_alloc - %type dccp_hdr_field sctp_hdr_field -+%type sctp_chunk_type sctp_chunk_common_field -+%type sctp_chunk_data_field sctp_chunk_init_field -+%type sctp_chunk_sack_field - %type th_hdr_expr - %destructor { expr_free($$); } th_hdr_expr - %type th_hdr_field -@@ -843,6 +881,7 @@ opt_newline : NEWLINE - close_scope_hash : { scanner_pop_start_cond(nft->scanner, PARSER_SC_EXPR_HASH); }; - close_scope_numgen : { scanner_pop_start_cond(nft->scanner, PARSER_SC_EXPR_NUMGEN); }; - close_scope_sctp : { scanner_pop_start_cond(nft->scanner, PARSER_SC_SCTP); }; -+close_scope_sctp_chunk : { scanner_pop_start_cond(nft->scanner, PARSER_SC_EXPR_SCTP_CHUNK); }; - - common_block : INCLUDE QUOTED_STRING stmt_separator - { -@@ -4783,10 +4822,115 @@ dccp_hdr_field : SPORT { $$ = DCCPHDR_SPORT; } - | TYPE { $$ = DCCPHDR_TYPE; } - ; - -+sctp_chunk_type : DATA { $$ = SCTP_CHUNK_TYPE_DATA; } -+ | INIT { $$ = SCTP_CHUNK_TYPE_INIT; } -+ | INIT_ACK { $$ = SCTP_CHUNK_TYPE_INIT_ACK; } -+ | SACK { $$ = SCTP_CHUNK_TYPE_SACK; } -+ | HEARTBEAT { $$ = SCTP_CHUNK_TYPE_HEARTBEAT; } -+ | HEARTBEAT_ACK { $$ = SCTP_CHUNK_TYPE_HEARTBEAT_ACK; } -+ | ABORT { $$ = SCTP_CHUNK_TYPE_ABORT; } -+ | SHUTDOWN { $$ = SCTP_CHUNK_TYPE_SHUTDOWN; } -+ | SHUTDOWN_ACK { $$ = SCTP_CHUNK_TYPE_SHUTDOWN_ACK; } -+ | ERROR { $$ = SCTP_CHUNK_TYPE_ERROR; } -+ | COOKIE_ECHO { $$ = SCTP_CHUNK_TYPE_COOKIE_ECHO; } -+ | COOKIE_ACK { $$ = SCTP_CHUNK_TYPE_COOKIE_ACK; } -+ | ECNE { $$ = SCTP_CHUNK_TYPE_ECNE; } -+ | CWR { $$ = SCTP_CHUNK_TYPE_CWR; } -+ | SHUTDOWN_COMPLETE { $$ = SCTP_CHUNK_TYPE_SHUTDOWN_COMPLETE; } -+ | ASCONF_ACK { $$ = SCTP_CHUNK_TYPE_ASCONF_ACK; } -+ | FORWARD_TSN { $$ = SCTP_CHUNK_TYPE_FORWARD_TSN; } -+ | ASCONF { $$ = SCTP_CHUNK_TYPE_ASCONF; } -+ ; -+ -+sctp_chunk_common_field : TYPE { $$ = SCTP_CHUNK_COMMON_TYPE; } -+ | FLAGS { $$ = SCTP_CHUNK_COMMON_FLAGS; } -+ | LENGTH { $$ = SCTP_CHUNK_COMMON_LENGTH; } -+ ; -+ -+sctp_chunk_data_field : TSN { $$ = SCTP_CHUNK_DATA_TSN; } -+ | STREAM { $$ = SCTP_CHUNK_DATA_STREAM; } -+ | SSN { $$ = SCTP_CHUNK_DATA_SSN; } -+ | PPID { $$ = SCTP_CHUNK_DATA_PPID; } -+ ; -+ -+sctp_chunk_init_field : INIT_TAG { $$ = SCTP_CHUNK_INIT_TAG; } -+ | A_RWND { $$ = SCTP_CHUNK_INIT_RWND; } -+ | NUM_OSTREAMS { $$ = SCTP_CHUNK_INIT_OSTREAMS; } -+ | NUM_ISTREAMS { $$ = SCTP_CHUNK_INIT_ISTREAMS; } -+ | INIT_TSN { $$ = SCTP_CHUNK_INIT_TSN; } -+ ; -+ -+sctp_chunk_sack_field : CUM_TSN_ACK { $$ = SCTP_CHUNK_SACK_CTSN_ACK; } -+ | A_RWND { $$ = SCTP_CHUNK_SACK_RWND; } -+ | NUM_GACK_BLOCKS { $$ = SCTP_CHUNK_SACK_GACK_BLOCKS; } -+ | NUM_DUP_TSNS { $$ = SCTP_CHUNK_SACK_DUP_TSNS; } -+ ; -+ -+sctp_chunk_alloc : sctp_chunk_type -+ { -+ $$ = sctp_chunk_expr_alloc(&@$, $1, SCTP_CHUNK_COMMON_TYPE); -+ $$->exthdr.flags = NFT_EXTHDR_F_PRESENT; -+ } -+ | sctp_chunk_type sctp_chunk_common_field -+ { -+ $$ = sctp_chunk_expr_alloc(&@$, $1, $2); -+ } -+ | DATA sctp_chunk_data_field -+ { -+ $$ = sctp_chunk_expr_alloc(&@$, SCTP_CHUNK_TYPE_DATA, $2); -+ } -+ | INIT sctp_chunk_init_field -+ { -+ $$ = sctp_chunk_expr_alloc(&@$, SCTP_CHUNK_TYPE_INIT, $2); -+ } -+ | INIT_ACK sctp_chunk_init_field -+ { -+ $$ = sctp_chunk_expr_alloc(&@$, SCTP_CHUNK_TYPE_INIT_ACK, $2); -+ } -+ | SACK sctp_chunk_sack_field -+ { -+ $$ = sctp_chunk_expr_alloc(&@$, SCTP_CHUNK_TYPE_SACK, $2); -+ } -+ | SHUTDOWN CUM_TSN_ACK -+ { -+ $$ = sctp_chunk_expr_alloc(&@$, SCTP_CHUNK_TYPE_SHUTDOWN, -+ SCTP_CHUNK_SHUTDOWN_CTSN_ACK); -+ } -+ | ECNE LOWEST_TSN -+ { -+ $$ = sctp_chunk_expr_alloc(&@$, SCTP_CHUNK_TYPE_ECNE, 
-+ SCTP_CHUNK_ECNE_CWR_MIN_TSN); -+ } -+ | CWR LOWEST_TSN -+ { -+ $$ = sctp_chunk_expr_alloc(&@$, SCTP_CHUNK_TYPE_CWR, -+ SCTP_CHUNK_ECNE_CWR_MIN_TSN); -+ } -+ | ASCONF_ACK SEQNO -+ { -+ $$ = sctp_chunk_expr_alloc(&@$, SCTP_CHUNK_TYPE_ASCONF_ACK, -+ SCTP_CHUNK_ASCONF_SEQNO); -+ } -+ | FORWARD_TSN NEW_CUM_TSN -+ { -+ $$ = sctp_chunk_expr_alloc(&@$, SCTP_CHUNK_TYPE_FORWARD_TSN, -+ SCTP_CHUNK_FORWARD_TSN_NCTSN); -+ } -+ | ASCONF SEQNO -+ { -+ $$ = sctp_chunk_expr_alloc(&@$, SCTP_CHUNK_TYPE_ASCONF, -+ SCTP_CHUNK_ASCONF_SEQNO); -+ } -+ ; -+ - sctp_hdr_expr : SCTP sctp_hdr_field close_scope_sctp - { - $$ = payload_expr_alloc(&@$, &proto_sctp, $2); - } -+ | SCTP CHUNK sctp_chunk_alloc close_scope_sctp_chunk close_scope_sctp -+ { -+ $$ = $3; -+ } - ; - - sctp_hdr_field : SPORT { $$ = SCTPHDR_SPORT; } -diff --git a/src/parser_json.c b/src/parser_json.c -index fbf7db5..a069a89 100644 ---- a/src/parser_json.c -+++ b/src/parser_json.c -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include - - #include -@@ -705,6 +706,53 @@ static struct expr *json_parse_ip_option_expr(struct json_ctx *ctx, - return ipopt_expr_alloc(int_loc, descval, fieldval, 0); - } - -+static int json_parse_sctp_chunk_field(const struct exthdr_desc *desc, -+ const char *name, int *val) -+{ -+ unsigned int i; -+ -+ for (i = 0; i < array_size(desc->templates); i++) { -+ if (desc->templates[i].token && -+ !strcmp(desc->templates[i].token, name)) { -+ if (val) -+ *val = i; -+ return 0; -+ } -+ } -+ return 1; -+} -+ -+static struct expr *json_parse_sctp_chunk_expr(struct json_ctx *ctx, -+ const char *type, json_t *root) -+{ -+ const struct exthdr_desc *desc; -+ const char *name, *field; -+ struct expr *expr; -+ int fieldval; -+ -+ if (json_unpack_err(ctx, root, "{s:s}", "name", &name)) -+ return NULL; -+ -+ desc = sctp_chunk_protocol_find(name); -+ if (!desc) { -+ json_error(ctx, "Unknown sctp chunk name '%s'.", name); -+ return NULL; -+ } -+ -+ if (json_unpack(root, "{s:s}", "field", &field)) { -+ expr = sctp_chunk_expr_alloc(int_loc, desc->type, -+ SCTP_CHUNK_COMMON_TYPE); -+ expr->exthdr.flags = NFT_EXTHDR_F_PRESENT; -+ -+ return expr; -+ } -+ if (json_parse_sctp_chunk_field(desc, field, &fieldval)) { -+ json_error(ctx, "Unknown sctp chunk field '%s'.", field); -+ return NULL; -+ } -+ return sctp_chunk_expr_alloc(int_loc, desc->type, fieldval); -+} -+ - static const struct exthdr_desc *exthdr_lookup_byname(const char *name) - { - const struct exthdr_desc *exthdr_tbl[] = { -@@ -1410,6 +1458,7 @@ static struct expr *json_parse_expr(struct json_ctx *ctx, json_t *root) - { "exthdr", json_parse_exthdr_expr, CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, - { "tcp option", json_parse_tcp_option_expr, CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_MANGLE | CTX_F_SES | CTX_F_CONCAT }, - { "ip option", json_parse_ip_option_expr, CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_MANGLE | CTX_F_SES | CTX_F_CONCAT }, -+ { "sctp chunk", json_parse_sctp_chunk_expr, CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_MANGLE | CTX_F_SES | CTX_F_CONCAT }, - { "meta", json_parse_meta_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_SET_RHS | CTX_F_MANGLE | CTX_F_SES | CTX_F_MAP | CTX_F_CONCAT }, - { "osf", json_parse_osf_expr, CTX_F_STMT | CTX_F_PRIMARY | CTX_F_MAP | CTX_F_CONCAT }, - { "ipsec", json_parse_xfrm_expr, CTX_F_PRIMARY | CTX_F_MAP | CTX_F_CONCAT }, -diff --git a/src/scanner.l b/src/scanner.l -index b79ae55..fe1b8ad 100644 ---- a/src/scanner.l -+++ b/src/scanner.l -@@ -199,6 +199,7 @@ addrstring ({macaddr}|{ip4addr}|{ip6addr}) - %s 
SCANSTATE_SCTP - %s SCANSTATE_EXPR_HASH - %s SCANSTATE_EXPR_NUMGEN -+%s SCANSTATE_EXPR_SCTP_CHUNK - - %% - -@@ -492,9 +493,46 @@ addrstring ({macaddr}|{ip4addr}|{ip6addr}) - "sctp" { scanner_push_start_cond(yyscanner, SCANSTATE_SCTP); return SCTP; } - - { -+ "chunk" { scanner_push_start_cond(yyscanner, SCANSTATE_EXPR_SCTP_CHUNK); return CHUNK; } - "vtag" { return VTAG; } - } - -+{ -+ "data" { return DATA; } -+ "init" { return INIT; } -+ "init-ack" { return INIT_ACK; } -+ "heartbeat" { return HEARTBEAT; } -+ "heartbeat-ack" { return HEARTBEAT_ACK; } -+ "abort" { return ABORT; } -+ "shutdown" { return SHUTDOWN; } -+ "shutdown-ack" { return SHUTDOWN_ACK; } -+ "error" { return ERROR; } -+ "cookie-echo" { return COOKIE_ECHO; } -+ "cookie-ack" { return COOKIE_ACK; } -+ "ecne" { return ECNE; } -+ "cwr" { return CWR; } -+ "shutdown-complete" { return SHUTDOWN_COMPLETE; } -+ "asconf-ack" { return ASCONF_ACK; } -+ "forward-tsn" { return FORWARD_TSN; } -+ "asconf" { return ASCONF; } -+ -+ "tsn" { return TSN; } -+ "stream" { return STREAM; } -+ "ssn" { return SSN; } -+ "ppid" { return PPID; } -+ "init-tag" { return INIT_TAG; } -+ "a-rwnd" { return A_RWND; } -+ "num-outbound-streams" { return NUM_OSTREAMS; } -+ "num-inbound-streams" { return NUM_ISTREAMS; } -+ "initial-tsn" { return INIT_TSN; } -+ "cum-tsn-ack" { return CUM_TSN_ACK; } -+ "num-gap-ack-blocks" { return NUM_GACK_BLOCKS; } -+ "num-dup-tsns" { return NUM_DUP_TSNS; } -+ "lowest-tsn" { return LOWEST_TSN; } -+ "seqno" { return SEQNO; } -+ "new-cum-tsn" { return NEW_CUM_TSN; } -+} -+ - "rt" { return RT; } - "rt0" { return RT0; } - "rt2" { return RT2; } -diff --git a/src/sctp_chunk.c b/src/sctp_chunk.c -new file mode 100644 -index 0000000..6e73e72 ---- /dev/null -+++ b/src/sctp_chunk.c -@@ -0,0 +1,261 @@ -+/* -+ * Copyright Red Hat -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 (or any -+ * later) as published by the Free Software Foundation. 
-+ */ -+ -+#include -+#include -+ -+#include -+ -+#define PHT(__token, __offset, __len) \ -+ PROTO_HDR_TEMPLATE(__token, &integer_type, BYTEORDER_BIG_ENDIAN, \ -+ __offset, __len) -+ -+static const struct exthdr_desc sctp_chunk_data = { -+ .name = "data", -+ .type = SCTP_CHUNK_TYPE_DATA, -+ .templates = { -+ [SCTP_CHUNK_COMMON_TYPE] = PHT("type", 0, 8), -+ [SCTP_CHUNK_COMMON_FLAGS] = PHT("flags", 8, 8), -+ [SCTP_CHUNK_COMMON_LENGTH] = PHT("length", 16, 16), -+ [SCTP_CHUNK_DATA_TSN] = PHT("tsn", 32, 32), -+ [SCTP_CHUNK_DATA_STREAM] = PHT("stream", 64, 16), -+ [SCTP_CHUNK_DATA_SSN] = PHT("ssn", 80, 16), -+ [SCTP_CHUNK_DATA_PPID] = PHT("ppid", 96, 32), -+ }, -+}; -+ -+static const struct exthdr_desc sctp_chunk_init = { -+ .name = "init", -+ .type = SCTP_CHUNK_TYPE_INIT, -+ .templates = { -+ [SCTP_CHUNK_COMMON_TYPE] = PHT("type", 0, 8), -+ [SCTP_CHUNK_COMMON_FLAGS] = PHT("flags", 8, 8), -+ [SCTP_CHUNK_COMMON_LENGTH] = PHT("length", 16, 16), -+ [SCTP_CHUNK_INIT_TAG] = PHT("init-tag", 32, 32), -+ [SCTP_CHUNK_INIT_RWND] = PHT("a-rwnd", 64, 32), -+ [SCTP_CHUNK_INIT_OSTREAMS] = PHT("num-outbound-streams", 96, 16), -+ [SCTP_CHUNK_INIT_ISTREAMS] = PHT("num-inbound-streams", 112, 16), -+ [SCTP_CHUNK_INIT_TSN] = PHT("initial-tsn", 128, 32), -+ }, -+}; -+ -+static const struct exthdr_desc sctp_chunk_init_ack = { -+ .name = "init-ack", -+ .type = SCTP_CHUNK_TYPE_INIT_ACK, -+ .templates = { -+ [SCTP_CHUNK_COMMON_TYPE] = PHT("type", 0, 8), -+ [SCTP_CHUNK_COMMON_FLAGS] = PHT("flags", 8, 8), -+ [SCTP_CHUNK_COMMON_LENGTH] = PHT("length", 16, 16), -+ [SCTP_CHUNK_INIT_TAG] = PHT("init-tag", 32, 32), -+ [SCTP_CHUNK_INIT_RWND] = PHT("a-rwnd", 64, 32), -+ [SCTP_CHUNK_INIT_OSTREAMS] = PHT("num-outbound-streams", 96, 16), -+ [SCTP_CHUNK_INIT_ISTREAMS] = PHT("num-inbound-streams", 112, 16), -+ [SCTP_CHUNK_INIT_TSN] = PHT("initial-tsn", 128, 32), -+ }, -+}; -+ -+static const struct exthdr_desc sctp_chunk_sack = { -+ .name = "sack", -+ .type = SCTP_CHUNK_TYPE_SACK, -+ .templates = { -+ [SCTP_CHUNK_COMMON_TYPE] = PHT("type", 0, 8), -+ [SCTP_CHUNK_COMMON_FLAGS] = PHT("flags", 8, 8), -+ [SCTP_CHUNK_COMMON_LENGTH] = PHT("length", 16, 16), -+ [SCTP_CHUNK_SACK_CTSN_ACK] = PHT("cum-tsn-ack", 32, 32), -+ [SCTP_CHUNK_SACK_RWND] = PHT("a-rwnd", 64, 32), -+ [SCTP_CHUNK_SACK_GACK_BLOCKS] = PHT("num-gap-ack-blocks", 96, 16), -+ [SCTP_CHUNK_SACK_DUP_TSNS] = PHT("num-dup-tsns", 112, 16), -+ }, -+}; -+ -+static const struct exthdr_desc sctp_chunk_shutdown = { -+ .name = "shutdown", -+ .type = SCTP_CHUNK_TYPE_SHUTDOWN, -+ .templates = { -+ [SCTP_CHUNK_COMMON_TYPE] = PHT("type", 0, 8), -+ [SCTP_CHUNK_COMMON_FLAGS] = PHT("flags", 8, 8), -+ [SCTP_CHUNK_COMMON_LENGTH] = PHT("length", 16, 16), -+ [SCTP_CHUNK_SHUTDOWN_CTSN_ACK] = PHT("cum-tsn-ack", 32, 32), -+ }, -+}; -+ -+static const struct exthdr_desc sctp_chunk_ecne = { -+ .name = "ecne", -+ .type = SCTP_CHUNK_TYPE_ECNE, -+ .templates = { -+ [SCTP_CHUNK_COMMON_TYPE] = PHT("type", 0, 8), -+ [SCTP_CHUNK_COMMON_FLAGS] = PHT("flags", 8, 8), -+ [SCTP_CHUNK_COMMON_LENGTH] = PHT("length", 16, 16), -+ [SCTP_CHUNK_ECNE_CWR_MIN_TSN] = PHT("lowest-tsn", 32, 32), -+ }, -+}; -+ -+static const struct exthdr_desc sctp_chunk_cwr = { -+ .name = "cwr", -+ .type = SCTP_CHUNK_TYPE_CWR, -+ .templates = { -+ [SCTP_CHUNK_COMMON_TYPE] = PHT("type", 0, 8), -+ [SCTP_CHUNK_COMMON_FLAGS] = PHT("flags", 8, 8), -+ [SCTP_CHUNK_COMMON_LENGTH] = PHT("length", 16, 16), -+ [SCTP_CHUNK_ECNE_CWR_MIN_TSN] = PHT("lowest-tsn", 32, 32), -+ }, -+}; -+ -+static const struct exthdr_desc sctp_chunk_asconf_ack = { -+ .name = 
"asconf-ack", -+ .type = SCTP_CHUNK_TYPE_ASCONF_ACK, -+ .templates = { -+ [SCTP_CHUNK_COMMON_TYPE] = PHT("type", 0, 8), -+ [SCTP_CHUNK_COMMON_FLAGS] = PHT("flags", 8, 8), -+ [SCTP_CHUNK_COMMON_LENGTH] = PHT("length", 16, 16), -+ [SCTP_CHUNK_ASCONF_SEQNO] = PHT("seqno", 32, 32), -+ }, -+}; -+ -+static const struct exthdr_desc sctp_chunk_forward_tsn = { -+ .name = "forward-tsn", -+ .type = SCTP_CHUNK_TYPE_FORWARD_TSN, -+ .templates = { -+ [SCTP_CHUNK_COMMON_TYPE] = PHT("type", 0, 8), -+ [SCTP_CHUNK_COMMON_FLAGS] = PHT("flags", 8, 8), -+ [SCTP_CHUNK_COMMON_LENGTH] = PHT("length", 16, 16), -+ [SCTP_CHUNK_FORWARD_TSN_NCTSN] = PHT("new-cum-tsn", 32, 32), -+ }, -+}; -+ -+static const struct exthdr_desc sctp_chunk_asconf = { -+ .name = "asconf", -+ .type = SCTP_CHUNK_TYPE_ASCONF, -+ .templates = { -+ [SCTP_CHUNK_COMMON_TYPE] = PHT("type", 0, 8), -+ [SCTP_CHUNK_COMMON_FLAGS] = PHT("flags", 8, 8), -+ [SCTP_CHUNK_COMMON_LENGTH] = PHT("length", 16, 16), -+ [SCTP_CHUNK_ASCONF_SEQNO] = PHT("seqno", 32, 32), -+ }, -+}; -+ -+#define SCTP_CHUNK_DESC_GENERATOR(descname, hname, desctype) \ -+static const struct exthdr_desc sctp_chunk_##descname = { \ -+ .name = #hname, \ -+ .type = SCTP_CHUNK_TYPE_##desctype, \ -+ .templates = { \ -+ [SCTP_CHUNK_COMMON_TYPE] = PHT("type", 0, 8), \ -+ [SCTP_CHUNK_COMMON_FLAGS] = PHT("flags", 8, 8), \ -+ [SCTP_CHUNK_COMMON_LENGTH] = PHT("length", 16, 16),\ -+ }, \ -+}; -+ -+SCTP_CHUNK_DESC_GENERATOR(heartbeat, heartbeat, HEARTBEAT) -+SCTP_CHUNK_DESC_GENERATOR(heartbeat_ack, heartbeat-ack, HEARTBEAT_ACK) -+SCTP_CHUNK_DESC_GENERATOR(abort, abort, ABORT) -+SCTP_CHUNK_DESC_GENERATOR(shutdown_ack, shutdown-ack, SHUTDOWN_ACK) -+SCTP_CHUNK_DESC_GENERATOR(error, error, ERROR) -+SCTP_CHUNK_DESC_GENERATOR(cookie_echo, cookie-echo, COOKIE_ECHO) -+SCTP_CHUNK_DESC_GENERATOR(cookie_ack, cookie-ack, COOKIE_ACK) -+SCTP_CHUNK_DESC_GENERATOR(shutdown_complete, shutdown-complete, SHUTDOWN_COMPLETE) -+ -+#undef SCTP_CHUNK_DESC_GENERATOR -+ -+static const struct exthdr_desc *sctp_chunk_protocols[] = { -+ [SCTP_CHUNK_TYPE_DATA] = &sctp_chunk_data, -+ [SCTP_CHUNK_TYPE_INIT] = &sctp_chunk_init, -+ [SCTP_CHUNK_TYPE_INIT_ACK] = &sctp_chunk_init_ack, -+ [SCTP_CHUNK_TYPE_SACK] = &sctp_chunk_sack, -+ [SCTP_CHUNK_TYPE_HEARTBEAT] = &sctp_chunk_heartbeat, -+ [SCTP_CHUNK_TYPE_HEARTBEAT_ACK] = &sctp_chunk_heartbeat_ack, -+ [SCTP_CHUNK_TYPE_ABORT] = &sctp_chunk_abort, -+ [SCTP_CHUNK_TYPE_SHUTDOWN] = &sctp_chunk_shutdown, -+ [SCTP_CHUNK_TYPE_SHUTDOWN_ACK] = &sctp_chunk_shutdown_ack, -+ [SCTP_CHUNK_TYPE_ERROR] = &sctp_chunk_error, -+ [SCTP_CHUNK_TYPE_COOKIE_ECHO] = &sctp_chunk_cookie_echo, -+ [SCTP_CHUNK_TYPE_COOKIE_ACK] = &sctp_chunk_cookie_ack, -+ [SCTP_CHUNK_TYPE_ECNE] = &sctp_chunk_ecne, -+ [SCTP_CHUNK_TYPE_CWR] = &sctp_chunk_cwr, -+ [SCTP_CHUNK_TYPE_SHUTDOWN_COMPLETE] = &sctp_chunk_shutdown_complete, -+ [SCTP_CHUNK_TYPE_ASCONF_ACK] = &sctp_chunk_asconf_ack, -+ [SCTP_CHUNK_TYPE_FORWARD_TSN] = &sctp_chunk_forward_tsn, -+ [SCTP_CHUNK_TYPE_ASCONF] = &sctp_chunk_asconf, -+}; -+ -+const struct exthdr_desc *sctp_chunk_protocol_find(const char *name) -+{ -+ unsigned int i; -+ -+ for (i = 0; i < array_size(sctp_chunk_protocols); i++) { -+ if (sctp_chunk_protocols[i] && -+ !strcmp(sctp_chunk_protocols[i]->name, name)) -+ return sctp_chunk_protocols[i]; -+ } -+ return NULL; -+} -+ -+struct expr *sctp_chunk_expr_alloc(const struct location *loc, -+ unsigned int type, unsigned int field) -+{ -+ const struct proto_hdr_template *tmpl; -+ const struct exthdr_desc *desc = NULL; -+ struct expr *expr; -+ -+ if (type < 
array_size(sctp_chunk_protocols)) -+ desc = sctp_chunk_protocols[type]; -+ -+ if (!desc) -+ return NULL; -+ -+ tmpl = &desc->templates[field]; -+ if (!tmpl) -+ return NULL; -+ -+ expr = expr_alloc(loc, EXPR_EXTHDR, tmpl->dtype, -+ BYTEORDER_BIG_ENDIAN, tmpl->len); -+ expr->exthdr.desc = desc; -+ expr->exthdr.tmpl = tmpl; -+ expr->exthdr.op = NFT_EXTHDR_OP_SCTP; -+ expr->exthdr.raw_type = desc->type; -+ expr->exthdr.offset = tmpl->offset; -+ -+ return expr; -+} -+ -+void sctp_chunk_init_raw(struct expr *expr, uint8_t type, unsigned int off, -+ unsigned int len, uint32_t flags) -+{ -+ const struct proto_hdr_template *tmpl; -+ unsigned int i; -+ -+ assert(expr->etype == EXPR_EXTHDR); -+ -+ expr->len = len; -+ expr->exthdr.flags = flags; -+ expr->exthdr.offset = off; -+ expr->exthdr.op = NFT_EXTHDR_OP_SCTP; -+ -+ if (flags & NFT_EXTHDR_F_PRESENT) -+ datatype_set(expr, &boolean_type); -+ else -+ datatype_set(expr, &integer_type); -+ -+ if (type >= array_size(sctp_chunk_protocols)) -+ return; -+ -+ expr->exthdr.desc = sctp_chunk_protocols[type]; -+ expr->exthdr.flags = flags; -+ assert(expr->exthdr.desc != NULL); -+ -+ for (i = 0; i < array_size(expr->exthdr.desc->templates); ++i) { -+ tmpl = &expr->exthdr.desc->templates[i]; -+ if (tmpl->offset != off || tmpl->len != len) -+ continue; -+ -+ if ((flags & NFT_EXTHDR_F_PRESENT) == 0) -+ datatype_set(expr, tmpl->dtype); -+ -+ expr->exthdr.tmpl = tmpl; -+ break; -+ } -+} -diff --git a/tests/py/inet/sctp.t b/tests/py/inet/sctp.t -index 5188b57..3d1c2fd 100644 ---- a/tests/py/inet/sctp.t -+++ b/tests/py/inet/sctp.t -@@ -41,3 +41,40 @@ sctp vtag {33, 55, 67, 88};ok - sctp vtag != {33, 55, 67, 88};ok - sctp vtag { 33-55};ok - sctp vtag != { 33-55};ok -+ -+# assert all chunk types are recognized -+sctp chunk data exists;ok -+sctp chunk init exists;ok -+sctp chunk init-ack exists;ok -+sctp chunk sack exists;ok -+sctp chunk heartbeat exists;ok -+sctp chunk heartbeat-ack exists;ok -+sctp chunk abort exists;ok -+sctp chunk shutdown exists;ok -+sctp chunk shutdown-ack exists;ok -+sctp chunk error exists;ok -+sctp chunk cookie-echo exists;ok -+sctp chunk cookie-ack exists;ok -+sctp chunk ecne exists;ok -+sctp chunk cwr exists;ok -+sctp chunk shutdown-complete exists;ok -+sctp chunk asconf-ack exists;ok -+sctp chunk forward-tsn exists;ok -+sctp chunk asconf exists;ok -+ -+# test common header fields in random chunk types -+sctp chunk data type 0;ok -+sctp chunk init flags 23;ok -+sctp chunk init-ack length 42;ok -+ -+# test one custom field in every applicable chunk type -+sctp chunk data stream 1337;ok -+sctp chunk init initial-tsn 5;ok -+sctp chunk init-ack num-outbound-streams 3;ok -+sctp chunk sack a-rwnd 1;ok -+sctp chunk shutdown cum-tsn-ack 65535;ok -+sctp chunk ecne lowest-tsn 5;ok -+sctp chunk cwr lowest-tsn 8;ok -+sctp chunk asconf-ack seqno 12345;ok -+sctp chunk forward-tsn new-cum-tsn 31337;ok -+sctp chunk asconf seqno 12345;ok -diff --git a/tests/py/inet/sctp.t.json b/tests/py/inet/sctp.t.json -index 2684b03..8135686 100644 ---- a/tests/py/inet/sctp.t.json -+++ b/tests/py/inet/sctp.t.json -@@ -608,3 +608,481 @@ - } - ] - -+# sctp chunk data exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "data" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk init exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "init" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk init-ack exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "init-ack" -+ } -+ 
}, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk sack exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "sack" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk heartbeat exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "heartbeat" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk heartbeat-ack exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "heartbeat-ack" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk abort exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "abort" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk shutdown exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "shutdown" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk shutdown-ack exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "shutdown-ack" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk error exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "error" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk cookie-echo exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "cookie-echo" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk cookie-ack exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "cookie-ack" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk ecne exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "ecne" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk cwr exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "cwr" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk shutdown-complete exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "shutdown-complete" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk asconf-ack exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "asconf-ack" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk forward-tsn exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "forward-tsn" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk asconf exists -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "name": "asconf" -+ } -+ }, -+ "op": "==", -+ "right": true -+ } -+ } -+] -+ -+# sctp chunk data type 0 -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "field": "type", -+ "name": "data" -+ } -+ }, -+ "op": "==", -+ "right": 0 -+ } -+ } -+] -+ -+# sctp chunk init flags 23 -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "field": "flags", -+ "name": "init" -+ } -+ }, -+ "op": "==", -+ "right": 23 -+ } -+ } -+] -+ -+# sctp chunk init-ack length 42 -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "field": "length", -+ "name": "init-ack" -+ } -+ }, -+ "op": "==", -+ "right": 42 -+ } -+ } -+] -+ -+# sctp chunk data stream 1337 -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "field": "stream", -+ "name": "data" -+ } -+ }, -+ "op": "==", -+ "right": 1337 -+ } -+ } -+] -+ -+# sctp chunk init initial-tsn 5 -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "field": "initial-tsn", -+ "name": "init" -+ } -+ }, -+ "op": "==", -+ "right": 5 -+ } -+ } -+] -+ -+# sctp chunk init-ack 
num-outbound-streams 3 -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "field": "num-outbound-streams", -+ "name": "init-ack" -+ } -+ }, -+ "op": "==", -+ "right": 3 -+ } -+ } -+] -+ -+# sctp chunk sack a-rwnd 1 -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "field": "a-rwnd", -+ "name": "sack" -+ } -+ }, -+ "op": "==", -+ "right": 1 -+ } -+ } -+] -+ -+# sctp chunk shutdown cum-tsn-ack 65535 -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "field": "cum-tsn-ack", -+ "name": "shutdown" -+ } -+ }, -+ "op": "==", -+ "right": 65535 -+ } -+ } -+] -+ -+# sctp chunk ecne lowest-tsn 5 -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "field": "lowest-tsn", -+ "name": "ecne" -+ } -+ }, -+ "op": "==", -+ "right": 5 -+ } -+ } -+] -+ -+# sctp chunk cwr lowest-tsn 8 -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "field": "lowest-tsn", -+ "name": "cwr" -+ } -+ }, -+ "op": "==", -+ "right": 8 -+ } -+ } -+] -+ -+# sctp chunk asconf-ack seqno 12345 -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "field": "seqno", -+ "name": "asconf-ack" -+ } -+ }, -+ "op": "==", -+ "right": 12345 -+ } -+ } -+] -+ -+# sctp chunk forward-tsn new-cum-tsn 31337 -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "field": "new-cum-tsn", -+ "name": "forward-tsn" -+ } -+ }, -+ "op": "==", -+ "right": 31337 -+ } -+ } -+] -+ -+# sctp chunk asconf seqno 12345 -+[ -+ { -+ "match": { -+ "left": { -+ "sctp chunk": { -+ "field": "seqno", -+ "name": "asconf" -+ } -+ }, -+ "op": "==", -+ "right": 12345 -+ } -+ } -+] -+ -diff --git a/tests/py/inet/sctp.t.payload b/tests/py/inet/sctp.t.payload -index ecfcc72..9c4854c 100644 ---- a/tests/py/inet/sctp.t.payload -+++ b/tests/py/inet/sctp.t.payload -@@ -274,3 +274,158 @@ inet test-inet input - [ payload load 4b @ transport header + 4 => reg 1 ] - [ lookup reg 1 set __set%d 0x1 ] - -+# sctp chunk data exists -+ip -+ [ exthdr load 1b @ 0 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk init exists -+ip -+ [ exthdr load 1b @ 1 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk init-ack exists -+ip -+ [ exthdr load 1b @ 2 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk sack exists -+ip -+ [ exthdr load 1b @ 3 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk heartbeat exists -+ip -+ [ exthdr load 1b @ 4 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk heartbeat-ack exists -+ip -+ [ exthdr load 1b @ 5 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk abort exists -+ip -+ [ exthdr load 1b @ 6 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk shutdown exists -+ip -+ [ exthdr load 1b @ 7 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk shutdown-ack exists -+ip -+ [ exthdr load 1b @ 8 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk error exists -+ip -+ [ exthdr load 1b @ 9 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk cookie-echo exists -+ip -+ [ exthdr load 1b @ 10 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk cookie-ack exists -+ip -+ [ exthdr load 1b @ 11 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk ecne exists -+ip -+ [ exthdr load 1b @ 12 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk cwr exists -+ip -+ [ exthdr load 1b @ 13 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk shutdown-complete exists -+ip -+ [ 
exthdr load 1b @ 14 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk asconf-ack exists -+ip -+ [ exthdr load 1b @ 128 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk forward-tsn exists -+ip -+ [ exthdr load 1b @ 192 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk asconf exists -+ip -+ [ exthdr load 1b @ 193 + 0 present => reg 1 ] -+ [ cmp eq reg 1 0x00000001 ] -+ -+# sctp chunk data type 0 -+ip -+ [ exthdr load 1b @ 0 + 0 => reg 1 ] -+ [ cmp eq reg 1 0x00000000 ] -+ -+# sctp chunk init flags 23 -+ip -+ [ exthdr load 1b @ 1 + 1 => reg 1 ] -+ [ cmp eq reg 1 0x00000017 ] -+ -+# sctp chunk init-ack length 42 -+ip -+ [ exthdr load 2b @ 2 + 2 => reg 1 ] -+ [ cmp eq reg 1 0x00002a00 ] -+ -+# sctp chunk data stream 1337 -+ip -+ [ exthdr load 2b @ 0 + 8 => reg 1 ] -+ [ cmp eq reg 1 0x00003905 ] -+ -+# sctp chunk init initial-tsn 5 -+ip -+ [ exthdr load 4b @ 1 + 16 => reg 1 ] -+ [ cmp eq reg 1 0x05000000 ] -+ -+# sctp chunk init-ack num-outbound-streams 3 -+ip -+ [ exthdr load 2b @ 2 + 12 => reg 1 ] -+ [ cmp eq reg 1 0x00000300 ] -+ -+# sctp chunk sack a-rwnd 1 -+ip -+ [ exthdr load 4b @ 3 + 8 => reg 1 ] -+ [ cmp eq reg 1 0x01000000 ] -+ -+# sctp chunk shutdown cum-tsn-ack 65535 -+ip -+ [ exthdr load 4b @ 7 + 4 => reg 1 ] -+ [ cmp eq reg 1 0xffff0000 ] -+ -+# sctp chunk ecne lowest-tsn 5 -+ip -+ [ exthdr load 4b @ 12 + 4 => reg 1 ] -+ [ cmp eq reg 1 0x05000000 ] -+ -+# sctp chunk cwr lowest-tsn 8 -+ip -+ [ exthdr load 4b @ 13 + 4 => reg 1 ] -+ [ cmp eq reg 1 0x08000000 ] -+ -+# sctp chunk asconf-ack seqno 12345 -+ip -+ [ exthdr load 4b @ 128 + 4 => reg 1 ] -+ [ cmp eq reg 1 0x39300000 ] -+ -+# sctp chunk forward-tsn new-cum-tsn 31337 -+ip -+ [ exthdr load 4b @ 192 + 4 => reg 1 ] -+ [ cmp eq reg 1 0x697a0000 ] -+ -+# sctp chunk asconf seqno 12345 -+ip -+ [ exthdr load 4b @ 193 + 4 => reg 1 ] -+ [ cmp eq reg 1 0x39300000 ] -+ --- -2.31.1 - diff --git a/SOURCES/0060-include-missing-sctp_chunk.h-in-Makefile.am.patch b/SOURCES/0060-include-missing-sctp_chunk.h-in-Makefile.am.patch deleted file mode 100644 index 9bafe70..0000000 --- a/SOURCES/0060-include-missing-sctp_chunk.h-in-Makefile.am.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 7ba8ea2cf06230e647b096f40d3006abec45f801 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 12 Jul 2021 16:33:20 +0200 -Subject: [PATCH] include: missing sctp_chunk.h in Makefile.am - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1979334 -Upstream Status: nftables commit 117ceb4f52711 - -commit 117ceb4f527119a6d44bf5e23f2ff7a8d116658a -Author: Pablo Neira Ayuso -Date: Tue May 25 14:04:36 2021 +0200 - - include: missing sctp_chunk.h in Makefile.am - - Fix make distcheck. 
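(Background, not part of the quoted commit message: make distcheck builds from the generated release tarball, and only files named in Makefile.am variables such as noinst_HEADERS get packed into that tarball, so a header left off the list is missing from the distributed sources and the distcheck build fails even though an ordinary in-tree build still works.)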
- - Fixes: 0e3871cfd9a1 ("exthdr: Implement SCTP Chunk matching") - Signed-off-by: Pablo Neira Ayuso ---- - include/Makefile.am | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/include/Makefile.am b/include/Makefile.am -index 04a4a61..4ee5124 100644 ---- a/include/Makefile.am -+++ b/include/Makefile.am -@@ -30,6 +30,7 @@ noinst_HEADERS = cli.h \ - osf.h \ - parser.h \ - proto.h \ -+ sctp_chunk.h \ - socket.h \ - rule.h \ - rt.h \ --- -2.31.1 - diff --git a/SOURCES/0061-doc-nft.8-Extend-monitor-description-by-trace.patch b/SOURCES/0061-doc-nft.8-Extend-monitor-description-by-trace.patch deleted file mode 100644 index 1d4174d..0000000 --- a/SOURCES/0061-doc-nft.8-Extend-monitor-description-by-trace.patch +++ /dev/null @@ -1,71 +0,0 @@ -From 5a735f26b0c6617b2851a7399c8ad118e89deba8 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 12 Jul 2021 16:34:38 +0200 -Subject: [PATCH] doc: nft.8: Extend monitor description by trace - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1820365 -Upstream Status: nftables commit 2acf8b2caea19 - -commit 2acf8b2caea19d8abd46d475a908f8d6afb33aa0 -Author: Phil Sutter -Date: Wed May 19 13:12:48 2021 +0200 - - doc: nft.8: Extend monitor description by trace - - Briefly describe 'nft monitor trace' command functionality. - - Signed-off-by: Phil Sutter ---- - doc/nft.txt | 25 ++++++++++++++++++++++--- - 1 file changed, 22 insertions(+), 3 deletions(-) - -diff --git a/doc/nft.txt b/doc/nft.txt -index abb9260..9cc35ee 100644 ---- a/doc/nft.txt -+++ b/doc/nft.txt -@@ -734,13 +734,26 @@ These are some additional commands included in nft. - MONITOR - ~~~~~~~~ - The monitor command allows you to listen to Netlink events produced by the --nf_tables subsystem, related to creation and deletion of objects. When they -+nf_tables subsystem. These are either related to creation and deletion of -+objects or to packets for which *meta nftrace* was enabled. When they - occur, nft will print to stdout the monitored events in either JSON or - native nft format. + - --To filter events related to a concrete object, use one of the keywords 'tables', 'chains', 'sets', 'rules', 'elements', 'ruleset'. + -+[verse] -+____ -+*monitor* [*new* | *destroy*] 'MONITOR_OBJECT' -+*monitor* *trace* -+ -+'MONITOR_OBJECT' := *tables* | *chains* | *sets* | *rules* | *elements* | *ruleset* -+____ -+ -+To filter events related to a concrete object, use one of the keywords in -+'MONITOR_OBJECT'. - --To filter events related to a concrete action, use keyword 'new' or 'destroy'. -+To filter events related to a concrete action, use keyword *new* or *destroy*. -+ -+The second form of invocation takes no further options and exclusively prints -+events generated for packets with *nftrace* enabled. - - Hit ^C to finish the monitor operation. - -@@ -764,6 +777,12 @@ Hit ^C to finish the monitor operation. 
- % nft monitor ruleset - --------------------- - -+.Trace incoming packets from host 10.0.0.1 -+------------------------------------------ -+% nft add rule filter input ip saddr 10.0.0.1 meta nftrace set 1 -+% nft monitor trace -+------------------------------------------ -+ - ERROR REPORTING - --------------- - When an error is detected, nft shows the line(s) containing the error, the --- -2.31.1 - diff --git a/SOURCES/0062-tests-shell-Fix-bogus-testsuite-failure-with-100Hz.patch b/SOURCES/0062-tests-shell-Fix-bogus-testsuite-failure-with-100Hz.patch deleted file mode 100644 index 0c6f24b..0000000 --- a/SOURCES/0062-tests-shell-Fix-bogus-testsuite-failure-with-100Hz.patch +++ /dev/null @@ -1,44 +0,0 @@ -From e8300966510001e38f2b6530607bac2a93de5c2e Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Mon, 2 Aug 2021 14:35:08 +0200 -Subject: [PATCH] tests: shell: Fix bogus testsuite failure with 100Hz - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1919203 -Upstream Status: nftables commit c9c5b5f621c37 - -commit c9c5b5f621c37d17140dac682d211825ef321093 -Author: Phil Sutter -Date: Mon Jul 26 15:27:32 2021 +0200 - - tests: shell: Fix bogus testsuite failure with 100Hz - - On kernels with CONFIG_HZ=100, clock granularity does not allow tracking - timeouts in single digit ms range. Change sets/0031set_timeout_size_0 to - not expose this detail. - - Signed-off-by: Phil Sutter - Acked-by: Florian Westphal ---- - tests/shell/testcases/sets/0031set_timeout_size_0 | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/tests/shell/testcases/sets/0031set_timeout_size_0 b/tests/shell/testcases/sets/0031set_timeout_size_0 -index 9edd5f6..796640d 100755 ---- a/tests/shell/testcases/sets/0031set_timeout_size_0 -+++ b/tests/shell/testcases/sets/0031set_timeout_size_0 -@@ -3,10 +3,10 @@ - RULESET="add table x - add set x y { type ipv4_addr; size 128; timeout 30s; flags dynamic; } - add chain x test --add rule x test set update ip saddr timeout 1d2h3m4s8ms @y -+add rule x test set update ip saddr timeout 1d2h3m4s10ms @y - add rule x test set update ip daddr timeout 100ms @y" - - set -e - $NFT -f - <<< "$RULESET" --$NFT list chain x test | grep -q 'update @y { ip saddr timeout 1d2h3m4s8ms }' -+$NFT list chain x test | grep -q 'update @y { ip saddr timeout 1d2h3m4s10ms }' - $NFT list chain x test | grep -q 'update @y { ip daddr timeout 100ms }' --- -2.31.1 - diff --git a/SOURCES/0063-parser_json-Fix-error-reporting-for-invalid-syntax.patch b/SOURCES/0063-parser_json-Fix-error-reporting-for-invalid-syntax.patch deleted file mode 100644 index 32f88c4..0000000 --- a/SOURCES/0063-parser_json-Fix-error-reporting-for-invalid-syntax.patch +++ /dev/null @@ -1,100 +0,0 @@ -From 8cb078a2f9f69259325c10f479c198349ef01ef2 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 6 Oct 2021 17:24:44 +0200 -Subject: [PATCH] parser_json: Fix error reporting for invalid syntax - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1994141 -Upstream Status: nftables commit 9fe5d1bc18cfa - -commit 9fe5d1bc18cfaed2ecf717e3dd9a97ff5b0e183c -Author: Phil Sutter -Date: Wed Sep 1 16:41:44 2021 +0200 - - parser_json: Fix error reporting for invalid syntax - - Errors emitted by the JSON parser caused BUG() in erec_print() due to - input descriptor values being bogus. - - Due to lack of 'include' support, JSON parser uses a single input - descriptor only and it lived inside the json_ctx object on stack of - nft_parse_json_*() functions. 
- - By the time errors are printed though, that scope is not valid anymore. - Move the static input descriptor object to avoid this. - - Fixes: 586ad210368b7 ("libnftables: Implement JSON parser") - Signed-off-by: Phil Sutter ---- - src/parser_json.c | 18 ++++++++---------- - 1 file changed, 8 insertions(+), 10 deletions(-) - -diff --git a/src/parser_json.c b/src/parser_json.c -index a069a89..ef4d4fb 100644 ---- a/src/parser_json.c -+++ b/src/parser_json.c -@@ -44,7 +44,6 @@ - #define CTX_F_CONCAT (1 << 8) /* inside concat_expr */ - - struct json_ctx { -- struct input_descriptor indesc; - struct nft_ctx *nft; - struct list_head *msgs; - struct list_head *cmds; -@@ -107,11 +106,12 @@ static struct stmt *json_parse_stmt(struct json_ctx *ctx, json_t *root); - /* parsing helpers */ - - const struct location *int_loc = &internal_location; -+static struct input_descriptor json_indesc; - - static void json_lib_error(struct json_ctx *ctx, json_error_t *err) - { - struct location loc = { -- .indesc = &ctx->indesc, -+ .indesc = &json_indesc, - .line_offset = err->position - err->column, - .first_line = err->line, - .last_line = err->line, -@@ -3864,16 +3864,15 @@ int nft_parse_json_buffer(struct nft_ctx *nft, const char *buf, - struct list_head *msgs, struct list_head *cmds) - { - struct json_ctx ctx = { -- .indesc = { -- .type = INDESC_BUFFER, -- .data = buf, -- }, - .nft = nft, - .msgs = msgs, - .cmds = cmds, - }; - int ret; - -+ json_indesc.type = INDESC_BUFFER; -+ json_indesc.data = buf; -+ - parser_init(nft, nft->state, msgs, cmds, nft->top_scope); - nft->json_root = json_loads(buf, 0, NULL); - if (!nft->json_root) -@@ -3892,10 +3891,6 @@ int nft_parse_json_filename(struct nft_ctx *nft, const char *filename, - struct list_head *msgs, struct list_head *cmds) - { - struct json_ctx ctx = { -- .indesc = { -- .type = INDESC_FILE, -- .name = filename, -- }, - .nft = nft, - .msgs = msgs, - .cmds = cmds, -@@ -3903,6 +3898,9 @@ int nft_parse_json_filename(struct nft_ctx *nft, const char *filename, - json_error_t err; - int ret; - -+ json_indesc.type = INDESC_FILE; -+ json_indesc.name = filename; -+ - parser_init(nft, nft->state, msgs, cmds, nft->top_scope); - nft->json_root = json_load_file(filename, 0, &err); - if (!nft->json_root) --- -2.31.1 - diff --git a/SOURCES/0064-parser_bison-Fix-for-implicit-declaration-of-isalnum.patch b/SOURCES/0064-parser_bison-Fix-for-implicit-declaration-of-isalnum.patch deleted file mode 100644 index 09f6950..0000000 --- a/SOURCES/0064-parser_bison-Fix-for-implicit-declaration-of-isalnum.patch +++ /dev/null @@ -1,37 +0,0 @@ -From bb4718fa421938c4a501b9a55df68de16a572f23 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 6 Oct 2021 17:32:04 +0200 -Subject: [PATCH] parser_bison: Fix for implicit declaration of isalnum - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1999059 -Upstream Status: nftables commit 7c3b2a7acbdc7 - -commit 7c3b2a7acbdc793b822a230ec0c28086c7d0365d -Author: Phil Sutter -Date: Fri Jun 11 16:03:32 2021 +0200 - - parser_bison: Fix for implicit declaration of isalnum - - Have to include ctype.h to make it known. 
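(General C background rather than anything specific to this patch: isalnum() is declared in ctype.h, so a file that calls it without including that header has no declaration in scope, and the compiler warns about, or under stricter settings rejects, the resulting implicit declaration.)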
- - Fixes: e76bb37940181 ("src: allow for variables in the log prefix string") - Signed-off-by: Phil Sutter ---- - src/parser_bison.y | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/src/parser_bison.y b/src/parser_bison.y -index 5ab5744..d38ec30 100644 ---- a/src/parser_bison.y -+++ b/src/parser_bison.y -@@ -10,6 +10,7 @@ - - %{ - -+#include - #include - #include - #include --- -2.31.1 - diff --git a/SOURCES/0065-parser_json-Fix-for-memleak-in-tcp-option-error-path.patch b/SOURCES/0065-parser_json-Fix-for-memleak-in-tcp-option-error-path.patch deleted file mode 100644 index 0f6e5ee..0000000 --- a/SOURCES/0065-parser_json-Fix-for-memleak-in-tcp-option-error-path.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 99d51194569f2784261f452ee821c42c3a7a6808 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 6 Oct 2021 17:32:04 +0200 -Subject: [PATCH] parser_json: Fix for memleak in tcp option error path - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1999059 -Upstream Status: nftables commit f7b0eef8391ae - -commit f7b0eef8391ae7f89a3a82f6eeecaebe199224d7 -Author: Phil Sutter -Date: Fri Jun 11 16:07:02 2021 +0200 - - parser_json: Fix for memleak in tcp option error path - - If 'kind' value is invalid, the function returned without freeing 'expr' - first. Fix this by performing the check before allocation. - - Fixes: cb21869649208 ("json: tcp: add raw tcp option match support") - Signed-off-by: Phil Sutter ---- - src/parser_json.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/src/parser_json.c b/src/parser_json.c -index ef4d4fb..2250be9 100644 ---- a/src/parser_json.c -+++ b/src/parser_json.c -@@ -610,12 +610,12 @@ static struct expr *json_parse_tcp_option_expr(struct json_ctx *ctx, - "base", &kind, "offset", &offset, "len", &len)) { - uint32_t flag = 0; - -- expr = tcpopt_expr_alloc(int_loc, kind, -- TCPOPT_COMMON_KIND); -- - if (kind < 0 || kind > 255) - return NULL; - -+ expr = tcpopt_expr_alloc(int_loc, kind, -+ TCPOPT_COMMON_KIND); -+ - if (offset == TCPOPT_COMMON_KIND && len == 8) - flag = NFT_EXTHDR_F_PRESENT; - --- -2.31.1 - diff --git a/SOURCES/0066-json-Drop-pointless-assignment-in-exthdr_expr_json.patch b/SOURCES/0066-json-Drop-pointless-assignment-in-exthdr_expr_json.patch deleted file mode 100644 index 8000cf3..0000000 --- a/SOURCES/0066-json-Drop-pointless-assignment-in-exthdr_expr_json.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 5f30a3447d28381fdf534ff4ed90167455d1283b Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 6 Oct 2021 17:32:04 +0200 -Subject: [PATCH] json: Drop pointless assignment in exthdr_expr_json() - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1999059 -Upstream Status: nftables commit c1616dfd1ce40 - -commit c1616dfd1ce40bac197924c8947e1c646e915dca -Author: Phil Sutter -Date: Fri Jun 11 16:23:22 2021 +0200 - - json: Drop pointless assignment in exthdr_expr_json() - - The updated value of 'is_exists' is no longer read at this point. 
- - Fixes: cb21869649208 ("json: tcp: add raw tcp option match support") - Signed-off-by: Phil Sutter ---- - src/json.c | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/src/json.c b/src/json.c -index dfc9031..ecec51c 100644 ---- a/src/json.c -+++ b/src/json.c -@@ -679,7 +679,6 @@ json_t *exthdr_expr_json(const struct expr *expr, struct output_ctx *octx) - "base", expr->exthdr.raw_type, - "offset", expr->exthdr.offset, - "len", expr->len); -- is_exists = false; - } - - return json_pack("{s:o}", "tcp option", root); --- -2.31.1 - diff --git a/SOURCES/0067-segtree-Fix-segfault-when-restoring-a-huge-interval-.patch b/SOURCES/0067-segtree-Fix-segfault-when-restoring-a-huge-interval-.patch deleted file mode 100644 index b5501fd..0000000 --- a/SOURCES/0067-segtree-Fix-segfault-when-restoring-a-huge-interval-.patch +++ /dev/null @@ -1,69 +0,0 @@ -From 36cf5177c724540aea5a42f9dc6ef5476f86179a Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Fri, 5 Nov 2021 16:06:45 +0100 -Subject: [PATCH] segtree: Fix segfault when restoring a huge interval set - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1908127 -Upstream Status: nftables commit baecd1cf26851 - -commit baecd1cf26851a4c5b7d469206a488f14fe5b147 -Author: Phil Sutter -Date: Wed Jun 9 15:49:52 2021 +0200 - - segtree: Fix segfault when restoring a huge interval set - - Restoring a set of IPv4 prefixes with about 1.1M elements crashes nft as - set_to_segtree() exhausts the stack. Prevent this by allocating the - pointer array on heap and make sure it is freed before returning to - caller. - - With this patch in place, restoring said set succeeds with allocation of - about 3GB of memory, according to valgrind. - - Signed-off-by: Phil Sutter ---- - src/segtree.c | 10 ++++++---- - 1 file changed, 6 insertions(+), 4 deletions(-) - -diff --git a/src/segtree.c b/src/segtree.c -index d6e3ce2..b852961 100644 ---- a/src/segtree.c -+++ b/src/segtree.c -@@ -414,10 +414,10 @@ static int set_to_segtree(struct list_head *msgs, struct set *set, - struct expr *init, struct seg_tree *tree, - bool add, bool merge) - { -- struct elementary_interval *intervals[init->size]; -+ struct elementary_interval **intervals; - struct expr *i, *next; - unsigned int n; -- int err; -+ int err = 0; - - /* We are updating an existing set with new elements, check if the new - * interval overlaps with any of the existing ones. 
-@@ -428,6 +428,7 @@ static int set_to_segtree(struct list_head *msgs, struct set *set, - return err; - } - -+ intervals = xmalloc_array(init->size, sizeof(intervals[0])); - n = expr_to_intervals(init, tree->keylen, intervals); - - list_for_each_entry_safe(i, next, &init->expressions, list) { -@@ -446,10 +447,11 @@ static int set_to_segtree(struct list_head *msgs, struct set *set, - for (n = 0; n < init->size; n++) { - err = ei_insert(msgs, tree, intervals[n], merge); - if (err < 0) -- return err; -+ break; - } - -- return 0; -+ xfree(intervals); -+ return err; - } - - static bool segtree_needs_first_segment(const struct set *set, --- -2.31.1 - diff --git a/SOURCES/0068-tests-cover-baecd1cf2685-segtree-Fix-segfault-when-r.patch b/SOURCES/0068-tests-cover-baecd1cf2685-segtree-Fix-segfault-when-r.patch deleted file mode 100644 index b909cfe..0000000 --- a/SOURCES/0068-tests-cover-baecd1cf2685-segtree-Fix-segfault-when-r.patch +++ /dev/null @@ -1,74 +0,0 @@ -From cc6c59e683c503b461b4a80526f4bc9cbb0660bf Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Fri, 5 Nov 2021 16:06:45 +0100 -Subject: [PATCH] tests: cover baecd1cf2685 ("segtree: Fix segfault when - restoring a huge interval set") -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1908127 -Upstream Status: nftables commit d8ccad2a2b73c - -commit d8ccad2a2b73c4189934eb5fd0e3d096699b5043 -Author: Å tÄ›pán NÄ›mec -Date: Wed Oct 20 14:42:20 2021 +0200 - - tests: cover baecd1cf2685 ("segtree: Fix segfault when restoring a huge interval set") - - Test inspired by [1] with both the set and stack size reduced by the - same power of 2, to preserve the (pre-baecd1cf2685) segfault on one - hand, and make the test successfully complete (post-baecd1cf2685) in a - few seconds even on weaker hardware on the other. - - (The reason I stopped at 128kB stack size is that with 64kB I was - getting segfaults even with baecd1cf2685 applied.) 
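To put rough numbers on that scaling (approximate figures, assumed rather than taken from the commit): before the fix the elementary_interval pointer array is a variable-length array on the stack, roughly 8 bytes per element on 64-bit builds. The reported 1.1M-element set therefore needs around 9 MB, beyond a typical 8 MB default stack, while the roughly 254 * 79 (about 20k) elements generated by the test need about 160 kB, still enough to overflow the 128 kB limit the test sets, yet trivial once the array moves to the heap.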
- - [1] https://bugzilla.redhat.com/show_bug.cgi?id=1908127 - - Signed-off-by: Å tÄ›pán NÄ›mec - Helped-by: Phil Sutter - Signed-off-by: Phil Sutter ---- - .../sets/0068interval_stack_overflow_0 | 29 +++++++++++++++++++ - 1 file changed, 29 insertions(+) - create mode 100755 tests/shell/testcases/sets/0068interval_stack_overflow_0 - -diff --git a/tests/shell/testcases/sets/0068interval_stack_overflow_0 b/tests/shell/testcases/sets/0068interval_stack_overflow_0 -new file mode 100755 -index 0000000..134282d ---- /dev/null -+++ b/tests/shell/testcases/sets/0068interval_stack_overflow_0 -@@ -0,0 +1,29 @@ -+#!/bin/bash -+ -+set -e -+ -+ruleset_file=$(mktemp) -+ -+trap 'rm -f "$ruleset_file"' EXIT -+ -+{ -+ echo 'define big_set = {' -+ for ((i = 1; i < 255; i++)); do -+ for ((j = 1; j < 80; j++)); do -+ echo "10.0.$i.$j," -+ done -+ done -+ echo '10.1.0.0/24 }' -+} >"$ruleset_file" -+ -+cat >>"$ruleset_file" <<\EOF -+table inet test68_table { -+ set test68_set { -+ type ipv4_addr -+ flags interval -+ elements = { $big_set } -+ } -+} -+EOF -+ -+( ulimit -s 128 && "$NFT" -f "$ruleset_file" ) --- -2.31.1 - diff --git a/SOURCES/0069-tests-shell-NFT-needs-to-be-invoked-unquoted.patch b/SOURCES/0069-tests-shell-NFT-needs-to-be-invoked-unquoted.patch deleted file mode 100644 index 8207b8f..0000000 --- a/SOURCES/0069-tests-shell-NFT-needs-to-be-invoked-unquoted.patch +++ /dev/null @@ -1,58 +0,0 @@ -From ea4457d5c329c8930c610ef3002cfe42bf8a263f Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 8 Dec 2021 14:10:31 +0100 -Subject: [PATCH] tests: shell: $NFT needs to be invoked unquoted -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1908127 -Upstream Status: nftables commit dad3338f1f76a -Conflicts: Context change in README due to missing other commits. - -commit dad3338f1f76a4a5bd782bae9c6b48941dfb1e31 -Author: Å tÄ›pán NÄ›mec -Date: Fri Nov 5 12:39:11 2021 +0100 - - tests: shell: $NFT needs to be invoked unquoted - - The variable has to undergo word splitting, otherwise the shell tries - to find the variable value as an executable, which breaks in cases that - 7c8a44b25c22 ("tests: shell: Allow wrappers to be passed as nft command") - intends to support. - - Mention this in the shell tests README. - - Fixes: d8ccad2a2b73 ("tests: cover baecd1cf2685 ("segtree: Fix segfault when restoring a huge interval set")") - Signed-off-by: Å tÄ›pán NÄ›mec - Signed-off-by: Phil Sutter ---- - tests/shell/README | 3 +++ - tests/shell/testcases/sets/0068interval_stack_overflow_0 | 2 +- - 2 files changed, 4 insertions(+), 1 deletion(-) - -diff --git a/tests/shell/README b/tests/shell/README -index e0279bb..aee50e3 100644 ---- a/tests/shell/README -+++ b/tests/shell/README -@@ -25,4 +25,7 @@ path to the nftables binary being tested. - You can pass an arbitrary $NFT value as well: - # NFT=/usr/local/sbin/nft ./run-tests.sh - -+Note that, to support usage such as NFT='valgrind nft', tests must -+invoke $NFT unquoted. 
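A minimal sketch of the quoting difference documented above (standard shell word splitting, not code taken from the patch):

    NFT='valgrind nft'
    "$NFT" list ruleset   # fails: the shell looks for an executable literally named "valgrind nft"
    $NFT list ruleset     # works: the value splits into "valgrind" plus the nft arguments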
-+ - By default the tests are run with the nft binary at '../../src/nft' -diff --git a/tests/shell/testcases/sets/0068interval_stack_overflow_0 b/tests/shell/testcases/sets/0068interval_stack_overflow_0 -index 134282d..6620572 100755 ---- a/tests/shell/testcases/sets/0068interval_stack_overflow_0 -+++ b/tests/shell/testcases/sets/0068interval_stack_overflow_0 -@@ -26,4 +26,4 @@ table inet test68_table { - } - EOF - --( ulimit -s 128 && "$NFT" -f "$ruleset_file" ) -+( ulimit -s 128 && $NFT -f "$ruleset_file" ) --- -2.31.1 - diff --git a/SOURCES/0070-tests-shell-better-parameters-for-the-interval-stack.patch b/SOURCES/0070-tests-shell-better-parameters-for-the-interval-stack.patch deleted file mode 100644 index dd6cd97..0000000 --- a/SOURCES/0070-tests-shell-better-parameters-for-the-interval-stack.patch +++ /dev/null @@ -1,59 +0,0 @@ -From b297f75275737de3e16b5d14916efe35535b6279 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 8 Dec 2021 14:10:54 +0100 -Subject: [PATCH] tests: shell: better parameters for the interval stack - overflow test -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1908127 -Upstream Status: nftables commit 7b81d9cb094ff - -commit 7b81d9cb094ffa96ad821528cf19269dc348f617 -Author: Å tÄ›pán NÄ›mec -Date: Wed Dec 1 12:12:00 2021 +0100 - - tests: shell: better parameters for the interval stack overflow test - - Wider testing has shown that 128 kB stack is too low (e.g. for systems - with 64 kB page size), leading to false failures in some environments. - - Based on results from a matrix of RHEL 8 and RHEL 9 systems across - x86_64, aarch64, ppc64le and s390x architectures as well as some - anecdotal testing of other Linux distros on x86_64 machines, 400 kB - seems safe: the normal nft stack (which should stay constant during - this test) on all tested systems doesn't exceed 200 kB (stays around - 100 kB on typical systems with 4 kB page size), while always growing - beyond 500 kB in the failing case (nftables before baecd1cf2685) with - the increased set size. 
- - Fixes: d8ccad2a2b73 ("tests: cover baecd1cf2685 ("segtree: Fix segfault when restoring a huge interval set")") - Signed-off-by: Å tÄ›pán NÄ›mec - Signed-off-by: Phil Sutter ---- - tests/shell/testcases/sets/0068interval_stack_overflow_0 | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/tests/shell/testcases/sets/0068interval_stack_overflow_0 b/tests/shell/testcases/sets/0068interval_stack_overflow_0 -index 6620572..2cbc986 100755 ---- a/tests/shell/testcases/sets/0068interval_stack_overflow_0 -+++ b/tests/shell/testcases/sets/0068interval_stack_overflow_0 -@@ -9,7 +9,7 @@ trap 'rm -f "$ruleset_file"' EXIT - { - echo 'define big_set = {' - for ((i = 1; i < 255; i++)); do -- for ((j = 1; j < 80; j++)); do -+ for ((j = 1; j < 255; j++)); do - echo "10.0.$i.$j," - done - done -@@ -26,4 +26,4 @@ table inet test68_table { - } - EOF - --( ulimit -s 128 && $NFT -f "$ruleset_file" ) -+( ulimit -s 400 && $NFT -f "$ruleset_file" ) --- -2.31.1 - diff --git a/SOURCES/0071-netlink-remove-unused-parameter-from-netlink_gen_stm.patch b/SOURCES/0071-netlink-remove-unused-parameter-from-netlink_gen_stm.patch deleted file mode 100644 index d254375..0000000 --- a/SOURCES/0071-netlink-remove-unused-parameter-from-netlink_gen_stm.patch +++ /dev/null @@ -1,134 +0,0 @@ -From cf85778a263a34aa2aeee565f3e046693164a097 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Thu, 13 Jan 2022 20:37:56 +0100 -Subject: [PATCH] netlink: remove unused parameter from - netlink_gen_stmt_stateful() - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2039594 -Upstream Status: nftables commit 3f3e897f42965 - -commit 3f3e897f429659ff6c8387245d0d4115952a6c31 -Author: Pablo Neira Ayuso -Date: Wed Mar 11 13:02:26 2020 +0100 - - netlink: remove unused parameter from netlink_gen_stmt_stateful() - - Remove context from netlink_gen_stmt_stateful(). 
- - Signed-off-by: Pablo Neira Ayuso ---- - src/netlink_linearize.c | 36 +++++++++++++----------------------- - 1 file changed, 13 insertions(+), 23 deletions(-) - -diff --git a/src/netlink_linearize.c b/src/netlink_linearize.c -index 28b0e6a..f5c6116 100644 ---- a/src/netlink_linearize.c -+++ b/src/netlink_linearize.c -@@ -780,9 +780,7 @@ static void netlink_gen_objref_stmt(struct netlink_linearize_ctx *ctx, - nftnl_rule_add_expr(ctx->nlr, nle); - } - --static struct nftnl_expr * --netlink_gen_connlimit_stmt(struct netlink_linearize_ctx *ctx, -- const struct stmt *stmt) -+static struct nftnl_expr *netlink_gen_connlimit_stmt(const struct stmt *stmt) - { - struct nftnl_expr *nle; - -@@ -795,9 +793,7 @@ netlink_gen_connlimit_stmt(struct netlink_linearize_ctx *ctx, - return nle; - } - --static struct nftnl_expr * --netlink_gen_counter_stmt(struct netlink_linearize_ctx *ctx, -- const struct stmt *stmt) -+static struct nftnl_expr *netlink_gen_counter_stmt(const struct stmt *stmt) - { - struct nftnl_expr *nle; - -@@ -814,9 +810,7 @@ netlink_gen_counter_stmt(struct netlink_linearize_ctx *ctx, - return nle; - } - --static struct nftnl_expr * --netlink_gen_limit_stmt(struct netlink_linearize_ctx *ctx, -- const struct stmt *stmt) -+static struct nftnl_expr *netlink_gen_limit_stmt(const struct stmt *stmt) - { - struct nftnl_expr *nle; - -@@ -832,9 +826,7 @@ netlink_gen_limit_stmt(struct netlink_linearize_ctx *ctx, - return nle; - } - --static struct nftnl_expr * --netlink_gen_quota_stmt(struct netlink_linearize_ctx *ctx, -- const struct stmt *stmt) -+static struct nftnl_expr *netlink_gen_quota_stmt(const struct stmt *stmt) - { - struct nftnl_expr *nle; - -@@ -846,19 +838,17 @@ netlink_gen_quota_stmt(struct netlink_linearize_ctx *ctx, - return nle; - } - --static struct nftnl_expr * --netlink_gen_stmt_stateful(struct netlink_linearize_ctx *ctx, -- const struct stmt *stmt) -+static struct nftnl_expr *netlink_gen_stmt_stateful(const struct stmt *stmt) - { - switch (stmt->ops->type) { - case STMT_CONNLIMIT: -- return netlink_gen_connlimit_stmt(ctx, stmt); -+ return netlink_gen_connlimit_stmt(stmt); - case STMT_COUNTER: -- return netlink_gen_counter_stmt(ctx, stmt); -+ return netlink_gen_counter_stmt(stmt); - case STMT_LIMIT: -- return netlink_gen_limit_stmt(ctx, stmt); -+ return netlink_gen_limit_stmt(stmt); - case STMT_QUOTA: -- return netlink_gen_quota_stmt(ctx, stmt); -+ return netlink_gen_quota_stmt(stmt); - default: - BUG("unknown stateful statement type %s\n", stmt->ops->name); - } -@@ -1307,7 +1297,7 @@ static void netlink_gen_set_stmt(struct netlink_linearize_ctx *ctx, - - if (stmt->set.stmt) - nftnl_expr_set(nle, NFTNL_EXPR_DYNSET_EXPR, -- netlink_gen_stmt_stateful(ctx, stmt->set.stmt), 0); -+ netlink_gen_stmt_stateful(stmt->set.stmt), 0); - } - - static void netlink_gen_map_stmt(struct netlink_linearize_ctx *ctx, -@@ -1337,7 +1327,7 @@ static void netlink_gen_map_stmt(struct netlink_linearize_ctx *ctx, - - if (stmt->map.stmt) - nftnl_expr_set(nle, NFTNL_EXPR_DYNSET_EXPR, -- netlink_gen_stmt_stateful(ctx, stmt->map.stmt), 0); -+ netlink_gen_stmt_stateful(stmt->map.stmt), 0); - - nftnl_rule_add_expr(ctx->nlr, nle); - } -@@ -1369,7 +1359,7 @@ static void netlink_gen_meter_stmt(struct netlink_linearize_ctx *ctx, - nftnl_expr_set_str(nle, NFTNL_EXPR_DYNSET_SET_NAME, set->handle.set.name); - nftnl_expr_set_u32(nle, NFTNL_EXPR_DYNSET_SET_ID, set->handle.set_id); - nftnl_expr_set(nle, NFTNL_EXPR_DYNSET_EXPR, -- netlink_gen_stmt_stateful(ctx, stmt->meter.stmt), 0); -+ 
netlink_gen_stmt_stateful(stmt->meter.stmt), 0); - nftnl_rule_add_expr(ctx->nlr, nle); - } - -@@ -1415,7 +1405,7 @@ static void netlink_gen_stmt(struct netlink_linearize_ctx *ctx, - case STMT_COUNTER: - case STMT_LIMIT: - case STMT_QUOTA: -- nle = netlink_gen_stmt_stateful(ctx, stmt); -+ nle = netlink_gen_stmt_stateful(stmt); - nftnl_rule_add_expr(ctx->nlr, nle); - break; - case STMT_NOTRACK: --- -2.31.1 - diff --git a/SOURCES/0072-src-support-for-restoring-element-counters.patch b/SOURCES/0072-src-support-for-restoring-element-counters.patch deleted file mode 100644 index ad66222..0000000 --- a/SOURCES/0072-src-support-for-restoring-element-counters.patch +++ /dev/null @@ -1,150 +0,0 @@ -From 0db42cc2d2647ec61441e29445c9f6e0f8946613 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Thu, 13 Jan 2022 20:37:56 +0100 -Subject: [PATCH] src: support for restoring element counters - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2039594 -Upstream Status: nftables commit 1fe6089ddd87e - -commit 1fe6089ddd87ee7869d24c0f8849951220cc9b85 -Author: Pablo Neira Ayuso -Date: Wed Mar 11 13:00:01 2020 +0100 - - src: support for restoring element counters - - This patch allows you to restore counters in dynamic sets: - - table ip test { - set test { - type ipv4_addr - size 65535 - flags dynamic,timeout - timeout 30d - gc-interval 1d - elements = { 192.168.10.13 expires 19d23h52m27s576ms counter packets 51 bytes 17265 } - } - chain output { - type filter hook output priority 0; - update @test { ip saddr } - } - } - - You can also add counters to elements from the control place, ie. - - table ip test { - set test { - type ipv4_addr - size 65535 - elements = { 192.168.2.1 counter packets 75 bytes 19043 } - } - - chain output { - type filter hook output priority filter; policy accept; - ip daddr @test - } - } - - Signed-off-by: Pablo Neira Ayuso ---- - include/netlink.h | 1 + - src/netlink.c | 3 +++ - src/netlink_linearize.c | 2 +- - src/parser_bison.y | 36 +++++++++++++++++++++++++++++++++++- - 4 files changed, 40 insertions(+), 2 deletions(-) - -diff --git a/include/netlink.h b/include/netlink.h -index 88d12ba..059092e 100644 ---- a/include/netlink.h -+++ b/include/netlink.h -@@ -97,6 +97,7 @@ extern void netlink_gen_data(const struct expr *expr, - extern void netlink_gen_raw_data(const mpz_t value, enum byteorder byteorder, - unsigned int len, - struct nft_data_linearize *data); -+extern struct nftnl_expr *netlink_gen_stmt_stateful(const struct stmt *stmt); - - extern struct expr *netlink_alloc_value(const struct location *loc, - const struct nft_data_delinearize *nld); -diff --git a/src/netlink.c b/src/netlink.c -index 64e51e5..825c2cc 100644 ---- a/src/netlink.c -+++ b/src/netlink.c -@@ -136,6 +136,9 @@ static struct nftnl_set_elem *alloc_nftnl_setelem(const struct expr *set, - if (elem->expiration) - nftnl_set_elem_set_u64(nlse, NFTNL_SET_ELEM_EXPIRATION, - elem->expiration); -+ if (elem->stmt) -+ nftnl_set_elem_set(nlse, NFTNL_SET_ELEM_EXPR, -+ netlink_gen_stmt_stateful(elem->stmt), 0); - if (elem->comment || expr->elem_flags) { - udbuf = nftnl_udata_buf_alloc(NFT_USERDATA_MAXLEN); - if (!udbuf) -diff --git a/src/netlink_linearize.c b/src/netlink_linearize.c -index f5c6116..3fa1339 100644 ---- a/src/netlink_linearize.c -+++ b/src/netlink_linearize.c -@@ -838,7 +838,7 @@ static struct nftnl_expr *netlink_gen_quota_stmt(const struct stmt *stmt) - return nle; - } - --static struct nftnl_expr *netlink_gen_stmt_stateful(const struct stmt *stmt) -+struct nftnl_expr *netlink_gen_stmt_stateful(const 
struct stmt *stmt) - { - switch (stmt->ops->type) { - case STMT_CONNLIMIT: -diff --git a/src/parser_bison.y b/src/parser_bison.y -index d38ec30..2cdf8ec 100644 ---- a/src/parser_bison.y -+++ b/src/parser_bison.y -@@ -3654,7 +3654,7 @@ meter_key_expr_alloc : concat_expr - ; - - set_elem_expr : set_elem_expr_alloc -- | set_elem_expr_alloc set_elem_options -+ | set_elem_expr_alloc set_elem_expr_options - ; - - set_elem_expr_alloc : set_lhs_expr -@@ -3684,6 +3684,40 @@ set_elem_option : TIMEOUT time_spec - } - ; - -+set_elem_expr_options : set_elem_expr_option -+ { -+ $$ = $0; -+ } -+ | set_elem_expr_options set_elem_expr_option -+ ; -+ -+set_elem_expr_option : TIMEOUT time_spec -+ { -+ $0->timeout = $2; -+ } -+ | EXPIRES time_spec -+ { -+ $0->expiration = $2; -+ } -+ | COUNTER -+ { -+ $0->stmt = counter_stmt_alloc(&@$); -+ } -+ | COUNTER PACKETS NUM BYTES NUM -+ { -+ struct stmt *stmt; -+ -+ stmt = counter_stmt_alloc(&@$); -+ stmt->counter.packets = $3; -+ stmt->counter.bytes = $5; -+ $0->stmt = stmt; -+ } -+ | comment_spec -+ { -+ $0->comment = $1; -+ } -+ ; -+ - set_lhs_expr : concat_rhs_expr - | wildcard_expr - ; --- -2.31.1 - diff --git a/SOURCES/0073-evaluate-attempt-to-set_eval-flag-if-dynamic-updates.patch b/SOURCES/0073-evaluate-attempt-to-set_eval-flag-if-dynamic-updates.patch deleted file mode 100644 index 670afae..0000000 --- a/SOURCES/0073-evaluate-attempt-to-set_eval-flag-if-dynamic-updates.patch +++ /dev/null @@ -1,127 +0,0 @@ -From 48021b277a1ab92480c43e1fa7573b00e33f5212 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Fri, 14 Jan 2022 11:39:17 +0100 -Subject: [PATCH] evaluate: attempt to set_eval flag if dynamic updates - requested - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2039594 -Upstream Status: nftables commit 8d443adfcc8c1 -Conflicts: -* Context change due to missing commit 242965f452e64 - ("src: add support for multi-statement in dynamic sets and maps") -* Adjusted test-case: Due to missing kernel commit 7b1394892de8d - ("netfilter: nft_dynset: relax superfluous check on set updates"), - 'update' statement is allowed only if timeout flag is present - -commit 8d443adfcc8c19effd6be9a9c903ee96e374f2e8 -Author: Florian Westphal -Date: Tue Jan 11 12:08:59 2022 +0100 - - evaluate: attempt to set_eval flag if dynamic updates requested - - When passing no upper size limit, the dynset expression forces - an internal 64k upperlimit. - - In some cases, this can result in 'nft -f' to restore the ruleset. - Avoid this by always setting the EVAL flag on a set definition when - we encounter packet-path update attempt in the batch. 
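A sketch of the problematic ruleset shape, reconstructed from the test case below rather than copied from it: a set dumped with an explicit size but without the dynamic flag, while a rule still updates it from the packet path, e.g.

    set dlist {
        type ipv4_addr
        size 65535
        flags timeout
    }
    ...
    udp dport 1234 update @dlist { ip daddr }

Restoring such a dump used to fail because, without the EVAL flag, the kernel does not pick a set backend that accepts updates from the packet path; forcing the flag during evaluation repairs the dump instead of rejecting it.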
- - Reported-by: Yi Chen - Suggested-by: Pablo Neira Ayuso - Signed-off-by: Florian Westphal ---- - src/evaluate.c | 11 +++++++ - .../testcases/sets/dumps/dynset_missing.nft | 12 +++++++ - tests/shell/testcases/sets/dynset_missing | 32 +++++++++++++++++++ - 3 files changed, 55 insertions(+) - create mode 100644 tests/shell/testcases/sets/dumps/dynset_missing.nft - create mode 100755 tests/shell/testcases/sets/dynset_missing - -diff --git a/src/evaluate.c b/src/evaluate.c -index 00ec20b..9381f23 100644 ---- a/src/evaluate.c -+++ b/src/evaluate.c -@@ -3076,6 +3076,8 @@ static int stmt_evaluate_log(struct eval_ctx *ctx, struct stmt *stmt) - - static int stmt_evaluate_set(struct eval_ctx *ctx, struct stmt *stmt) - { -+ struct set *this_set; -+ - expr_set_context(&ctx->ectx, NULL, 0); - if (expr_evaluate(ctx, &stmt->set.set) < 0) - return -1; -@@ -3103,6 +3105,15 @@ static int stmt_evaluate_set(struct eval_ctx *ctx, struct stmt *stmt) - "meter statement must be stateful"); - } - -+ this_set = stmt->set.set->set; -+ -+ /* Make sure EVAL flag is set on set definition so that kernel -+ * picks a set that allows updates from the packet path. -+ * -+ * Alternatively we could error out in case 'flags dynamic' was -+ * not given, but we can repair this here. -+ */ -+ this_set->flags |= NFT_SET_EVAL; - return 0; - } - -diff --git a/tests/shell/testcases/sets/dumps/dynset_missing.nft b/tests/shell/testcases/sets/dumps/dynset_missing.nft -new file mode 100644 -index 0000000..fdb1b97 ---- /dev/null -+++ b/tests/shell/testcases/sets/dumps/dynset_missing.nft -@@ -0,0 +1,12 @@ -+table ip test { -+ set dlist { -+ type ipv4_addr -+ size 65535 -+ flags dynamic,timeout -+ } -+ -+ chain output { -+ type filter hook output priority filter; policy accept; -+ udp dport 1234 update @dlist { ip daddr } counter packets 0 bytes 0 -+ } -+} -diff --git a/tests/shell/testcases/sets/dynset_missing b/tests/shell/testcases/sets/dynset_missing -new file mode 100755 -index 0000000..89afcd5 ---- /dev/null -+++ b/tests/shell/testcases/sets/dynset_missing -@@ -0,0 +1,32 @@ -+#!/bin/bash -+ -+set -e -+ -+$NFT -f /dev/stdin < $tmpfile -+ -+# this restore works, because set is still the rhash backend. -+$NFT -f $tmpfile # success -+$NFT flush ruleset -+ -+# fails without commit 'attempt to set_eval flag if dynamic updates requested', -+# because set in $tmpfile has 'size x' but no 'flags dynamic'. -+$NFT -f $tmpfile --- -2.31.1 - diff --git a/SOURCES/0074-evaluate-fix-inet-nat-with-no-layer-3-info.patch b/SOURCES/0074-evaluate-fix-inet-nat-with-no-layer-3-info.patch deleted file mode 100644 index c9fae43..0000000 --- a/SOURCES/0074-evaluate-fix-inet-nat-with-no-layer-3-info.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 1fe92af5a03608b94e8e1e2ff26e24adfe2ea09a Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Fri, 21 Jan 2022 12:35:39 +0100 -Subject: [PATCH] evaluate: fix inet nat with no layer 3 info - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2030773 -Upstream Status: nftables commit 9a36033ce5063 - -commit 9a36033ce50638a403d1421935cdd1287ee5de6b -Author: Pablo Neira Ayuso -Date: Tue Jul 20 18:59:44 2021 +0200 - - evaluate: fix inet nat with no layer 3 info - - nft currently reports: - - Error: Could not process rule: Protocol error - add rule inet x y meta l4proto tcp dnat to :80 - ^^^^ - - default to NFPROTO_INET family, otherwise kernel bails out EPROTO when - trying to load the conntrack helper. 
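The post-fix behaviour is exercised by the test added further down in tests/py/inet/dnat.t: the rule

    meta l4proto tcp dnat to :80

is now accepted, and the expected payload translation uses the inet-family nat expression (nat dnat inet proto_min reg 1 flags 0x2).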
- - Closes: https://bugzilla.netfilter.org/show_bug.cgi?id=1428 - Signed-off-by: Pablo Neira Ayuso ---- - src/evaluate.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/src/evaluate.c b/src/evaluate.c -index 9381f23..e495faf 100644 ---- a/src/evaluate.c -+++ b/src/evaluate.c -@@ -2757,9 +2757,10 @@ static int nat_evaluate_family(struct eval_ctx *ctx, struct stmt *stmt) - stmt->nat.family = ctx->pctx.family; - return 0; - case NFPROTO_INET: -- if (!stmt->nat.addr) -+ if (!stmt->nat.addr) { -+ stmt->nat.family = NFPROTO_INET; - return 0; -- -+ } - if (stmt->nat.family != NFPROTO_UNSPEC) - return 0; - --- -2.31.1 - diff --git a/SOURCES/0075-tests-py-add-dnat-to-port-without-defining-destinati.patch b/SOURCES/0075-tests-py-add-dnat-to-port-without-defining-destinati.patch deleted file mode 100644 index f4e0e5e..0000000 --- a/SOURCES/0075-tests-py-add-dnat-to-port-without-defining-destinati.patch +++ /dev/null @@ -1,86 +0,0 @@ -From eeba2cd956485d3059dabf86a7ad8dd59ee682dd Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Fri, 4 Feb 2022 14:18:44 +0100 -Subject: [PATCH] tests: py: add dnat to port without defining destination - address - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2030773 -Upstream Status: nftables commit 0f27e258b37a5 -Conflicts: Context changes due to missing commit ae1d822630e6d - ("src: context tracking for multiple transport protocols") - -commit 0f27e258b37a592233d6ad5381cd1fae65e57514 -Author: Pablo Neira Ayuso -Date: Thu Jul 22 17:43:56 2021 +0200 - - tests: py: add dnat to port without defining destination address - - Add a test to cover dnat to port without destination address. - - Closes: https://bugzilla.netfilter.org/show_bug.cgi?id=1428 - Signed-off-by: Pablo Neira Ayuso ---- - tests/py/inet/dnat.t | 1 + - tests/py/inet/dnat.t.json | 20 ++++++++++++++++++++ - tests/py/inet/dnat.t.payload | 8 ++++++++ - 3 files changed, 29 insertions(+) - -diff --git a/tests/py/inet/dnat.t b/tests/py/inet/dnat.t -index fcdf943..6beceda 100644 ---- a/tests/py/inet/dnat.t -+++ b/tests/py/inet/dnat.t -@@ -6,6 +6,7 @@ iifname "foo" tcp dport 80 redirect to :8080;ok - - iifname "eth0" tcp dport 443 dnat ip to 192.168.3.2;ok - iifname "eth0" tcp dport 443 dnat ip6 to [dead::beef]:4443;ok -+meta l4proto tcp dnat to :80;ok;meta l4proto 6 dnat to :80 - - dnat ip to ct mark map { 0x00000014 : 1.2.3.4};ok - dnat ip to ct mark . ip daddr map { 0x00000014 . 
1.1.1.1 : 1.2.3.4};ok -diff --git a/tests/py/inet/dnat.t.json b/tests/py/inet/dnat.t.json -index ac6dac6..f88e9cf 100644 ---- a/tests/py/inet/dnat.t.json -+++ b/tests/py/inet/dnat.t.json -@@ -164,3 +164,23 @@ - } - ] - -+# meta l4proto tcp dnat to :80 -+[ -+ { -+ "match": { -+ "left": { -+ "meta": { -+ "key": "l4proto" -+ } -+ }, -+ "op": "==", -+ "right": 6 -+ } -+ }, -+ { -+ "dnat": { -+ "port": 80 -+ } -+ } -+] -+ -diff --git a/tests/py/inet/dnat.t.payload b/tests/py/inet/dnat.t.payload -index b81caf7..6d8569d 100644 ---- a/tests/py/inet/dnat.t.payload -+++ b/tests/py/inet/dnat.t.payload -@@ -52,3 +52,11 @@ inet test-inet prerouting - [ payload load 4b @ network header + 16 => reg 9 ] - [ lookup reg 1 set __map%d dreg 1 ] - [ nat dnat ip addr_min reg 1 addr_max reg 0 ] -+ -+# meta l4proto tcp dnat to :80 -+inet -+ [ meta load l4proto => reg 1 ] -+ [ cmp eq reg 1 0x00000006 ] -+ [ immediate reg 1 0x00005000 ] -+ [ nat dnat inet proto_min reg 1 flags 0x2 ] -+ --- -2.31.1 - diff --git a/SOURCES/0076-mnl-do-not-build-nftnl_set-element-list.patch b/SOURCES/0076-mnl-do-not-build-nftnl_set-element-list.patch deleted file mode 100644 index 9e9c18d..0000000 --- a/SOURCES/0076-mnl-do-not-build-nftnl_set-element-list.patch +++ /dev/null @@ -1,214 +0,0 @@ -From bd940a4efd2b5897f8a8e58ec7733417b3710e1e Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 8 Dec 2021 13:28:49 +0100 -Subject: [PATCH] mnl: do not build nftnl_set element list - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2047821 -Upstream Status: nftables commit b4b234f5a29e8 -Conflicts: Context change due to missing commit 66746e7dedeb0 - ("src: support for nat with interval concatenation"). - -commit b4b234f5a29e819045679acd95820a7457d4d7de -Author: Pablo Neira Ayuso -Date: Thu Nov 4 12:53:11 2021 +0100 - - mnl: do not build nftnl_set element list - - Do not call alloc_setelem_cache() to build the set element list in - nftnl_set. Instead, translate one single set element expression to - nftnl_set_elem object at a time and use this object to build the netlink - header. - - Using a huge test set containing 1.1 million element blocklist, this - patch is reducing userspace memory consumption by 40%. 
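A rough way to exercise large element batches along the lines of the measurement mentioned above (table and set names are made up, the element count is far smaller than the 1.1M-entry blocklist, and GNU time is assumed to be installed for the RSS comparison):

#!/bin/bash
# Illustrative only: build a sizeable element batch and restore it in one
# transaction; compare maximum RSS before and after the change.
tmp=$(mktemp)
{
	echo "add table ip t"
	echo "add set ip t blocklist { type ipv4_addr; }"
	for a in $(seq 0 99); do
		for b in $(seq 0 255); do
			echo "add element ip t blocklist { 10.$a.$b.1 }"
		done
	done
} > "$tmp"
/usr/bin/time -v nft -f "$tmp"
rm -f "$tmp"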
- - Signed-off-by: Pablo Neira Ayuso ---- - include/netlink.h | 2 + - src/mnl.c | 112 ++++++++++++++++++++++++++++++++++++---------- - src/netlink.c | 4 +- - 3 files changed, 93 insertions(+), 25 deletions(-) - -diff --git a/include/netlink.h b/include/netlink.h -index 059092e..3443582 100644 ---- a/include/netlink.h -+++ b/include/netlink.h -@@ -56,6 +56,8 @@ struct netlink_ctx { - - extern struct nftnl_expr *alloc_nft_expr(const char *name); - extern void alloc_setelem_cache(const struct expr *set, struct nftnl_set *nls); -+struct nftnl_set_elem *alloc_nftnl_setelem(const struct expr *set, -+ const struct expr *expr); - - extern struct nftnl_table *netlink_table_alloc(const struct nlmsghdr *nlh); - extern struct nftnl_chain *netlink_chain_alloc(const struct nlmsghdr *nlh); -diff --git a/src/mnl.c b/src/mnl.c -index 23341e6..44cf1a4 100644 ---- a/src/mnl.c -+++ b/src/mnl.c -@@ -1201,33 +1201,102 @@ static int set_elem_cb(const struct nlmsghdr *nlh, void *data) - return MNL_CB_OK; - } - --static int mnl_nft_setelem_batch(struct nftnl_set *nls, -+static bool mnl_nft_attr_nest_overflow(struct nlmsghdr *nlh, -+ const struct nlattr *from, -+ const struct nlattr *to) -+{ -+ int len = (void *)to + to->nla_len - (void *)from; -+ -+ /* The attribute length field is 16 bits long, thus the maximum payload -+ * that an attribute can convey is UINT16_MAX. In case of overflow, -+ * discard the last attribute that did not fit into the nest. -+ */ -+ if (len > UINT16_MAX) { -+ nlh->nlmsg_len -= to->nla_len; -+ return true; -+ } -+ return false; -+} -+ -+static void netlink_dump_setelem(const struct nftnl_set_elem *nlse, -+ struct netlink_ctx *ctx) -+{ -+ FILE *fp = ctx->nft->output.output_fp; -+ char buf[4096]; -+ -+ if (!(ctx->nft->debug_mask & NFT_DEBUG_NETLINK) || !fp) -+ return; -+ -+ nftnl_set_elem_snprintf(buf, sizeof(buf), nlse, NFTNL_OUTPUT_DEFAULT, 0); -+ fprintf(fp, "\t%s", buf); -+} -+ -+static void netlink_dump_setelem_done(struct netlink_ctx *ctx) -+{ -+ FILE *fp = ctx->nft->output.output_fp; -+ -+ if (!(ctx->nft->debug_mask & NFT_DEBUG_NETLINK) || !fp) -+ return; -+ -+ fprintf(fp, "\n"); -+} -+ -+static int mnl_nft_setelem_batch(const struct nftnl_set *nls, - struct nftnl_batch *batch, - enum nf_tables_msg_types cmd, -- unsigned int flags, uint32_t seqnum) -+ unsigned int flags, uint32_t seqnum, -+ const struct expr *set, -+ struct netlink_ctx *ctx) - { -+ struct nlattr *nest1, *nest2; -+ struct nftnl_set_elem *nlse; - struct nlmsghdr *nlh; -- struct nftnl_set_elems_iter *iter; -- int ret; -- -- iter = nftnl_set_elems_iter_create(nls); -- if (iter == NULL) -- memory_allocation_error(); -+ struct expr *expr = NULL; -+ int i = 0; - - if (cmd == NFT_MSG_NEWSETELEM) - flags |= NLM_F_CREATE; - -- while (nftnl_set_elems_iter_cur(iter)) { -- nlh = nftnl_nlmsg_build_hdr(nftnl_batch_buffer(batch), cmd, -- nftnl_set_get_u32(nls, NFTNL_SET_FAMILY), -- flags, seqnum); -- ret = nftnl_set_elems_nlmsg_build_payload_iter(nlh, iter); -- mnl_nft_batch_continue(batch); -- if (ret <= 0) -- break; -+ if (set) -+ expr = list_first_entry(&set->expressions, struct expr, list); -+ -+next: -+ nlh = nftnl_nlmsg_build_hdr(nftnl_batch_buffer(batch), cmd, -+ nftnl_set_get_u32(nls, NFTNL_SET_FAMILY), -+ flags, seqnum); -+ -+ if (nftnl_set_is_set(nls, NFTNL_SET_TABLE)) { -+ mnl_attr_put_strz(nlh, NFTA_SET_ELEM_LIST_TABLE, -+ nftnl_set_get_str(nls, NFTNL_SET_TABLE)); -+ } -+ if (nftnl_set_is_set(nls, NFTNL_SET_NAME)) { -+ mnl_attr_put_strz(nlh, NFTA_SET_ELEM_LIST_SET, -+ nftnl_set_get_str(nls, NFTNL_SET_NAME)); - } -+ if 
(nftnl_set_is_set(nls, NFTNL_SET_ID)) { -+ mnl_attr_put_u32(nlh, NFTA_SET_ELEM_LIST_SET_ID, -+ htonl(nftnl_set_get_u32(nls, NFTNL_SET_ID))); -+ } -+ -+ if (!set || list_empty(&set->expressions)) -+ return 0; - -- nftnl_set_elems_iter_destroy(iter); -+ assert(expr); -+ nest1 = mnl_attr_nest_start(nlh, NFTA_SET_ELEM_LIST_ELEMENTS); -+ list_for_each_entry_from(expr, &set->expressions, list) { -+ nlse = alloc_nftnl_setelem(set, expr); -+ nest2 = nftnl_set_elem_nlmsg_build(nlh, nlse, ++i); -+ netlink_dump_setelem(nlse, ctx); -+ nftnl_set_elem_free(nlse); -+ if (mnl_nft_attr_nest_overflow(nlh, nest1, nest2)) { -+ mnl_attr_nest_end(nlh, nest1); -+ mnl_nft_batch_continue(batch); -+ goto next; -+ } -+ } -+ mnl_attr_nest_end(nlh, nest1); -+ mnl_nft_batch_continue(batch); -+ netlink_dump_setelem_done(ctx); - - return 0; - } -@@ -1249,11 +1318,10 @@ int mnl_nft_setelem_add(struct netlink_ctx *ctx, const struct set *set, - if (h->set_id) - nftnl_set_set_u32(nls, NFTNL_SET_ID, h->set_id); - -- alloc_setelem_cache(expr, nls); - netlink_dump_set(nls, ctx); - -- err = mnl_nft_setelem_batch(nls, ctx->batch, NFT_MSG_NEWSETELEM, flags, -- ctx->seqnum); -+ err = mnl_nft_setelem_batch(nls, ctx->batch, NFT_MSG_NEWSETELEM, -+ flags, ctx->seqnum, expr, ctx); - nftnl_set_free(nls); - - return err; -@@ -1306,12 +1374,10 @@ int mnl_nft_setelem_del(struct netlink_ctx *ctx, const struct cmd *cmd) - else if (h->handle.id) - nftnl_set_set_u64(nls, NFTNL_SET_HANDLE, h->handle.id); - -- if (cmd->expr) -- alloc_setelem_cache(cmd->expr, nls); - netlink_dump_set(nls, ctx); - - err = mnl_nft_setelem_batch(nls, ctx->batch, NFT_MSG_DELSETELEM, 0, -- ctx->seqnum); -+ ctx->seqnum, cmd->expr, ctx); - nftnl_set_free(nls); - - return err; -diff --git a/src/netlink.c b/src/netlink.c -index 825c2cc..f8c97d0 100644 ---- a/src/netlink.c -+++ b/src/netlink.c -@@ -95,8 +95,8 @@ struct nftnl_expr *alloc_nft_expr(const char *name) - return nle; - } - --static struct nftnl_set_elem *alloc_nftnl_setelem(const struct expr *set, -- const struct expr *expr) -+struct nftnl_set_elem *alloc_nftnl_setelem(const struct expr *set, -+ const struct expr *expr) - { - const struct expr *elem, *data; - struct nftnl_set_elem *nlse; --- -2.31.1 - diff --git a/SOURCES/0077-mnl-do-not-use-expr-identifier-to-fetch-device-name.patch b/SOURCES/0077-mnl-do-not-use-expr-identifier-to-fetch-device-name.patch deleted file mode 100644 index 037fbce..0000000 --- a/SOURCES/0077-mnl-do-not-use-expr-identifier-to-fetch-device-name.patch +++ /dev/null @@ -1,130 +0,0 @@ -From 2747cab9c49b570347c86ff59daec93a1432b0bc Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 27 Apr 2022 14:37:00 +0200 -Subject: [PATCH] mnl: do not use expr->identifier to fetch device name - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2070924 -Upstream Status: nftables commit 78bbe7f7a55be - -commit 78bbe7f7a55be48909067e25900de27623d8fa6a -Author: Pablo Neira Ayuso -Date: Wed Feb 19 21:05:26 2020 +0100 - - mnl: do not use expr->identifier to fetch device name - - This string might not be nul-terminated, resulting in spurious errors - when adding netdev chains. 
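Both code paths touched here can be exercised with something like the following sketch, assuming interfaces eth0 and eth1 exist (the table, chain and flowtable names are made up):

#!/bin/bash
# Illustrative only: multi-device netdev chain and flowtable creation, the two
# spots where device names are fetched from the expression.
nft add table netdev t
nft 'add chain netdev t ingress { type filter hook ingress devices = { eth0, eth1 } priority 0; }'
nft add table inet f
nft 'add flowtable inet f ft { hook ingress priority 0; devices = { eth0, eth1 }; }'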
- - Fixes: 3fdc7541fba0 ("src: add multidevice support for netdev chain") - Fixes: 92911b362e90 ("src: add support to add flowtables") - Signed-off-by: Pablo Neira Ayuso ---- - src/mnl.c | 33 +++++++++++++++++++++++++++++---- - src/parser_bison.y | 6 +++--- - 2 files changed, 32 insertions(+), 7 deletions(-) - -diff --git a/src/mnl.c b/src/mnl.c -index 44cf1a4..f881d97 100644 ---- a/src/mnl.c -+++ b/src/mnl.c -@@ -26,6 +26,7 @@ - - #include - #include -+#include - #include - #include - #include -@@ -529,7 +530,9 @@ int mnl_nft_chain_add(struct netlink_ctx *ctx, const struct cmd *cmd, - { - int priority, policy, i = 0; - struct nftnl_chain *nlc; -+ unsigned int ifname_len; - const char **dev_array; -+ char ifname[IFNAMSIZ]; - struct nlmsghdr *nlh; - struct expr *expr; - int dev_array_len; -@@ -562,7 +565,12 @@ int mnl_nft_chain_add(struct netlink_ctx *ctx, const struct cmd *cmd, - dev_array = xmalloc(sizeof(char *) * 8); - dev_array_len = 8; - list_for_each_entry(expr, &cmd->chain->dev_expr->expressions, list) { -- dev_array[i++] = expr->identifier; -+ ifname_len = div_round_up(expr->len, BITS_PER_BYTE); -+ memset(ifname, 0, sizeof(ifname)); -+ mpz_export_data(ifname, expr->value, -+ BYTEORDER_HOST_ENDIAN, -+ ifname_len); -+ dev_array[i++] = xstrdup(ifname); - if (i == dev_array_len) { - dev_array_len *= 2; - dev_array = xrealloc(dev_array, -@@ -577,6 +585,10 @@ int mnl_nft_chain_add(struct netlink_ctx *ctx, const struct cmd *cmd, - nftnl_chain_set_data(nlc, NFTNL_CHAIN_DEVICES, dev_array, - sizeof(char *) * dev_array_len); - -+ i = 0; -+ while (dev_array[i] != NULL) -+ xfree(dev_array[i++]); -+ - xfree(dev_array); - } - } -@@ -1488,7 +1500,9 @@ int mnl_nft_flowtable_add(struct netlink_ctx *ctx, const struct cmd *cmd, - unsigned int flags) - { - struct nftnl_flowtable *flo; -+ unsigned int ifname_len; - const char **dev_array; -+ char ifname[IFNAMSIZ]; - struct nlmsghdr *nlh; - int i = 0, len = 1; - struct expr *expr; -@@ -1513,13 +1527,24 @@ int mnl_nft_flowtable_add(struct netlink_ctx *ctx, const struct cmd *cmd, - list_for_each_entry(expr, &cmd->flowtable->dev_expr->expressions, list) - len++; - -- dev_array = calloc(len, sizeof(char *)); -- list_for_each_entry(expr, &cmd->flowtable->dev_expr->expressions, list) -- dev_array[i++] = expr->identifier; -+ dev_array = xmalloc(sizeof(char *) * len); -+ -+ list_for_each_entry(expr, &cmd->flowtable->dev_expr->expressions, list) { -+ ifname_len = div_round_up(expr->len, BITS_PER_BYTE); -+ memset(ifname, 0, sizeof(ifname)); -+ mpz_export_data(ifname, expr->value, BYTEORDER_HOST_ENDIAN, -+ ifname_len); -+ dev_array[i++] = xstrdup(ifname); -+ } - - dev_array[i] = NULL; - nftnl_flowtable_set_data(flo, NFTNL_FLOWTABLE_DEVICES, - dev_array, sizeof(char *) * len); -+ -+ i = 0; -+ while (dev_array[i] != NULL) -+ xfree(dev_array[i++]); -+ - free(dev_array); - - netlink_dump_flowtable(flo, ctx); -diff --git a/src/parser_bison.y b/src/parser_bison.y -index 2cdf8ec..dc87571 100644 ---- a/src/parser_bison.y -+++ b/src/parser_bison.y -@@ -1909,9 +1909,9 @@ flowtable_list_expr : flowtable_expr_member - - flowtable_expr_member : STRING - { -- $$ = symbol_expr_alloc(&@$, SYMBOL_VALUE, -- current_scope(state), -- $1); -+ $$ = constant_expr_alloc(&@$, &string_type, -+ BYTEORDER_HOST_ENDIAN, -+ strlen($1) * BITS_PER_BYTE, $1); - xfree($1); - } - ; --- -2.34.1 - diff --git a/SOURCES/0078-tests-shell-auto-removal-of-chain-hook-on-netns-remo.patch b/SOURCES/0078-tests-shell-auto-removal-of-chain-hook-on-netns-remo.patch deleted file mode 100644 index acbe3c2..0000000 
--- a/SOURCES/0078-tests-shell-auto-removal-of-chain-hook-on-netns-remo.patch +++ /dev/null @@ -1,47 +0,0 @@ -From 66369d42095a214672c1f935eed91902d4cca8d5 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 27 Apr 2022 14:37:00 +0200 -Subject: [PATCH] tests: shell: auto-removal of chain hook on netns removal - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2070924 -Upstream Status: nftables commit e632eea21f4b3 -Conflicts: Commit b4775dec9f80b ("src: ingress inet support") creating - the test not backported, RHEL8 does not support inet ingress. - Script adjusted accordingly. - -commit e632eea21f4b3d03b629a5c1ac7e776d65785873 -Author: Florian Westphal -Date: Tue Oct 19 14:07:25 2021 +0200 - - tests: shell: auto-removal of chain hook on netns removal - - This is the nft equivalent of the syzbot report that lead to - kernel commit 68a3765c659f8 - ("netfilter: nf_tables: skip netdev events generated on netns removal"). - - Signed-off-by: Florian Westphal ---- - tests/shell/testcases/chains/0043chain_ingress_0 | 11 +++++++++++ - 1 file changed, 11 insertions(+) - create mode 100755 tests/shell/testcases/chains/0043chain_ingress_0 - -diff --git a/tests/shell/testcases/chains/0043chain_ingress_0 b/tests/shell/testcases/chains/0043chain_ingress_0 -new file mode 100755 -index 0000000..09d6907 ---- /dev/null -+++ b/tests/shell/testcases/chains/0043chain_ingress_0 -@@ -0,0 +1,11 @@ -+#!/bin/bash -+ -+set -e -+ -+# Test auto-removal of chain hook on netns removal -+unshare -n bash -c "ip link add br0 type bridge; \ -+ $NFT add table netdev test; \ -+ $NFT add chain netdev test ingress { type filter hook ingress device \"br0\" priority 0\; policy drop\; } ; \ -+" || exit 1 -+ -+exit 0 --- -2.34.1 - diff --git a/SOURCES/0079-rule-memleak-in-__do_add_setelems.patch b/SOURCES/0079-rule-memleak-in-__do_add_setelems.patch deleted file mode 100644 index df12438..0000000 --- a/SOURCES/0079-rule-memleak-in-__do_add_setelems.patch +++ /dev/null @@ -1,53 +0,0 @@ -From bc2bfe4b68d213c74c634e87dee0116c066209e4 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 27 Apr 2022 14:46:47 +0200 -Subject: [PATCH] rule: memleak in __do_add_setelems() - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2073287 -Upstream Status: nftables commit b6d50bfde21b5 - -commit b6d50bfde21b5a24a606cbf22137e04e8e0f195d -Author: Pablo Neira Ayuso -Date: Thu Apr 30 14:18:45 2020 +0200 - - rule: memleak in __do_add_setelems() - - This patch invokes interval_map_decompose() with named sets: - - ==3402== 2,352 (128 direct, 2,224 indirect) bytes in 1 blocks are definitely lost in loss record 9 of 9 - ==3402== at 0x483577F: malloc (vg_replace_malloc.c:299) - ==3402== by 0x48996A8: xmalloc (utils.c:36) - ==3402== by 0x4899778: xzalloc (utils.c:65) - ==3402== by 0x487CB46: expr_alloc (expression.c:45) - ==3402== by 0x487E2A0: mapping_expr_alloc (expression.c:1140) - ==3402== by 0x4898AA8: interval_map_decompose (segtree.c:1095) - ==3402== by 0x4872BDF: __do_add_setelems (rule.c:1569) - ==3402== by 0x4872BDF: __do_add_setelems (rule.c:1559) - ==3402== by 0x4877936: do_command (rule.c:2710) - ==3402== by 0x489F1CB: nft_netlink.isra.5 (libnftables.c:42) - ==3402== by 0x489FB07: nft_run_cmd_from_filename (libnftables.c:508) - ==3402== by 0x10A9AA: main (main.c:455) - - Fixes: dd44081d91ce ("segtree: Fix add and delete of element in same batch") - Signed-off-by: Pablo Neira Ayuso ---- - src/rule.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/src/rule.c b/src/rule.c -index f7d888b..b2aa1d7 100644 ---- 
a/src/rule.c -+++ b/src/rule.c -@@ -1511,7 +1511,8 @@ static int __do_add_setelems(struct netlink_ctx *ctx, struct set *set, - if (mnl_nft_setelem_add(ctx, set, expr, flags) < 0) - return -1; - -- if (set->init != NULL && -+ if (!set_is_anonymous(set->flags) && -+ set->init != NULL && - set->flags & NFT_SET_INTERVAL && - set->desc.field_count <= 1) { - interval_map_decompose(expr); --- -2.34.1 - diff --git a/SOURCES/0080-rule-fix-element-cache-update-in-__do_add_setelems.patch b/SOURCES/0080-rule-fix-element-cache-update-in-__do_add_setelems.patch deleted file mode 100644 index 37d8031..0000000 --- a/SOURCES/0080-rule-fix-element-cache-update-in-__do_add_setelems.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 0e284af80adefc8d8738c7191eff0ca7c6ad64a6 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 27 Apr 2022 14:46:47 +0200 -Subject: [PATCH] rule: fix element cache update in __do_add_setelems() - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2073287 -Upstream Status: nftables commit e68938f2bf89f - -commit e68938f2bf89fcc9a99e12c9b7a10c1838f2a133 -Author: Pablo Neira Ayuso -Date: Thu Apr 30 16:30:15 2020 +0200 - - rule: fix element cache update in __do_add_setelems() - - The set->init and expr arguments might actually refer to the same list - of elements. Skip set element cache update introduced by dd44081d91ce - ("segtree: Fix add and delete of element in same batch") otherwise - list_splice_tail_init() actually operates with the same list as - arguments. Valgrind reports this problem as a memleak since the result - of this operation was an empty set element list. - - Fixes: dd44081d91ce ("segtree: Fix add and delete of element in same batch") - Signed-off-by: Pablo Neira Ayuso ---- - src/rule.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/rule.c b/src/rule.c -index b2aa1d7..9ae6d19 100644 ---- a/src/rule.c -+++ b/src/rule.c -@@ -1512,7 +1512,7 @@ static int __do_add_setelems(struct netlink_ctx *ctx, struct set *set, - return -1; - - if (!set_is_anonymous(set->flags) && -- set->init != NULL && -+ set->init != NULL && set->init != expr && - set->flags & NFT_SET_INTERVAL && - set->desc.field_count <= 1) { - interval_map_decompose(expr); --- -2.34.1 - diff --git a/SOURCES/0081-src-rename-CMD_OBJ_SETELEM-to-CMD_OBJ_ELEMENTS.patch b/SOURCES/0081-src-rename-CMD_OBJ_SETELEM-to-CMD_OBJ_ELEMENTS.patch deleted file mode 100644 index 3c72981..0000000 --- a/SOURCES/0081-src-rename-CMD_OBJ_SETELEM-to-CMD_OBJ_ELEMENTS.patch +++ /dev/null @@ -1,208 +0,0 @@ -From 43d5837615201d68108151e70c06cc0e90622fcc Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 27 Apr 2022 14:46:47 +0200 -Subject: [PATCH] src: rename CMD_OBJ_SETELEM to CMD_OBJ_ELEMENTS - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2073287 -Upstream Status: nftables commit 9ed076c6f5abc - -commit 9ed076c6f5abcbbad1b6b721dca29f87963f0ecc -Author: Pablo Neira Ayuso -Date: Fri May 8 14:44:01 2020 +0200 - - src: rename CMD_OBJ_SETELEM to CMD_OBJ_ELEMENTS - - The CMD_OBJ_ELEMENTS provides an expression that contains the list of - set elements. This leaves room to introduce CMD_OBJ_SETELEMS in a follow - up patch. 
- - Signed-off-by: Pablo Neira Ayuso ---- - include/rule.h | 4 ++-- - src/cache.c | 6 +++--- - src/evaluate.c | 6 +++--- - src/parser_bison.y | 8 ++++---- - src/parser_json.c | 2 +- - src/rule.c | 8 ++++---- - 6 files changed, 17 insertions(+), 17 deletions(-) - -diff --git a/include/rule.h b/include/rule.h -index 3637462..7fe607f 100644 ---- a/include/rule.h -+++ b/include/rule.h -@@ -542,7 +542,7 @@ enum cmd_ops { - * enum cmd_obj - command objects - * - * @CMD_OBJ_INVALID: invalid -- * @CMD_OBJ_SETELEM: set element(s) -+ * @CMD_OBJ_ELEMENTS: set element(s) - * @CMD_OBJ_SET: set - * @CMD_OBJ_SETS: multiple sets - * @CMD_OBJ_RULE: rule -@@ -570,7 +570,7 @@ enum cmd_ops { - */ - enum cmd_obj { - CMD_OBJ_INVALID, -- CMD_OBJ_SETELEM, -+ CMD_OBJ_ELEMENTS, - CMD_OBJ_SET, - CMD_OBJ_SETS, - CMD_OBJ_RULE, -diff --git a/src/cache.c b/src/cache.c -index 05f0d68..a45111a 100644 ---- a/src/cache.c -+++ b/src/cache.c -@@ -25,7 +25,7 @@ static unsigned int evaluate_cache_add(struct cmd *cmd, unsigned int flags) - case CMD_OBJ_FLOWTABLE: - flags |= NFT_CACHE_TABLE; - break; -- case CMD_OBJ_SETELEM: -+ case CMD_OBJ_ELEMENTS: - flags |= NFT_CACHE_TABLE | - NFT_CACHE_CHAIN | - NFT_CACHE_SET | -@@ -53,7 +53,7 @@ static unsigned int evaluate_cache_add(struct cmd *cmd, unsigned int flags) - static unsigned int evaluate_cache_del(struct cmd *cmd, unsigned int flags) - { - switch (cmd->obj) { -- case CMD_OBJ_SETELEM: -+ case CMD_OBJ_ELEMENTS: - flags |= NFT_CACHE_SETELEM; - break; - default: -@@ -66,7 +66,7 @@ static unsigned int evaluate_cache_del(struct cmd *cmd, unsigned int flags) - static unsigned int evaluate_cache_get(struct cmd *cmd, unsigned int flags) - { - switch (cmd->obj) { -- case CMD_OBJ_SETELEM: -+ case CMD_OBJ_ELEMENTS: - flags |= NFT_CACHE_TABLE | - NFT_CACHE_SET | - NFT_CACHE_SETELEM; -diff --git a/src/evaluate.c b/src/evaluate.c -index e495faf..fd6db8a 100644 ---- a/src/evaluate.c -+++ b/src/evaluate.c -@@ -3815,7 +3815,7 @@ static int table_evaluate(struct eval_ctx *ctx, struct table *table) - static int cmd_evaluate_add(struct eval_ctx *ctx, struct cmd *cmd) - { - switch (cmd->obj) { -- case CMD_OBJ_SETELEM: -+ case CMD_OBJ_ELEMENTS: - return setelem_evaluate(ctx, &cmd->expr); - case CMD_OBJ_SET: - handle_merge(&cmd->set->handle, &cmd->handle); -@@ -3847,7 +3847,7 @@ static int cmd_evaluate_add(struct eval_ctx *ctx, struct cmd *cmd) - static int cmd_evaluate_delete(struct eval_ctx *ctx, struct cmd *cmd) - { - switch (cmd->obj) { -- case CMD_OBJ_SETELEM: -+ case CMD_OBJ_ELEMENTS: - return setelem_evaluate(ctx, &cmd->expr); - case CMD_OBJ_SET: - case CMD_OBJ_RULE: -@@ -3874,7 +3874,7 @@ static int cmd_evaluate_get(struct eval_ctx *ctx, struct cmd *cmd) - struct set *set; - - switch (cmd->obj) { -- case CMD_OBJ_SETELEM: -+ case CMD_OBJ_ELEMENTS: - table = table_lookup(&cmd->handle, &ctx->nft->cache); - if (table == NULL) - return table_not_found(ctx); -diff --git a/src/parser_bison.y b/src/parser_bison.y -index dc87571..96f0a4c 100644 ---- a/src/parser_bison.y -+++ b/src/parser_bison.y -@@ -1019,7 +1019,7 @@ add_cmd : TABLE table_spec - } - | ELEMENT set_spec set_block_expr - { -- $$ = cmd_alloc(CMD_ADD, CMD_OBJ_SETELEM, &$2, &@$, $3); -+ $$ = cmd_alloc(CMD_ADD, CMD_OBJ_ELEMENTS, &$2, &@$, $3); - } - | FLOWTABLE flowtable_spec flowtable_block_alloc - '{' flowtable_block '}' -@@ -1116,7 +1116,7 @@ create_cmd : TABLE table_spec - } - | ELEMENT set_spec set_block_expr - { -- $$ = cmd_alloc(CMD_CREATE, CMD_OBJ_SETELEM, &$2, &@$, $3); -+ $$ = cmd_alloc(CMD_CREATE, CMD_OBJ_ELEMENTS, &$2, &@$, $3); 
- } - | FLOWTABLE flowtable_spec flowtable_block_alloc - '{' flowtable_block '}' -@@ -1208,7 +1208,7 @@ delete_cmd : TABLE table_spec - } - | ELEMENT set_spec set_block_expr - { -- $$ = cmd_alloc(CMD_DELETE, CMD_OBJ_SETELEM, &$2, &@$, $3); -+ $$ = cmd_alloc(CMD_DELETE, CMD_OBJ_ELEMENTS, &$2, &@$, $3); - } - | FLOWTABLE flowtable_spec - { -@@ -1266,7 +1266,7 @@ delete_cmd : TABLE table_spec - - get_cmd : ELEMENT set_spec set_block_expr - { -- $$ = cmd_alloc(CMD_GET, CMD_OBJ_SETELEM, &$2, &@$, $3); -+ $$ = cmd_alloc(CMD_GET, CMD_OBJ_ELEMENTS, &$2, &@$, $3); - } - ; - -diff --git a/src/parser_json.c b/src/parser_json.c -index 2250be9..15902a8 100644 ---- a/src/parser_json.c -+++ b/src/parser_json.c -@@ -3391,7 +3391,7 @@ static struct cmd *json_parse_cmd_add(struct json_ctx *ctx, - { "rule", CMD_OBJ_RULE, json_parse_cmd_add_rule }, - { "set", CMD_OBJ_SET, json_parse_cmd_add_set }, - { "map", CMD_OBJ_SET, json_parse_cmd_add_set }, -- { "element", CMD_OBJ_SETELEM, json_parse_cmd_add_element }, -+ { "element", CMD_OBJ_ELEMENTS, json_parse_cmd_add_element }, - { "flowtable", CMD_OBJ_FLOWTABLE, json_parse_cmd_add_flowtable }, - { "counter", CMD_OBJ_COUNTER, json_parse_cmd_add_object }, - { "quota", CMD_OBJ_QUOTA, json_parse_cmd_add_object }, -diff --git a/src/rule.c b/src/rule.c -index 9ae6d19..afb6dc9 100644 ---- a/src/rule.c -+++ b/src/rule.c -@@ -1456,7 +1456,7 @@ void cmd_free(struct cmd *cmd) - handle_free(&cmd->handle); - if (cmd->data != NULL) { - switch (cmd->obj) { -- case CMD_OBJ_SETELEM: -+ case CMD_OBJ_ELEMENTS: - expr_free(cmd->expr); - break; - case CMD_OBJ_SET: -@@ -1580,7 +1580,7 @@ static int do_command_add(struct netlink_ctx *ctx, struct cmd *cmd, bool excl) - return mnl_nft_rule_add(ctx, cmd, flags | NLM_F_APPEND); - case CMD_OBJ_SET: - return do_add_set(ctx, cmd, flags); -- case CMD_OBJ_SETELEM: -+ case CMD_OBJ_ELEMENTS: - return do_add_setelems(ctx, cmd, flags); - case CMD_OBJ_COUNTER: - case CMD_OBJ_QUOTA: -@@ -1659,7 +1659,7 @@ static int do_command_delete(struct netlink_ctx *ctx, struct cmd *cmd) - return mnl_nft_rule_del(ctx, cmd); - case CMD_OBJ_SET: - return mnl_nft_set_del(ctx, cmd); -- case CMD_OBJ_SETELEM: -+ case CMD_OBJ_ELEMENTS: - return do_delete_setelems(ctx, cmd); - case CMD_OBJ_COUNTER: - return mnl_nft_obj_del(ctx, cmd, NFT_OBJECT_COUNTER); -@@ -2519,7 +2519,7 @@ static int do_command_get(struct netlink_ctx *ctx, struct cmd *cmd) - table = table_lookup(&cmd->handle, &ctx->nft->cache); - - switch (cmd->obj) { -- case CMD_OBJ_SETELEM: -+ case CMD_OBJ_ELEMENTS: - return do_get_setelems(ctx, cmd, table); - default: - BUG("invalid command object type %u\n", cmd->obj); --- -2.34.1 - diff --git a/SOURCES/0082-src-add-CMD_OBJ_SETELEMS.patch b/SOURCES/0082-src-add-CMD_OBJ_SETELEMS.patch deleted file mode 100644 index ca0dd91..0000000 --- a/SOURCES/0082-src-add-CMD_OBJ_SETELEMS.patch +++ /dev/null @@ -1,125 +0,0 @@ -From 61c295c9dec447239ed2c84b0073594ffecf7554 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Wed, 27 Apr 2022 14:46:47 +0200 -Subject: [PATCH] src: add CMD_OBJ_SETELEMS - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2073287 -Upstream Status: nftables commit c9eae091983ae -Conflicts: Context change due to missing commit 086ec6f30c96e - ("mnl: extended error support for create command"). 
- -commit c9eae091983ae9ffcf2ca5b666bc03d5a1916c2f -Author: Pablo Neira Ayuso -Date: Fri May 8 14:44:03 2020 +0200 - - src: add CMD_OBJ_SETELEMS - - This new command type results from expanding the set definition in two - commands: One to add the set and another to add the elements. This - results in 1:1 mapping between the command object to the netlink API. - The command is then translated into a netlink message which gets a - unique sequence number. This sequence number allows to correlate the - netlink extended error reporting with the corresponding command. - - Signed-off-by: Pablo Neira Ayuso ---- - include/rule.h | 2 ++ - src/rule.c | 23 +++++++++++++++++++---- - 2 files changed, 21 insertions(+), 4 deletions(-) - -diff --git a/include/rule.h b/include/rule.h -index 7fe607f..1efd4fb 100644 ---- a/include/rule.h -+++ b/include/rule.h -@@ -545,6 +545,7 @@ enum cmd_ops { - * @CMD_OBJ_ELEMENTS: set element(s) - * @CMD_OBJ_SET: set - * @CMD_OBJ_SETS: multiple sets -+ * @CMD_OBJ_SETELEMS: set elements - * @CMD_OBJ_RULE: rule - * @CMD_OBJ_CHAIN: chain - * @CMD_OBJ_CHAINS: multiple chains -@@ -572,6 +573,7 @@ enum cmd_obj { - CMD_OBJ_INVALID, - CMD_OBJ_ELEMENTS, - CMD_OBJ_SET, -+ CMD_OBJ_SETELEMS, - CMD_OBJ_SETS, - CMD_OBJ_RULE, - CMD_OBJ_CHAIN, -diff --git a/src/rule.c b/src/rule.c -index afb6dc9..c43e0cd 100644 ---- a/src/rule.c -+++ b/src/rule.c -@@ -1352,11 +1352,11 @@ struct cmd *cmd_alloc(enum cmd_ops op, enum cmd_obj obj, - void nft_cmd_expand(struct cmd *cmd) - { - struct list_head new_cmds; -+ struct set *set, *newset; - struct flowtable *ft; - struct table *table; - struct chain *chain; - struct rule *rule; -- struct set *set; - struct obj *obj; - struct cmd *new; - struct handle h; -@@ -1412,6 +1412,18 @@ void nft_cmd_expand(struct cmd *cmd) - } - list_splice(&new_cmds, &cmd->list); - break; -+ case CMD_OBJ_SET: -+ set = cmd->set; -+ memset(&h, 0, sizeof(h)); -+ handle_merge(&h, &set->handle); -+ newset = set_clone(set); -+ newset->handle.set_id = set->handle.set_id; -+ newset->init = set->init; -+ set->init = NULL; -+ new = cmd_alloc(CMD_ADD, CMD_OBJ_SETELEMS, &h, -+ &set->location, newset); -+ list_add(&new->list, &cmd->list); -+ break; - default: - break; - } -@@ -1460,6 +1472,7 @@ void cmd_free(struct cmd *cmd) - expr_free(cmd->expr); - break; - case CMD_OBJ_SET: -+ case CMD_OBJ_SETELEMS: - set_free(cmd->set); - break; - case CMD_OBJ_RULE: -@@ -1545,7 +1558,7 @@ static int do_add_setelems(struct netlink_ctx *ctx, struct cmd *cmd, - } - - static int do_add_set(struct netlink_ctx *ctx, const struct cmd *cmd, -- uint32_t flags) -+ uint32_t flags, bool add) - { - struct set *set = cmd->set; - -@@ -1556,7 +1569,7 @@ static int do_add_set(struct netlink_ctx *ctx, const struct cmd *cmd, - &ctx->nft->output) < 0) - return -1; - } -- if (mnl_nft_set_add(ctx, cmd, flags) < 0) -+ if (add && mnl_nft_set_add(ctx, cmd, flags) < 0) - return -1; - if (set->init != NULL) { - return __do_add_setelems(ctx, set, set->init, flags); -@@ -1579,7 +1592,9 @@ static int do_command_add(struct netlink_ctx *ctx, struct cmd *cmd, bool excl) - case CMD_OBJ_RULE: - return mnl_nft_rule_add(ctx, cmd, flags | NLM_F_APPEND); - case CMD_OBJ_SET: -- return do_add_set(ctx, cmd, flags); -+ return do_add_set(ctx, cmd, flags, true); -+ case CMD_OBJ_SETELEMS: -+ return do_add_set(ctx, cmd, flags, false); - case CMD_OBJ_ELEMENTS: - return do_add_setelems(ctx, cmd, flags); - case CMD_OBJ_COUNTER: --- -2.34.1 - diff --git a/SOURCES/0083-libnftables-call-nft_cmd_expand-only-with-CMD_ADD.patch 
b/SOURCES/0083-libnftables-call-nft_cmd_expand-only-with-CMD_ADD.patch deleted file mode 100644 index 1a95385..0000000 --- a/SOURCES/0083-libnftables-call-nft_cmd_expand-only-with-CMD_ADD.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 34a7632a4d72c16d2a087fcc6450d1a783858124 Mon Sep 17 00:00:00 2001 -From: Phil Sutter -Date: Thu, 28 Apr 2022 14:14:39 +0200 -Subject: [PATCH] libnftables: call nft_cmd_expand() only with CMD_ADD - -Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2073287 -Upstream Status: nftables commit b81519f1641b5 - -commit b81519f1641b508c289ddfefc800b2c20ab243e6 -Author: Pablo Neira Ayuso -Date: Fri May 8 14:44:02 2020 +0200 - - libnftables: call nft_cmd_expand() only with CMD_ADD - - Restrict the expansion logic to the CMD_ADD command which is where this - is only required. - - Signed-off-by: Pablo Neira Ayuso ---- - src/libnftables.c | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - -diff --git a/src/libnftables.c b/src/libnftables.c -index cd2fcf2..ab01909 100644 ---- a/src/libnftables.c -+++ b/src/libnftables.c -@@ -421,8 +421,12 @@ static int nft_evaluate(struct nft_ctx *nft, struct list_head *msgs, - if (nft->state->nerrs) - return -1; - -- list_for_each_entry(cmd, cmds, list) -+ list_for_each_entry(cmd, cmds, list) { -+ if (cmd->op != CMD_ADD) -+ continue; -+ - nft_cmd_expand(cmd); -+ } - - return 0; - } --- -2.34.1 - diff --git a/SOURCES/monitor-run-tests.stderr.expect b/SOURCES/monitor-run-tests.stderr.expect new file mode 100644 index 0000000..c732d96 --- /dev/null +++ b/SOURCES/monitor-run-tests.stderr.expect @@ -0,0 +1,6 @@ +monitor: 2 tests from file object.t failed +monitor: 3 tests from file set-interval.t failed +monitor: 3 tests from file simple.t failed +echo: 2 tests from file object.t failed +echo: 3 tests from file set-interval.t failed +echo: 3 tests from file simple.t failed diff --git a/SOURCES/nat.nft b/SOURCES/nat.nft index 7079893..905179c 100644 --- a/SOURCES/nat.nft +++ b/SOURCES/nat.nft @@ -18,13 +18,21 @@ table ip nftables_svc { elements = { 192.168.122.0/24 } } + # force port randomization for non-locally originated connections using + # suspicious port values to prevent port-shadow attacks, i.e. + # accidental matching of new inbound connections vs. existing ones + chain do_masquerade { + meta iif > 0 th sport < 16384 th dport >= 32768 masquerade random + masquerade + } + # base-chain to manipulate conntrack in postrouting, # will see packets for new or related traffic only chain POSTROUTING { type nat hook postrouting priority srcnat + 20 policy accept - iifname @masq_interfaces oifname != @masq_interfaces masquerade - ip saddr @masq_ips masquerade + iifname @masq_interfaces oifname != @masq_interfaces jump do_masquerade + ip saddr @masq_ips jump do_masquerade } } diff --git a/SOURCES/nft-test.stderr.expect b/SOURCES/nft-test.stderr.expect new file mode 100644 index 0000000..d9edac9 --- /dev/null +++ b/SOURCES/nft-test.stderr.expect @@ -0,0 +1,851 @@ +any/objects.t: ERROR: line 3: I cannot create the chain 'egress' +any/objects.t: ERROR: line 16: The chain egress does not exist in netdev test-netdev. I cannot delete it. +any/rawpayload.t: ERROR: line 3: I cannot create the chain 'egress' +any/rawpayload.t: ERROR: line 8: add rule netdev test-netdev egress meta l4proto { tcp, udp, sctp} @th,16,16 { 22, 23, 80 }: This rule should not have failed. +any/rawpayload.t: ERROR: line 9: add rule netdev test-netdev egress meta l4proto tcp @th,16,16 { 22, 23, 80}: This rule should not have failed. 
+any/rawpayload.t: ERROR: line 10: add rule netdev test-netdev egress @nh,8,8 0xff: This rule should not have failed. +any/rawpayload.t: ERROR: line 11: add rule netdev test-netdev egress @nh,8,16 0x0: This rule should not have failed. +any/rawpayload.t: ERROR: line 18: add rule netdev test-netdev egress @ll,0,1 1: This rule should not have failed. +any/rawpayload.t: ERROR: line 19: add rule netdev test-netdev egress @ll,0,8 & 0x80 == 0x80: This rule should not have failed. +any/rawpayload.t: ERROR: line 20: add rule netdev test-netdev egress @ll,0,128 0xfedcba987654321001234567890abcde: This rule should not have failed. +any/rawpayload.t: ERROR: line 22: add rule inet test-inet input @ih,32,32 0x14000000: This rule should not have failed. +any/rawpayload.t: ERROR: line 22: The chain egress does not exist in netdev test-netdev. I cannot delete it. +any/quota.t: ERROR: line 3: I cannot create the chain 'egress' +any/quota.t: ERROR: line 12: add rule netdev test-netdev egress quota 1025 bytes: This rule should not have failed. +any/quota.t: ERROR: line 13: add rule netdev test-netdev egress quota 1 kbytes: This rule should not have failed. +any/quota.t: ERROR: line 14: add rule netdev test-netdev egress quota 2 kbytes: This rule should not have failed. +any/quota.t: ERROR: line 15: add rule netdev test-netdev egress quota 1025 kbytes: This rule should not have failed. +any/quota.t: ERROR: line 16: add rule netdev test-netdev egress quota 1023 mbytes: This rule should not have failed. +any/quota.t: ERROR: line 17: add rule netdev test-netdev egress quota 10230 mbytes: This rule should not have failed. +any/quota.t: ERROR: line 18: add rule netdev test-netdev egress quota 1023000 mbytes: This rule should not have failed. +any/quota.t: ERROR: line 20: add rule netdev test-netdev egress quota over 1 kbytes: This rule should not have failed. +any/quota.t: ERROR: line 21: add rule netdev test-netdev egress quota over 2 kbytes: This rule should not have failed. +any/quota.t: ERROR: line 22: add rule netdev test-netdev egress quota over 1025 kbytes: This rule should not have failed. +any/quota.t: ERROR: line 23: add rule netdev test-netdev egress quota over 1023 mbytes: This rule should not have failed. +any/quota.t: ERROR: line 24: add rule netdev test-netdev egress quota over 10230 mbytes: This rule should not have failed. +any/quota.t: ERROR: line 25: add rule netdev test-netdev egress quota over 1023000 mbytes: This rule should not have failed. +any/quota.t: ERROR: line 25: The chain egress does not exist in netdev test-netdev. I cannot delete it. +any/tcpopt.t: ERROR: line 58: add rule ip test-ip4 input reset tcp option mptcp: This rule should not have failed. +any/tcpopt.t: ERROR: line 59: add rule ip test-ip4 input reset tcp option 2: This rule should not have failed. +any/tcpopt.t: ERROR: line 60: add rule ip test-ip4 input reset tcp option 123: This rule should not have failed. +any/meta.t: ERROR: line 3: I cannot create the chain 'egress' +any/meta.t: ERROR: line 12: add rule netdev test-netdev egress meta length 1000: This rule should not have failed. +any/meta.t: ERROR: line 13: add rule netdev test-netdev egress meta length 22: This rule should not have failed. +any/meta.t: ERROR: line 14: add rule netdev test-netdev egress meta length != 233: This rule should not have failed. +any/meta.t: ERROR: line 15: add rule netdev test-netdev egress meta length 33-45: This rule should not have failed. 
+any/meta.t: ERROR: line 16: add rule netdev test-netdev egress meta length != 33-45: This rule should not have failed. +any/meta.t: ERROR: line 17: add rule netdev test-netdev egress meta length { 33, 55, 67, 88}: This rule should not have failed. +any/meta.t: ERROR: line 18: add rule netdev test-netdev egress meta length { 33-55, 67-88}: This rule should not have failed. +any/meta.t: ERROR: line 19: add rule netdev test-netdev egress meta length { 33-55, 56-88, 100-120}: This rule should not have failed. +any/meta.t: ERROR: line 20: add rule netdev test-netdev egress meta length != { 33, 55, 67, 88}: This rule should not have failed. +any/meta.t: ERROR: line 21: add rule netdev test-netdev egress meta length { 33-55, 66-88}: This rule should not have failed. +any/meta.t: ERROR: line 22: add rule netdev test-netdev egress meta length != { 33-55, 66-88}: This rule should not have failed. +any/meta.t: ERROR: line 24: add rule netdev test-netdev egress meta protocol { ip, arp, ip6, vlan }: This rule should not have failed. +any/meta.t: ERROR: line 25: add rule netdev test-netdev egress meta protocol != {ip, arp, ip6, 8021q}: This rule should not have failed. +any/meta.t: ERROR: line 26: add rule netdev test-netdev egress meta protocol ip: This rule should not have failed. +any/meta.t: ERROR: line 27: add rule netdev test-netdev egress meta protocol != ip: This rule should not have failed. +any/meta.t: ERROR: line 29: add rule netdev test-netdev egress meta l4proto 22: This rule should not have failed. +any/meta.t: ERROR: line 30: add rule netdev test-netdev egress meta l4proto != 233: This rule should not have failed. +any/meta.t: ERROR: line 31: add rule netdev test-netdev egress meta l4proto 33-45: This rule should not have failed. +any/meta.t: ERROR: line 32: add rule netdev test-netdev egress meta l4proto != 33-45: This rule should not have failed. +any/meta.t: ERROR: line 33: add rule netdev test-netdev egress meta l4proto { 33, 55, 67, 88}: This rule should not have failed. +any/meta.t: ERROR: line 34: add rule netdev test-netdev egress meta l4proto != { 33, 55, 67, 88}: This rule should not have failed. +any/meta.t: ERROR: line 35: add rule netdev test-netdev egress meta l4proto { 33-55, 66-88}: This rule should not have failed. +any/meta.t: ERROR: line 36: add rule netdev test-netdev egress meta l4proto != { 33-55, 66-88}: This rule should not have failed. +any/meta.t: ERROR: line 38: add rule netdev test-netdev egress meta priority root: This rule should not have failed. +any/meta.t: ERROR: line 39: add rule netdev test-netdev egress meta priority none: This rule should not have failed. +any/meta.t: ERROR: line 40: add rule netdev test-netdev egress meta priority 0x87654321: This rule should not have failed. +any/meta.t: ERROR: line 41: add rule netdev test-netdev egress meta priority 2271560481: This rule should not have failed. +any/meta.t: ERROR: line 42: add rule netdev test-netdev egress meta priority 1:1234: This rule should not have failed. +any/meta.t: ERROR: line 43: add rule netdev test-netdev egress meta priority bcad:dadc: This rule should not have failed. +any/meta.t: ERROR: line 44: add rule netdev test-netdev egress meta priority aabb:0: This rule should not have failed. +any/meta.t: ERROR: line 45: add rule netdev test-netdev egress meta priority != bcad:dadc: This rule should not have failed. +any/meta.t: ERROR: line 46: add rule netdev test-netdev egress meta priority != aabb:0: This rule should not have failed. 
+any/meta.t: ERROR: line 47: add rule netdev test-netdev egress meta priority bcad:dada-bcad:dadc: This rule should not have failed. +any/meta.t: ERROR: line 48: add rule netdev test-netdev egress meta priority != bcad:dada-bcad:dadc: This rule should not have failed. +any/meta.t: ERROR: line 49: add rule netdev test-netdev egress meta priority {bcad:dada, bcad:dadc, aaaa:bbbb}: This rule should not have failed. +any/meta.t: ERROR: line 50: add rule netdev test-netdev egress meta priority set cafe:beef: This rule should not have failed. +any/meta.t: ERROR: line 51: add rule netdev test-netdev egress meta priority != {bcad:dada, bcad:dadc, aaaa:bbbb}: This rule should not have failed. +any/meta.t: ERROR: line 53: add rule netdev test-netdev egress meta mark 0x4: This rule should not have failed. +any/meta.t: ERROR: line 54: add rule netdev test-netdev egress meta mark 0x32: This rule should not have failed. +any/meta.t: ERROR: line 55: add rule netdev test-netdev egress meta mark and 0x03 == 0x01: This rule should not have failed. +any/meta.t: ERROR: line 56: add rule netdev test-netdev egress meta mark and 0x03 != 0x01: This rule should not have failed. +any/meta.t: ERROR: line 57: add rule netdev test-netdev egress meta mark 0x10: This rule should not have failed. +any/meta.t: ERROR: line 58: add rule netdev test-netdev egress meta mark != 0x10: This rule should not have failed. +any/meta.t: ERROR: line 59: add rule netdev test-netdev egress meta mark 0xffffff00/24: This rule should not have failed. +any/meta.t: ERROR: line 61: add rule netdev test-netdev egress meta mark or 0x03 == 0x01: This rule should not have failed. +any/meta.t: ERROR: line 62: add rule netdev test-netdev egress meta mark or 0x03 != 0x01: This rule should not have failed. +any/meta.t: ERROR: line 63: add rule netdev test-netdev egress meta mark xor 0x03 == 0x01: This rule should not have failed. +any/meta.t: ERROR: line 64: add rule netdev test-netdev egress meta mark xor 0x03 != 0x01: This rule should not have failed. +any/meta.t: ERROR: line 66: add rule netdev test-netdev egress meta iif "lo" accept: This rule should not have failed. +any/meta.t: ERROR: line 67: add rule netdev test-netdev egress meta iif != "lo" accept: This rule should not have failed. +any/meta.t: ERROR: line 69: add rule netdev test-netdev egress meta iifname "dummy0": This rule should not have failed. +any/meta.t: ERROR: line 70: add rule netdev test-netdev egress meta iifname != "dummy0": This rule should not have failed. +any/meta.t: ERROR: line 71: add rule netdev test-netdev egress meta iifname {"dummy0", "lo"}: This rule should not have failed. +any/meta.t: ERROR: line 72: add rule netdev test-netdev egress meta iifname != {"dummy0", "lo"}: This rule should not have failed. +any/meta.t: ERROR: line 73: add rule netdev test-netdev egress meta iifname "dummy*": This rule should not have failed. +any/meta.t: ERROR: line 74: add rule netdev test-netdev egress meta iifname "dummy\*": This rule should not have failed. +any/meta.t: ERROR: line 77: add rule netdev test-netdev egress meta iiftype {ether, ppp, ipip, ipip6, loopback, sit, ipgre}: This rule should not have failed. +any/meta.t: ERROR: line 78: add rule netdev test-netdev egress meta iiftype != {ether, ppp, ipip, ipip6, loopback, sit, ipgre}: This rule should not have failed. +any/meta.t: ERROR: line 79: add rule netdev test-netdev egress meta iiftype != ether: This rule should not have failed. 
+any/meta.t: ERROR: line 80: add rule netdev test-netdev egress meta iiftype ether: This rule should not have failed. +any/meta.t: ERROR: line 81: add rule netdev test-netdev egress meta iiftype != ppp: This rule should not have failed. +any/meta.t: ERROR: line 82: add rule netdev test-netdev egress meta iiftype ppp: This rule should not have failed. +any/meta.t: ERROR: line 84: add rule netdev test-netdev egress meta oif "lo" accept: This rule should not have failed. +any/meta.t: ERROR: line 85: add rule netdev test-netdev egress meta oif != "lo" accept: This rule should not have failed. +any/meta.t: ERROR: line 87: add rule netdev test-netdev egress meta oifname "dummy0": This rule should not have failed. +any/meta.t: ERROR: line 88: add rule netdev test-netdev egress meta oifname != "dummy0": This rule should not have failed. +any/meta.t: ERROR: line 89: add rule netdev test-netdev egress meta oifname { "dummy0", "lo"}: This rule should not have failed. +any/meta.t: ERROR: line 90: add rule netdev test-netdev egress meta oifname "dummy*": This rule should not have failed. +any/meta.t: ERROR: line 91: add rule netdev test-netdev egress meta oifname "dummy\*": This rule should not have failed. +any/meta.t: ERROR: line 94: add rule netdev test-netdev egress meta oiftype {ether, ppp, ipip, ipip6, loopback, sit, ipgre}: This rule should not have failed. +any/meta.t: ERROR: line 95: add rule netdev test-netdev egress meta oiftype != {ether, ppp, ipip, ipip6, loopback, sit, ipgre}: This rule should not have failed. +any/meta.t: ERROR: line 96: add rule netdev test-netdev egress meta oiftype != ether: This rule should not have failed. +any/meta.t: ERROR: line 97: add rule netdev test-netdev egress meta oiftype ether: This rule should not have failed. +any/meta.t: ERROR: line 99: add rule netdev test-netdev egress meta skuid {"bin", "root", "daemon"} accept: This rule should not have failed. +any/meta.t: ERROR: line 100: add rule netdev test-netdev egress meta skuid != {"bin", "root", "daemon"} accept: This rule should not have failed. +any/meta.t: ERROR: line 101: add rule netdev test-netdev egress meta skuid "root": This rule should not have failed. +any/meta.t: ERROR: line 102: add rule netdev test-netdev egress meta skuid != "root": This rule should not have failed. +any/meta.t: ERROR: line 103: add rule netdev test-netdev egress meta skuid lt 3000 accept: This rule should not have failed. +any/meta.t: ERROR: line 104: add rule netdev test-netdev egress meta skuid gt 3000 accept: This rule should not have failed. +any/meta.t: ERROR: line 105: add rule netdev test-netdev egress meta skuid eq 3000 accept: This rule should not have failed. +any/meta.t: ERROR: line 106: add rule netdev test-netdev egress meta skuid 3001-3005 accept: This rule should not have failed. +any/meta.t: ERROR: line 107: add rule netdev test-netdev egress meta skuid != 2001-2005 accept: This rule should not have failed. +any/meta.t: ERROR: line 108: add rule netdev test-netdev egress meta skuid { 2001-2005, 3001-3005} accept: This rule should not have failed. +any/meta.t: ERROR: line 109: add rule netdev test-netdev egress meta skuid != { 2001-2005, 3001-3005} accept: This rule should not have failed. +any/meta.t: ERROR: line 111: add rule netdev test-netdev egress meta skgid {"bin", "root", "daemon"} accept: This rule should not have failed. +any/meta.t: ERROR: line 112: add rule netdev test-netdev egress meta skgid != {"bin", "root", "daemon"} accept: This rule should not have failed. 
+any/meta.t: ERROR: line 113: add rule netdev test-netdev egress meta skgid "root": This rule should not have failed. +any/meta.t: ERROR: line 114: add rule netdev test-netdev egress meta skgid != "root": This rule should not have failed. +any/meta.t: ERROR: line 115: add rule netdev test-netdev egress meta skgid lt 3000 accept: This rule should not have failed. +any/meta.t: ERROR: line 116: add rule netdev test-netdev egress meta skgid gt 3000 accept: This rule should not have failed. +any/meta.t: ERROR: line 117: add rule netdev test-netdev egress meta skgid eq 3000 accept: This rule should not have failed. +any/meta.t: ERROR: line 118: add rule netdev test-netdev egress meta skgid 2001-2005 accept: This rule should not have failed. +any/meta.t: ERROR: line 119: add rule netdev test-netdev egress meta skgid != 2001-2005 accept: This rule should not have failed. +any/meta.t: ERROR: line 131: add rule netdev test-netdev egress meta mark set 0xffffffc8 xor 0x16: This rule should not have failed. +any/meta.t: ERROR: line 132: add rule netdev test-netdev egress meta mark set 0x16 and 0x16: This rule should not have failed. +any/meta.t: ERROR: line 133: add rule netdev test-netdev egress meta mark set 0xffffffe9 or 0x16: This rule should not have failed. +any/meta.t: ERROR: line 134: add rule netdev test-netdev egress meta mark set 0xffffffde and 0x16: This rule should not have failed. +any/meta.t: ERROR: line 135: add rule netdev test-netdev egress meta mark set 0xf045ffde or 0x10: This rule should not have failed. +any/meta.t: ERROR: line 136: add rule netdev test-netdev egress meta mark set 0xffffffde or 0x16: This rule should not have failed. +any/meta.t: ERROR: line 137: add rule netdev test-netdev egress meta mark set 0x32 or 0xfffff: This rule should not have failed. +any/meta.t: ERROR: line 138: add rule netdev test-netdev egress meta mark set 0xfffe xor 0x16: This rule should not have failed. +any/meta.t: ERROR: line 143: add rule netdev test-netdev egress meta iif "lo": This rule should not have failed. +any/meta.t: ERROR: line 144: add rule netdev test-netdev egress meta oif "lo": This rule should not have failed. +any/meta.t: ERROR: line 145: add rule netdev test-netdev egress meta oifname "dummy2" accept: This rule should not have failed. +any/meta.t: ERROR: line 146: add rule netdev test-netdev egress meta skuid 3000: This rule should not have failed. +any/meta.t: ERROR: line 147: add rule netdev test-netdev egress meta skgid 3000: This rule should not have failed. +any/meta.t: ERROR: line 151: add rule netdev test-netdev egress meta rtclassid "cosmos": This rule should not have failed. +any/meta.t: ERROR: line 153: add rule netdev test-netdev egress meta pkttype broadcast: This rule should not have failed. +any/meta.t: ERROR: line 154: add rule netdev test-netdev egress meta pkttype host: This rule should not have failed. +any/meta.t: ERROR: line 155: add rule netdev test-netdev egress meta pkttype multicast: This rule should not have failed. +any/meta.t: ERROR: line 156: add rule netdev test-netdev egress meta pkttype != broadcast: This rule should not have failed. +any/meta.t: ERROR: line 157: add rule netdev test-netdev egress meta pkttype != host: This rule should not have failed. +any/meta.t: ERROR: line 158: add rule netdev test-netdev egress meta pkttype != multicast: This rule should not have failed. +any/meta.t: ERROR: line 160: add rule netdev test-netdev egress pkttype { broadcast, multicast} accept: This rule should not have failed. 
+any/meta.t: ERROR: line 162: add rule netdev test-netdev egress meta cpu 1: This rule should not have failed. +any/meta.t: ERROR: line 163: add rule netdev test-netdev egress meta cpu != 1: This rule should not have failed. +any/meta.t: ERROR: line 164: add rule netdev test-netdev egress meta cpu 1-3: This rule should not have failed. +any/meta.t: ERROR: line 165: add rule netdev test-netdev egress meta cpu != 1-2: This rule should not have failed. +any/meta.t: ERROR: line 166: add rule netdev test-netdev egress meta cpu { 2,3}: This rule should not have failed. +any/meta.t: ERROR: line 167: add rule netdev test-netdev egress meta cpu { 2-3, 5-7}: This rule should not have failed. +any/meta.t: ERROR: line 168: add rule netdev test-netdev egress meta cpu != { 2,3}: This rule should not have failed. +any/meta.t: ERROR: line 170: add rule netdev test-netdev egress meta iifgroup 0: This rule should not have failed. +any/meta.t: ERROR: line 171: add rule netdev test-netdev egress meta iifgroup != 0: This rule should not have failed. +any/meta.t: ERROR: line 172: add rule netdev test-netdev egress meta iifgroup "default": This rule should not have failed. +any/meta.t: ERROR: line 173: add rule netdev test-netdev egress meta iifgroup != "default": This rule should not have failed. +any/meta.t: ERROR: line 174: add rule netdev test-netdev egress meta iifgroup {"default", 11}: This rule should not have failed. +any/meta.t: ERROR: line 175: add rule netdev test-netdev egress meta iifgroup != {"default", 11}: This rule should not have failed. +any/meta.t: ERROR: line 176: add rule netdev test-netdev egress meta iifgroup { 11,33}: This rule should not have failed. +any/meta.t: ERROR: line 177: add rule netdev test-netdev egress meta iifgroup {11-33, 44-55}: This rule should not have failed. +any/meta.t: ERROR: line 178: add rule netdev test-netdev egress meta iifgroup != { 11,33}: This rule should not have failed. +any/meta.t: ERROR: line 179: add rule netdev test-netdev egress meta iifgroup != {11-33, 44-55}: This rule should not have failed. +any/meta.t: ERROR: line 180: add rule netdev test-netdev egress meta oifgroup 0: This rule should not have failed. +any/meta.t: ERROR: line 181: add rule netdev test-netdev egress meta oifgroup != 0: This rule should not have failed. +any/meta.t: ERROR: line 182: add rule netdev test-netdev egress meta oifgroup "default": This rule should not have failed. +any/meta.t: ERROR: line 183: add rule netdev test-netdev egress meta oifgroup != "default": This rule should not have failed. +any/meta.t: ERROR: line 184: add rule netdev test-netdev egress meta oifgroup {"default", 11}: This rule should not have failed. +any/meta.t: ERROR: line 185: add rule netdev test-netdev egress meta oifgroup != {"default", 11}: This rule should not have failed. +any/meta.t: ERROR: line 186: add rule netdev test-netdev egress meta oifgroup { 11,33}: This rule should not have failed. +any/meta.t: ERROR: line 187: add rule netdev test-netdev egress meta oifgroup {11-33, 44-55}: This rule should not have failed. +any/meta.t: ERROR: line 188: add rule netdev test-netdev egress meta oifgroup != { 11,33}: This rule should not have failed. +any/meta.t: ERROR: line 189: add rule netdev test-netdev egress meta oifgroup != {11-33, 44-55}: This rule should not have failed. +any/meta.t: ERROR: line 191: add rule netdev test-netdev egress meta cgroup 1048577: This rule should not have failed. 
+any/meta.t: ERROR: line 192: add rule netdev test-netdev egress meta cgroup != 1048577: This rule should not have failed. +any/meta.t: ERROR: line 193: add rule netdev test-netdev egress meta cgroup { 1048577, 1048578 }: This rule should not have failed. +any/meta.t: ERROR: line 194: add rule netdev test-netdev egress meta cgroup != { 1048577, 1048578}: This rule should not have failed. +any/meta.t: ERROR: line 195: add rule netdev test-netdev egress meta cgroup 1048577-1048578: This rule should not have failed. +any/meta.t: ERROR: line 196: add rule netdev test-netdev egress meta cgroup != 1048577-1048578: This rule should not have failed. +any/meta.t: ERROR: line 198: add rule netdev test-netdev egress meta iif . meta oif { "lo" . "lo" }: This rule should not have failed. +any/meta.t: ERROR: line 199: add rule netdev test-netdev egress meta iif . meta oif . meta mark { "lo" . "lo" . 0x0000000a }: This rule should not have failed. +any/meta.t: ERROR: line 200: add rule netdev test-netdev egress meta iif . meta oif vmap { "lo" . "lo" : drop }: This rule should not have failed. +any/meta.t: ERROR: line 202: add rule netdev test-netdev egress meta random eq 1: This rule should not have failed. +any/meta.t: ERROR: line 203: add rule netdev test-netdev egress meta random gt 1000000: This rule should not have failed. +any/meta.t: ERROR: line 205: add rule ip test-ip4 input meta time "1970-05-23 21:07:14" drop: This rule should not have failed. +any/meta.t: ERROR: line 206: add rule ip test-ip4 input meta time 12341234 drop: This rule should not have failed. +any/meta.t: ERROR: line 207: add rule ip test-ip4 input meta time "2019-06-21 17:00:00" drop: This rule should not have failed. +any/meta.t: ERROR: line 208: add rule ip test-ip4 input meta time "2019-07-01 00:00:00" drop: This rule should not have failed. +any/meta.t: ERROR: line 209: add rule ip test-ip4 input meta time "2019-07-01 00:01:00" drop: This rule should not have failed. +any/meta.t: ERROR: line 210: add rule ip test-ip4 input meta time "2019-07-01 00:00:01" drop: This rule should not have failed. +any/meta.t: ERROR: line 211: add rule ip test-ip4 input meta time < "2022-07-01 11:00:00" accept: This rule should not have failed. +any/meta.t: ERROR: line 212: add rule ip test-ip4 input meta time > "2022-07-01 11:00:00" accept: This rule should not have failed. +any/meta.t: ERROR: line 213: add rule ip test-ip4 input meta day "Saturday" drop: This rule should not have failed. +any/meta.t: ERROR: line 214: add rule ip test-ip4 input meta day 6 drop: This rule should not have failed. +any/meta.t: ERROR: line 216: add rule ip test-ip4 input meta hour "17:00" drop: This rule should not have failed. +any/meta.t: ERROR: line 217: add rule ip test-ip4 input meta hour "17:00:00" drop: This rule should not have failed. +any/meta.t: ERROR: line 218: add rule ip test-ip4 input meta hour "17:00:01" drop: This rule should not have failed. +any/meta.t: ERROR: line 219: add rule ip test-ip4 input meta hour "00:00" drop: This rule should not have failed. +any/meta.t: ERROR: line 220: add rule ip test-ip4 input meta hour "00:01" drop: This rule should not have failed. +any/meta.t: ERROR: line 221: add rule ip test-ip4 input time < "2022-07-01 11:00:00" accept: This rule should not have failed. +any/meta.t: ERROR: line 222: add rule ip test-ip4 input time > "2022-07-01 11:00:00" accept: This rule should not have failed. +any/meta.t: ERROR: line 226: The chain egress does not exist in netdev test-netdev. I cannot delete it. 
+any/limit.t: ERROR: line 3: I cannot create the chain 'egress'
+any/limit.t: ERROR: line 12: add rule netdev test-netdev egress limit rate 400/minute: This rule should not have failed.
+any/limit.t: ERROR: line 13: add rule netdev test-netdev egress limit rate 20/second: This rule should not have failed.
+any/limit.t: ERROR: line 14: add rule netdev test-netdev egress limit rate 400/hour: This rule should not have failed.
+any/limit.t: ERROR: line 15: add rule netdev test-netdev egress limit rate 40/day: This rule should not have failed.
+any/limit.t: ERROR: line 16: add rule netdev test-netdev egress limit rate 400/week: This rule should not have failed.
+any/limit.t: ERROR: line 17: add rule netdev test-netdev egress limit rate 1023/second burst 10 packets: This rule should not have failed.
+any/limit.t: ERROR: line 20: add rule netdev test-netdev egress limit rate 1 kbytes/second: This rule should not have failed.
+any/limit.t: ERROR: line 21: add rule netdev test-netdev egress limit rate 2 kbytes/second: This rule should not have failed.
+any/limit.t: ERROR: line 22: add rule netdev test-netdev egress limit rate 1025 kbytes/second: This rule should not have failed.
+any/limit.t: ERROR: line 23: add rule netdev test-netdev egress limit rate 1023 mbytes/second: This rule should not have failed.
+any/limit.t: ERROR: line 24: add rule netdev test-netdev egress limit rate 10230 mbytes/second: This rule should not have failed.
+any/limit.t: ERROR: line 25: add rule netdev test-netdev egress limit rate 1023000 mbytes/second: This rule should not have failed.
+any/limit.t: ERROR: line 28: add rule netdev test-netdev egress limit rate 1 bytes / second: This rule should not have failed.
+any/limit.t: ERROR: line 29: add rule netdev test-netdev egress limit rate 1 kbytes / second: This rule should not have failed.
+any/limit.t: ERROR: line 30: add rule netdev test-netdev egress limit rate 1 mbytes / second: This rule should not have failed.
+any/limit.t: ERROR: line 33: add rule netdev test-netdev egress limit rate 1025 bytes/second burst 512 bytes: This rule should not have failed.
+any/limit.t: ERROR: line 34: add rule netdev test-netdev egress limit rate 1025 kbytes/second burst 1023 kbytes: This rule should not have failed.
+any/limit.t: ERROR: line 35: add rule netdev test-netdev egress limit rate 1025 mbytes/second burst 1025 kbytes: This rule should not have failed.
+any/limit.t: ERROR: line 36: add rule netdev test-netdev egress limit rate 1025000 mbytes/second burst 1023 mbytes: This rule should not have failed.
+any/limit.t: ERROR: line 38: add rule netdev test-netdev egress limit rate over 400/minute: This rule should not have failed.
+any/limit.t: ERROR: line 39: add rule netdev test-netdev egress limit rate over 20/second: This rule should not have failed.
+any/limit.t: ERROR: line 40: add rule netdev test-netdev egress limit rate over 400/hour: This rule should not have failed.
+any/limit.t: ERROR: line 41: add rule netdev test-netdev egress limit rate over 40/day: This rule should not have failed.
+any/limit.t: ERROR: line 42: add rule netdev test-netdev egress limit rate over 400/week: This rule should not have failed.
+any/limit.t: ERROR: line 43: add rule netdev test-netdev egress limit rate over 1023/second burst 10 packets: This rule should not have failed.
+any/limit.t: ERROR: line 45: add rule netdev test-netdev egress limit rate over 1 kbytes/second: This rule should not have failed.
+any/limit.t: ERROR: line 46: add rule netdev test-netdev egress limit rate over 2 kbytes/second: This rule should not have failed.
+any/limit.t: ERROR: line 47: add rule netdev test-netdev egress limit rate over 1025 kbytes/second: This rule should not have failed.
+any/limit.t: ERROR: line 48: add rule netdev test-netdev egress limit rate over 1023 mbytes/second: This rule should not have failed.
+any/limit.t: ERROR: line 49: add rule netdev test-netdev egress limit rate over 10230 mbytes/second: This rule should not have failed.
+any/limit.t: ERROR: line 50: add rule netdev test-netdev egress limit rate over 1023000 mbytes/second: This rule should not have failed.
+any/limit.t: ERROR: line 52: add rule netdev test-netdev egress limit rate over 1025 bytes/second burst 512 bytes: This rule should not have failed.
+any/limit.t: ERROR: line 53: add rule netdev test-netdev egress limit rate over 1025 kbytes/second burst 1023 kbytes: This rule should not have failed.
+any/limit.t: ERROR: line 54: add rule netdev test-netdev egress limit rate over 1025 mbytes/second burst 1025 kbytes: This rule should not have failed.
+any/limit.t: ERROR: line 55: add rule netdev test-netdev egress limit rate over 1025000 mbytes/second burst 1023 mbytes: This rule should not have failed.
+any/limit.t: ERROR: line 55: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+any/ct.t: ERROR: line 62: add rule ip test-ip4 output ct mark set (meta mark | 0x10) << 8: This rule should not have failed.
+any/ct.t: ERROR: line 133: add rule ip test-ip4 output ct id 12345: This rule should not have failed.
+arp/arp.t: ERROR: line 4: I cannot create the chain 'egress'
+arp/arp.t: ERROR: line 9: add rule netdev test-netdev egress arp htype 1: This rule should not have failed.
+arp/arp.t: ERROR: line 10: add rule netdev test-netdev egress arp htype != 1: This rule should not have failed.
+arp/arp.t: ERROR: line 11: add rule netdev test-netdev egress arp htype 22: This rule should not have failed.
+arp/arp.t: ERROR: line 12: add rule netdev test-netdev egress arp htype != 233: This rule should not have failed.
+arp/arp.t: ERROR: line 13: add rule netdev test-netdev egress arp htype 33-45: This rule should not have failed.
+arp/arp.t: ERROR: line 14: add rule netdev test-netdev egress arp htype != 33-45: This rule should not have failed.
+arp/arp.t: ERROR: line 15: add rule netdev test-netdev egress arp htype { 33, 55, 67, 88}: This rule should not have failed.
+arp/arp.t: ERROR: line 16: add rule netdev test-netdev egress arp htype != { 33, 55, 67, 88}: This rule should not have failed.
+arp/arp.t: ERROR: line 18: add rule netdev test-netdev egress arp ptype 0x0800: This rule should not have failed.
+arp/arp.t: ERROR: line 20: add rule netdev test-netdev egress arp hlen 22: This rule should not have failed.
+arp/arp.t: ERROR: line 21: add rule netdev test-netdev egress arp hlen != 233: This rule should not have failed.
+arp/arp.t: ERROR: line 22: add rule netdev test-netdev egress arp hlen 33-45: This rule should not have failed.
+arp/arp.t: ERROR: line 23: add rule netdev test-netdev egress arp hlen != 33-45: This rule should not have failed.
+arp/arp.t: ERROR: line 24: add rule netdev test-netdev egress arp hlen { 33, 55, 67, 88}: This rule should not have failed.
+arp/arp.t: ERROR: line 25: add rule netdev test-netdev egress arp hlen != { 33, 55, 67, 88}: This rule should not have failed.
+arp/arp.t: ERROR: line 27: add rule netdev test-netdev egress arp plen 22: This rule should not have failed.
+arp/arp.t: ERROR: line 28: add rule netdev test-netdev egress arp plen != 233: This rule should not have failed.
+arp/arp.t: ERROR: line 29: add rule netdev test-netdev egress arp plen 33-45: This rule should not have failed.
+arp/arp.t: ERROR: line 30: add rule netdev test-netdev egress arp plen != 33-45: This rule should not have failed.
+arp/arp.t: ERROR: line 31: add rule netdev test-netdev egress arp plen { 33, 55, 67, 88}: This rule should not have failed.
+arp/arp.t: ERROR: line 32: add rule netdev test-netdev egress arp plen != { 33, 55, 67, 88}: This rule should not have failed.
+arp/arp.t: ERROR: line 34: add rule netdev test-netdev egress arp operation {nak, inreply, inrequest, rreply, rrequest, reply, request}: This rule should not have failed.
+arp/arp.t: ERROR: line 35: add rule netdev test-netdev egress arp operation != {nak, inreply, inrequest, rreply, rrequest, reply, request}: This rule should not have failed.
+arp/arp.t: ERROR: line 36: add rule netdev test-netdev egress arp operation 1-2: This rule should not have failed.
+arp/arp.t: ERROR: line 37: add rule netdev test-netdev egress arp operation request: This rule should not have failed.
+arp/arp.t: ERROR: line 38: add rule netdev test-netdev egress arp operation reply: This rule should not have failed.
+arp/arp.t: ERROR: line 39: add rule netdev test-netdev egress arp operation rrequest: This rule should not have failed.
+arp/arp.t: ERROR: line 40: add rule netdev test-netdev egress arp operation rreply: This rule should not have failed.
+arp/arp.t: ERROR: line 41: add rule netdev test-netdev egress arp operation inrequest: This rule should not have failed.
+arp/arp.t: ERROR: line 42: add rule netdev test-netdev egress arp operation inreply: This rule should not have failed.
+arp/arp.t: ERROR: line 43: add rule netdev test-netdev egress arp operation nak: This rule should not have failed.
+arp/arp.t: ERROR: line 44: add rule netdev test-netdev egress arp operation != request: This rule should not have failed.
+arp/arp.t: ERROR: line 45: add rule netdev test-netdev egress arp operation != reply: This rule should not have failed.
+arp/arp.t: ERROR: line 46: add rule netdev test-netdev egress arp operation != rrequest: This rule should not have failed.
+arp/arp.t: ERROR: line 47: add rule netdev test-netdev egress arp operation != rreply: This rule should not have failed.
+arp/arp.t: ERROR: line 48: add rule netdev test-netdev egress arp operation != inrequest: This rule should not have failed.
+arp/arp.t: ERROR: line 49: add rule netdev test-netdev egress arp operation != inreply: This rule should not have failed.
+arp/arp.t: ERROR: line 50: add rule netdev test-netdev egress arp operation != nak: This rule should not have failed.
+arp/arp.t: ERROR: line 52: add rule netdev test-netdev egress arp saddr ip 1.2.3.4: This rule should not have failed.
+arp/arp.t: ERROR: line 53: add rule netdev test-netdev egress arp daddr ip 4.3.2.1: This rule should not have failed.
+arp/arp.t: ERROR: line 54: add rule netdev test-netdev egress arp saddr ether aa:bb:cc:aa:bb:cc: This rule should not have failed.
+arp/arp.t: ERROR: line 55: add rule netdev test-netdev egress arp daddr ether aa:bb:cc:aa:bb:cc: This rule should not have failed.
+arp/arp.t: ERROR: line 57: add rule netdev test-netdev egress arp saddr ip 192.168.1.1 arp daddr ether fe:ed:00:c0:ff:ee: This rule should not have failed.
+arp/arp.t: ERROR: line 58: add rule netdev test-netdev egress arp daddr ether fe:ed:00:c0:ff:ee arp saddr ip 192.168.1.1: This rule should not have failed.
+arp/arp.t: ERROR: line 60: add rule netdev test-netdev egress meta iifname "invalid" arp ptype 0x0800 arp htype 1 arp hlen 6 arp plen 4 @nh,192,32 0xc0a88f10 @nh,144,48 set 0x112233445566: This rule should not have failed.
+arp/arp.t: ERROR: line 60: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+bridge/vlan.t: ERROR: line 3: I cannot create the chain 'egress'
+bridge/vlan.t: ERROR: line 8: add rule netdev test-netdev egress vlan id 4094: This rule should not have failed.
+bridge/vlan.t: ERROR: line 9: add rule netdev test-netdev egress vlan id 0: This rule should not have failed.
+bridge/vlan.t: ERROR: line 12: add rule netdev test-netdev egress vlan id 4094 vlan dei 0: This rule should not have failed.
+bridge/vlan.t: ERROR: line 13: add rule netdev test-netdev egress vlan id 4094 vlan dei 1: This rule should not have failed.
+bridge/vlan.t: ERROR: line 14: add rule netdev test-netdev egress vlan id 4094 vlan dei != 1: This rule should not have failed.
+bridge/vlan.t: ERROR: line 15: add rule netdev test-netdev egress vlan id 4094 vlan cfi 1: This rule should not have failed.
+bridge/vlan.t: ERROR: line 19: add rule netdev test-netdev egress vlan id 4094 vlan dei 1 vlan pcp 7: This rule should not have failed.
+bridge/vlan.t: ERROR: line 20: add rule netdev test-netdev egress vlan id 4094 vlan dei 1 vlan pcp 3: This rule should not have failed.
+bridge/vlan.t: ERROR: line 22: add rule netdev test-netdev egress ether type vlan vlan id 4094: This rule should not have failed.
+bridge/vlan.t: ERROR: line 23: add rule netdev test-netdev egress ether type vlan vlan id 0: This rule should not have failed.
+bridge/vlan.t: ERROR: line 24: add rule netdev test-netdev egress ether type vlan vlan id 4094 vlan dei 0: This rule should not have failed.
+bridge/vlan.t: ERROR: line 25: add rule netdev test-netdev egress ether type vlan vlan id 4094 vlan dei 1: This rule should not have failed.
+bridge/vlan.t: ERROR: line 28: add rule netdev test-netdev egress vlan id 4094 tcp dport 22: This rule should not have failed.
+bridge/vlan.t: ERROR: line 29: add rule netdev test-netdev egress vlan id 1 ip saddr 10.0.0.1: This rule should not have failed.
+bridge/vlan.t: ERROR: line 30: add rule netdev test-netdev egress vlan id 1 ip saddr 10.0.0.0/23: This rule should not have failed.
+bridge/vlan.t: ERROR: line 31: add rule netdev test-netdev egress vlan id 1 ip saddr 10.0.0.0/23 udp dport 53: This rule should not have failed.
+bridge/vlan.t: ERROR: line 32: add rule netdev test-netdev egress ether type vlan vlan id 1 ip saddr 10.0.0.0/23 udp dport 53: This rule should not have failed.
+bridge/vlan.t: ERROR: line 34: add rule netdev test-netdev egress vlan id { 1, 2, 4, 100, 4095 } vlan pcp 1-3: This rule should not have failed.
+bridge/vlan.t: ERROR: line 37: add rule netdev test-netdev egress ether type vlan ip protocol 1 accept: This rule should not have failed.
+bridge/vlan.t: ERROR: line 40: add rule netdev test-netdev egress ether type 8021ad vlan id 1 ip protocol 6 accept: This rule should not have failed.
+bridge/vlan.t: ERROR: line 41: add rule netdev test-netdev egress ether type 8021ad vlan id 1 vlan type 8021q vlan id 2 vlan type ip counter: This rule should not have failed.
+bridge/vlan.t: ERROR: line 42: add rule netdev test-netdev egress ether type 8021ad vlan id 1 vlan type 8021q vlan id 2 vlan type ip ip protocol 6: This rule should not have failed.
+bridge/vlan.t: ERROR: line 49: add rule netdev test-netdev egress vlan id 1 vlan id set 2: This rule should not have failed.
+bridge/vlan.t: ERROR: line 51: add rule netdev test-netdev egress ether saddr 00:01:02:03:04:05 vlan id 1: This rule should not have failed.
+bridge/vlan.t: ERROR: line 52: add rule netdev test-netdev egress vlan id 2 ether saddr 0:1:2:3:4:6: This rule should not have failed.
+bridge/vlan.t: ERROR: line 54: add rule netdev test-netdev egress ether saddr . vlan id { 0a:0b:0c:0d:0e:0f . 42, 0a:0b:0c:0d:0e:0f . 4095 }: This rule should not have failed.
+bridge/vlan.t: ERROR: line 54: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+bridge/meta.t: ERROR: line 7: add rule bridge test-bridge input meta ibrvproto vlan: This rule should not have failed.
+bridge/meta.t: ERROR: line 8: add rule bridge test-bridge input meta ibrpvid 100: This rule should not have failed.
+inet/dccp.t: ERROR: line 3: I cannot create the chain 'egress'
+inet/dccp.t: ERROR: line 10: add rule netdev test-netdev egress dccp sport 21-35: This rule should not have failed.
+inet/dccp.t: ERROR: line 11: add rule netdev test-netdev egress dccp sport != 21-35: This rule should not have failed.
+inet/dccp.t: ERROR: line 12: add rule netdev test-netdev egress dccp sport {23, 24, 25}: This rule should not have failed.
+inet/dccp.t: ERROR: line 13: add rule netdev test-netdev egress dccp sport != {23, 24, 25}: This rule should not have failed.
+inet/dccp.t: ERROR: line 15: add rule netdev test-netdev egress dccp sport 20-50: This rule should not have failed.
+inet/dccp.t: ERROR: line 19: add rule netdev test-netdev egress dccp dport {23, 24, 25}: This rule should not have failed.
+inet/dccp.t: ERROR: line 20: add rule netdev test-netdev egress dccp dport != {23, 24, 25}: This rule should not have failed.
+inet/dccp.t: ERROR: line 22: add rule netdev test-netdev egress dccp type {request, response, data, ack, dataack, closereq, close, reset, sync, syncack}: This rule should not have failed.
+inet/dccp.t: ERROR: line 23: add rule netdev test-netdev egress dccp type != {request, response, data, ack, dataack, closereq, close, reset, sync, syncack}: This rule should not have failed.
+inet/dccp.t: ERROR: line 24: add rule netdev test-netdev egress dccp type request: This rule should not have failed.
+inet/dccp.t: ERROR: line 25: add rule netdev test-netdev egress dccp type != request: This rule should not have failed.
+inet/dccp.t: ERROR: line 25: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+inet/map.t: ERROR: line 3: I cannot create the chain 'egress'
+inet/map.t: ERROR: line 9: add rule netdev test-netdev egress mark set ip saddr map { 10.2.3.2 : 0x0000002a, 10.2.3.1 : 0x00000017}: This rule should not have failed.
+inet/map.t: ERROR: line 10: add rule netdev test-netdev egress mark set ip hdrlength map { 5 : 0x00000017, 4 : 0x00000001}: This rule should not have failed.
+inet/map.t: ERROR: line 10: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+inet/synproxy.t: ERROR: line 7: add rule ip synproxyip synproxychain synproxy: This rule should not have failed.
+inet/synproxy.t: ERROR: line 8: add rule ip synproxyip synproxychain synproxy mss 1460 wscale 7: This rule should not have failed.
+inet/synproxy.t: ERROR: line 9: add rule ip synproxyip synproxychain synproxy mss 1460 wscale 5 timestamp sack-perm: This rule should not have failed.
+inet/synproxy.t: ERROR: line 10: add rule ip synproxyip synproxychain synproxy timestamp sack-perm: This rule should not have failed.
+inet/synproxy.t: ERROR: line 11: add rule ip synproxyip synproxychain synproxy timestamp: This rule should not have failed.
+inet/synproxy.t: ERROR: line 12: add rule ip synproxyip synproxychain synproxy sack-perm: This rule should not have failed.
+inet/sets.t: ERROR: line 3: I cannot create the chain 'egress'
+inet/sets.t: ERROR: line 15: add rule netdev test-netdev egress ip saddr @set1 drop: This rule should not have failed.
+inet/sets.t: ERROR: line 18: add rule netdev test-netdev egress ip6 daddr != @set2 accept: This rule should not have failed.
+inet/sets.t: ERROR: line 24: add rule netdev test-netdev egress ip saddr . ip daddr . tcp dport @set3 accept: This rule should not have failed.
+inet/sets.t: ERROR: line 25: add rule netdev test-netdev egress ip daddr . tcp dport { 10.0.0.0/8 . 10-23, 192.168.1.1-192.168.3.8 . 80-443 } accept: This rule should not have failed.
+inet/sets.t: ERROR: line 25: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+inet/ip.t: ERROR: line 3: I cannot create the chain 'egress'
+inet/ip.t: ERROR: line 10: add rule netdev test-netdev egress ip saddr . ip daddr . ether saddr { 1.1.1.1 . 2.2.2.2 . ca:fe:ca:fe:ca:fe }: This rule should not have failed.
+inet/ip.t: ERROR: line 12: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+inet/udp.t: ERROR: line 3: I cannot create the chain 'egress'
+inet/udp.t: ERROR: line 10: add rule netdev test-netdev egress udp sport 80 accept: This rule should not have failed.
+inet/udp.t: ERROR: line 11: add rule netdev test-netdev egress udp sport != 60 accept: This rule should not have failed.
+inet/udp.t: ERROR: line 12: add rule netdev test-netdev egress udp sport 50-70 accept: This rule should not have failed.
+inet/udp.t: ERROR: line 13: add rule netdev test-netdev egress udp sport != 50-60 accept: This rule should not have failed.
+inet/udp.t: ERROR: line 14: add rule netdev test-netdev egress udp sport { 49, 50} drop: This rule should not have failed.
+inet/udp.t: ERROR: line 15: add rule netdev test-netdev egress udp sport != { 50, 60} accept: This rule should not have failed.
+inet/udp.t: ERROR: line 19: add rule netdev test-netdev egress udp dport 80 accept: This rule should not have failed.
+inet/udp.t: ERROR: line 20: add rule netdev test-netdev egress udp dport != 60 accept: This rule should not have failed.
+inet/udp.t: ERROR: line 21: add rule netdev test-netdev egress udp dport 70-75 accept: This rule should not have failed.
+inet/udp.t: ERROR: line 22: add rule netdev test-netdev egress udp dport != 50-60 accept: This rule should not have failed.
+inet/udp.t: ERROR: line 23: add rule netdev test-netdev egress udp dport { 49, 50} drop: This rule should not have failed.
+inet/udp.t: ERROR: line 24: add rule netdev test-netdev egress udp dport != { 50, 60} accept: This rule should not have failed.
+inet/udp.t: ERROR: line 26: add rule netdev test-netdev egress udp length 6666: This rule should not have failed.
+inet/udp.t: ERROR: line 27: add rule netdev test-netdev egress udp length != 6666: This rule should not have failed.
+inet/udp.t: ERROR: line 28: add rule netdev test-netdev egress udp length 50-65 accept: This rule should not have failed.
+inet/udp.t: ERROR: line 29: add rule netdev test-netdev egress udp length != 50-65 accept: This rule should not have failed.
+inet/udp.t: ERROR: line 30: add rule netdev test-netdev egress udp length { 50, 65} accept: This rule should not have failed.
+inet/udp.t: ERROR: line 31: add rule netdev test-netdev egress udp length != { 50, 65} accept: This rule should not have failed.
+inet/udp.t: ERROR: line 33: add rule netdev test-netdev egress udp checksum 6666 drop: This rule should not have failed.
+inet/udp.t: ERROR: line 34: add rule netdev test-netdev egress udp checksum != { 444, 555} accept: This rule should not have failed.
+inet/udp.t: ERROR: line 36: add rule netdev test-netdev egress udp checksum 22: This rule should not have failed.
+inet/udp.t: ERROR: line 37: add rule netdev test-netdev egress udp checksum != 233: This rule should not have failed.
+inet/udp.t: ERROR: line 38: add rule netdev test-netdev egress udp checksum 33-45: This rule should not have failed.
+inet/udp.t: ERROR: line 39: add rule netdev test-netdev egress udp checksum != 33-45: This rule should not have failed.
+inet/udp.t: ERROR: line 40: add rule netdev test-netdev egress udp checksum { 33, 55, 67, 88}: This rule should not have failed.
+inet/udp.t: ERROR: line 41: add rule netdev test-netdev egress udp checksum != { 33, 55, 67, 88}: This rule should not have failed.
+inet/udp.t: ERROR: line 44: add rule netdev test-netdev egress iif "lo" udp checksum set 0: This rule should not have failed.
+inet/udp.t: ERROR: line 45: add rule netdev test-netdev egress iif "lo" udp dport set 65535: This rule should not have failed.
+inet/udp.t: ERROR: line 45: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+inet/ether.t: ERROR: line 3: I cannot create the chain 'egress'
+inet/ether.t: ERROR: line 11: add rule netdev test-netdev egress tcp dport 22 iiftype ether ether saddr 00:0f:54:0c:11:4 accept: This rule should not have failed.
+inet/ether.t: ERROR: line 12: add rule netdev test-netdev egress tcp dport 22 ether saddr 00:0f:54:0c:11:04 accept: This rule should not have failed.
+inet/ether.t: ERROR: line 14: add rule netdev test-netdev egress ether saddr 00:0f:54:0c:11:04 accept: This rule should not have failed.
+inet/ether.t: ERROR: line 14: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+inet/comp.t: ERROR: line 3: I cannot create the chain 'egress'
+inet/comp.t: ERROR: line 12: add rule netdev test-netdev egress comp nexthdr != esp: This rule should not have failed.
+inet/comp.t: ERROR: line 18: add rule netdev test-netdev egress comp flags 0x0: This rule should not have failed.
+inet/comp.t: ERROR: line 19: add rule netdev test-netdev egress comp flags != 0x23: This rule should not have failed.
+inet/comp.t: ERROR: line 20: add rule netdev test-netdev egress comp flags 0x33-0x45: This rule should not have failed.
+inet/comp.t: ERROR: line 21: add rule netdev test-netdev egress comp flags != 0x33-0x45: This rule should not have failed.
+inet/comp.t: ERROR: line 22: add rule netdev test-netdev egress comp flags {0x33, 0x55, 0x67, 0x88}: This rule should not have failed.
+inet/comp.t: ERROR: line 23: add rule netdev test-netdev egress comp flags != {0x33, 0x55, 0x67, 0x88}: This rule should not have failed.
+inet/comp.t: ERROR: line 25: add rule netdev test-netdev egress comp cpi 22: This rule should not have failed.
+inet/comp.t: ERROR: line 26: add rule netdev test-netdev egress comp cpi != 233: This rule should not have failed.
+inet/comp.t: ERROR: line 27: add rule netdev test-netdev egress comp cpi 33-45: This rule should not have failed.
+inet/comp.t: ERROR: line 28: add rule netdev test-netdev egress comp cpi != 33-45: This rule should not have failed.
+inet/comp.t: ERROR: line 29: add rule netdev test-netdev egress comp cpi {33, 55, 67, 88}: This rule should not have failed.
+inet/comp.t: ERROR: line 30: add rule netdev test-netdev egress comp cpi != {33, 55, 67, 88}: This rule should not have failed.
+inet/comp.t: ERROR: line 30: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+inet/udplite.t: ERROR: line 3: I cannot create the chain 'egress'
+inet/udplite.t: ERROR: line 10: add rule netdev test-netdev egress udplite sport 80 accept: This rule should not have failed.
+inet/udplite.t: ERROR: line 11: add rule netdev test-netdev egress udplite sport != 60 accept: This rule should not have failed.
+inet/udplite.t: ERROR: line 12: add rule netdev test-netdev egress udplite sport 50-70 accept: This rule should not have failed.
+inet/udplite.t: ERROR: line 13: add rule netdev test-netdev egress udplite sport != 50-60 accept: This rule should not have failed.
+inet/udplite.t: ERROR: line 14: add rule netdev test-netdev egress udplite sport { 49, 50} drop: This rule should not have failed.
+inet/udplite.t: ERROR: line 15: add rule netdev test-netdev egress udplite sport != { 49, 50} accept: This rule should not have failed.
+inet/udplite.t: ERROR: line 17: add rule netdev test-netdev egress udplite dport 80 accept: This rule should not have failed.
+inet/udplite.t: ERROR: line 18: add rule netdev test-netdev egress udplite dport != 60 accept: This rule should not have failed.
+inet/udplite.t: ERROR: line 19: add rule netdev test-netdev egress udplite dport 70-75 accept: This rule should not have failed.
+inet/udplite.t: ERROR: line 20: add rule netdev test-netdev egress udplite dport != 50-60 accept: This rule should not have failed.
+inet/udplite.t: ERROR: line 21: add rule netdev test-netdev egress udplite dport { 49, 50} drop: This rule should not have failed.
+inet/udplite.t: ERROR: line 22: add rule netdev test-netdev egress udplite dport != { 49, 50} accept: This rule should not have failed.
+inet/udplite.t: ERROR: line 31: add rule netdev test-netdev egress udplite checksum 6666 drop: This rule should not have failed.
+inet/udplite.t: ERROR: line 32: add rule netdev test-netdev egress udplite checksum != { 444, 555} accept: This rule should not have failed.
+inet/udplite.t: ERROR: line 33: add rule netdev test-netdev egress udplite checksum 22: This rule should not have failed.
+inet/udplite.t: ERROR: line 34: add rule netdev test-netdev egress udplite checksum != 233: This rule should not have failed.
+inet/udplite.t: ERROR: line 35: add rule netdev test-netdev egress udplite checksum 33-45: This rule should not have failed.
+inet/udplite.t: ERROR: line 36: add rule netdev test-netdev egress udplite checksum != 33-45: This rule should not have failed.
+inet/udplite.t: ERROR: line 37: add rule netdev test-netdev egress udplite checksum { 33, 55, 67, 88}: This rule should not have failed.
+inet/udplite.t: ERROR: line 38: add rule netdev test-netdev egress udplite checksum != { 33, 55, 67, 88}: This rule should not have failed.
+inet/udplite.t: ERROR: line 38: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+inet/osf.t: ERROR: line 7: add rule ip osfip osfchain osf name "Linux": This rule should not have failed.
+inet/osf.t: ERROR: line 8: add rule ip osfip osfchain osf ttl loose name "Linux": This rule should not have failed.
+inet/osf.t: ERROR: line 9: add rule ip osfip osfchain osf ttl skip name "Linux": This rule should not have failed.
+inet/osf.t: ERROR: line 10: add rule ip osfip osfchain osf ttl skip version "Linux:3.0": This rule should not have failed.
+inet/osf.t: ERROR: line 15: add rule ip osfip osfchain osf name { "Windows", "MacOs" }: This rule should not have failed.
+inet/osf.t: ERROR: line 16: add rule ip osfip osfchain osf version { "Windows:XP", "MacOs:Sierra" }: This rule should not have failed.
+inet/osf.t: ERROR: line 17: add rule ip osfip osfchain ct mark set osf name map { "Windows" : 0x00000001, "MacOs" : 0x00000002 }: This rule should not have failed.
+inet/osf.t: ERROR: line 18: add rule ip osfip osfchain ct mark set osf version map { "Windows:XP" : 0x00000003, "MacOs:Sierra" : 0x00000004 }: This rule should not have failed.
+inet/tcp.t: ERROR: line 3: I cannot create the chain 'egress'
+inet/tcp.t: ERROR: line 12: add rule netdev test-netdev egress tcp dport 22: This rule should not have failed.
+inet/tcp.t: ERROR: line 13: add rule netdev test-netdev egress tcp dport != 233: This rule should not have failed.
+inet/tcp.t: ERROR: line 14: add rule netdev test-netdev egress tcp dport 33-45: This rule should not have failed.
+inet/tcp.t: ERROR: line 15: add rule netdev test-netdev egress tcp dport != 33-45: This rule should not have failed.
+inet/tcp.t: ERROR: line 16: add rule netdev test-netdev egress tcp dport { 33, 55, 67, 88}: This rule should not have failed.
+inet/tcp.t: ERROR: line 17: add rule netdev test-netdev egress tcp dport != { 33, 55, 67, 88}: This rule should not have failed.
+inet/tcp.t: ERROR: line 18: add rule netdev test-netdev egress tcp dport {telnet, http, https} accept: This rule should not have failed.
+inet/tcp.t: ERROR: line 19: add rule netdev test-netdev egress tcp dport vmap { 22 : accept, 23 : drop }: This rule should not have failed.
+inet/tcp.t: ERROR: line 20: add rule netdev test-netdev egress tcp dport vmap { 25:accept, 28:drop }: This rule should not have failed.
+inet/tcp.t: ERROR: line 21: add rule netdev test-netdev egress tcp dport { 22, 53, 80, 110 }: This rule should not have failed.
+inet/tcp.t: ERROR: line 22: add rule netdev test-netdev egress tcp dport != { 22, 53, 80, 110 }: This rule should not have failed.
+inet/tcp.t: ERROR: line 26: add rule netdev test-netdev egress tcp sport 22: This rule should not have failed.
+inet/tcp.t: ERROR: line 27: add rule netdev test-netdev egress tcp sport != 233: This rule should not have failed.
+inet/tcp.t: ERROR: line 28: add rule netdev test-netdev egress tcp sport 33-45: This rule should not have failed.
+inet/tcp.t: ERROR: line 29: add rule netdev test-netdev egress tcp sport != 33-45: This rule should not have failed.
+inet/tcp.t: ERROR: line 30: add rule netdev test-netdev egress tcp sport { 33, 55, 67, 88}: This rule should not have failed.
+inet/tcp.t: ERROR: line 31: add rule netdev test-netdev egress tcp sport != { 33, 55, 67, 88}: This rule should not have failed.
+inet/tcp.t: ERROR: line 32: add rule netdev test-netdev egress tcp sport vmap { 25:accept, 28:drop }: This rule should not have failed.
+inet/tcp.t: ERROR: line 34: add rule netdev test-netdev egress tcp sport 8080 drop: This rule should not have failed.
+inet/tcp.t: ERROR: line 35: add rule netdev test-netdev egress tcp sport 1024 tcp dport 22: This rule should not have failed.
+inet/tcp.t: ERROR: line 36: add rule netdev test-netdev egress tcp sport 1024 tcp dport 22 tcp sequence 0: This rule should not have failed.
+inet/tcp.t: ERROR: line 38: add rule netdev test-netdev egress tcp sequence 0 tcp sport 1024 tcp dport 22: This rule should not have failed.
+inet/tcp.t: ERROR: line 39: add rule netdev test-netdev egress tcp sequence 0 tcp sport { 1024, 1022} tcp dport 22: This rule should not have failed.
+inet/tcp.t: ERROR: line 41: add rule netdev test-netdev egress tcp sequence 22: This rule should not have failed.
+inet/tcp.t: ERROR: line 42: add rule netdev test-netdev egress tcp sequence != 233: This rule should not have failed.
+inet/tcp.t: ERROR: line 43: add rule netdev test-netdev egress tcp sequence 33-45: This rule should not have failed.
+inet/tcp.t: ERROR: line 44: add rule netdev test-netdev egress tcp sequence != 33-45: This rule should not have failed.
+inet/tcp.t: ERROR: line 45: add rule netdev test-netdev egress tcp sequence { 33, 55, 67, 88}: This rule should not have failed.
+inet/tcp.t: ERROR: line 46: add rule netdev test-netdev egress tcp sequence != { 33, 55, 67, 88}: This rule should not have failed.
+inet/tcp.t: ERROR: line 48: add rule netdev test-netdev egress tcp ackseq 42949672 drop: This rule should not have failed.
+inet/tcp.t: ERROR: line 49: add rule netdev test-netdev egress tcp ackseq 22: This rule should not have failed.
+inet/tcp.t: ERROR: line 50: add rule netdev test-netdev egress tcp ackseq != 233: This rule should not have failed.
+inet/tcp.t: ERROR: line 51: add rule netdev test-netdev egress tcp ackseq 33-45: This rule should not have failed.
+inet/tcp.t: ERROR: line 52: add rule netdev test-netdev egress tcp ackseq != 33-45: This rule should not have failed.
+inet/tcp.t: ERROR: line 53: add rule netdev test-netdev egress tcp ackseq { 33, 55, 67, 88}: This rule should not have failed.
+inet/tcp.t: ERROR: line 54: add rule netdev test-netdev egress tcp ackseq != { 33, 55, 67, 88}: This rule should not have failed.
+inet/tcp.t: ERROR: line 66: add rule netdev test-netdev egress tcp flags { fin, syn, rst, psh, ack, urg, ecn, cwr} drop: This rule should not have failed.
+inet/tcp.t: ERROR: line 67: add rule netdev test-netdev egress tcp flags != { fin, urg, ecn, cwr} drop: This rule should not have failed.
+inet/tcp.t: ERROR: line 68: add rule netdev test-netdev egress tcp flags cwr: This rule should not have failed.
+inet/tcp.t: ERROR: line 69: add rule netdev test-netdev egress tcp flags != cwr: This rule should not have failed.
+inet/tcp.t: ERROR: line 70: add rule netdev test-netdev egress tcp flags == syn: This rule should not have failed.
+inet/tcp.t: ERROR: line 71: add rule netdev test-netdev egress tcp flags fin,syn / fin,syn: This rule should not have failed.
+inet/tcp.t: ERROR: line 72: add rule netdev test-netdev egress tcp flags != syn / fin,syn: This rule should not have failed.
+inet/tcp.t: ERROR: line 73: add rule netdev test-netdev egress tcp flags & syn != 0: This rule should not have failed.
+inet/tcp.t: ERROR: line 74: add rule netdev test-netdev egress tcp flags & syn == 0: This rule should not have failed.
+inet/tcp.t: ERROR: line 75: add rule netdev test-netdev egress tcp flags & (syn | ack) != 0: This rule should not have failed.
+inet/tcp.t: ERROR: line 76: add rule netdev test-netdev egress tcp flags & (syn | ack) == 0: This rule should not have failed.
+inet/tcp.t: ERROR: line 78: add rule netdev test-netdev egress tcp flags & syn == syn: This rule should not have failed.
+inet/tcp.t: ERROR: line 79: add rule netdev test-netdev egress tcp flags & syn != syn: This rule should not have failed.
+inet/tcp.t: ERROR: line 80: add rule netdev test-netdev egress tcp flags & (fin | syn | rst | ack) syn: This rule should not have failed.
+inet/tcp.t: ERROR: line 81: add rule netdev test-netdev egress tcp flags & (fin | syn | rst | ack) == syn: This rule should not have failed.
+inet/tcp.t: ERROR: line 82: add rule netdev test-netdev egress tcp flags & (fin | syn | rst | ack) != syn: This rule should not have failed.
+inet/tcp.t: ERROR: line 83: add rule netdev test-netdev egress tcp flags & (fin | syn | rst | ack) == (syn | ack): This rule should not have failed.
+inet/tcp.t: ERROR: line 84: add rule netdev test-netdev egress tcp flags & (fin | syn | rst | ack) != (syn | ack): This rule should not have failed.
+inet/tcp.t: ERROR: line 85: add rule netdev test-netdev egress tcp flags & (syn | ack) == (syn | ack): This rule should not have failed.
+inet/tcp.t: ERROR: line 86: add rule netdev test-netdev egress tcp flags & (fin | syn | rst | psh | ack | urg | ecn | cwr) == fin | syn | rst | psh | ack | urg | ecn | cwr: This rule should not have failed.
+inet/tcp.t: ERROR: line 87: add rule netdev test-netdev egress tcp flags { syn, syn | ack }: This rule should not have failed.
+inet/tcp.t: ERROR: line 88: add rule netdev test-netdev egress tcp flags & (fin | syn | rst | psh | ack | urg) == { fin, ack, psh | ack, fin | psh | ack }: This rule should not have failed.
+inet/tcp.t: ERROR: line 89: add rule netdev test-netdev egress tcp flags ! fin,rst: This rule should not have failed.
+inet/tcp.t: ERROR: line 92: add rule netdev test-netdev egress tcp window 22222: This rule should not have failed.
+inet/tcp.t: ERROR: line 93: add rule netdev test-netdev egress tcp window 22: This rule should not have failed.
+inet/tcp.t: ERROR: line 94: add rule netdev test-netdev egress tcp window != 233: This rule should not have failed.
+inet/tcp.t: ERROR: line 95: add rule netdev test-netdev egress tcp window 33-45: This rule should not have failed.
+inet/tcp.t: ERROR: line 96: add rule netdev test-netdev egress tcp window != 33-45: This rule should not have failed.
+inet/tcp.t: ERROR: line 97: add rule netdev test-netdev egress tcp window { 33, 55, 67, 88}: This rule should not have failed.
+inet/tcp.t: ERROR: line 98: add rule netdev test-netdev egress tcp window != { 33, 55, 67, 88}: This rule should not have failed.
+inet/tcp.t: ERROR: line 100: add rule netdev test-netdev egress tcp checksum 22: This rule should not have failed.
+inet/tcp.t: ERROR: line 101: add rule netdev test-netdev egress tcp checksum != 233: This rule should not have failed.
+inet/tcp.t: ERROR: line 102: add rule netdev test-netdev egress tcp checksum 33-45: This rule should not have failed.
+inet/tcp.t: ERROR: line 103: add rule netdev test-netdev egress tcp checksum != 33-45: This rule should not have failed.
+inet/tcp.t: ERROR: line 104: add rule netdev test-netdev egress tcp checksum { 33, 55, 67, 88}: This rule should not have failed.
+inet/tcp.t: ERROR: line 105: add rule netdev test-netdev egress tcp checksum != { 33, 55, 67, 88}: This rule should not have failed.
+inet/tcp.t: ERROR: line 107: add rule netdev test-netdev egress tcp urgptr 1234 accept: This rule should not have failed.
+inet/tcp.t: ERROR: line 108: add rule netdev test-netdev egress tcp urgptr 22: This rule should not have failed.
+inet/tcp.t: ERROR: line 109: add rule netdev test-netdev egress tcp urgptr != 233: This rule should not have failed.
+inet/tcp.t: ERROR: line 110: add rule netdev test-netdev egress tcp urgptr 33-45: This rule should not have failed.
+inet/tcp.t: ERROR: line 111: add rule netdev test-netdev egress tcp urgptr != 33-45: This rule should not have failed.
+inet/tcp.t: ERROR: line 112: add rule netdev test-netdev egress tcp urgptr { 33, 55, 67, 88}: This rule should not have failed.
+inet/tcp.t: ERROR: line 113: add rule netdev test-netdev egress tcp urgptr != { 33, 55, 67, 88}: This rule should not have failed.
+inet/tcp.t: ERROR: line 115: add rule netdev test-netdev egress tcp doff 8: This rule should not have failed.
+inet/tcp.t: ERROR: line 115: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+inet/ip_tcp.t: ERROR: line 3: I cannot create the chain 'egress'
+inet/ip_tcp.t: ERROR: line 10: add rule netdev test-netdev egress ip protocol tcp tcp dport 22: This rule should not have failed.
+inet/ip_tcp.t: ERROR: line 13: add rule netdev test-netdev egress ip protocol tcp ip saddr 1.2.3.4 tcp dport 22: This rule should not have failed.
+inet/ip_tcp.t: ERROR: line 16: add rule netdev test-netdev egress ip protocol tcp counter ip saddr 1.2.3.4 tcp dport 22: This rule should not have failed.
+inet/ip_tcp.t: ERROR: line 19: add rule netdev test-netdev egress ip protocol tcp counter tcp dport 22: This rule should not have failed.
+inet/ip_tcp.t: ERROR: line 21: add rule netdev test-netdev egress ether type ip tcp dport 22: This rule should not have failed.
+inet/ip_tcp.t: ERROR: line 21: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+inet/meta.t: ERROR: line 23: add rule inet test-inet input meta mark set ct mark >> 8: This rule should not have failed.
+inet/ah.t: ERROR: line 3: I cannot create the chain 'egress'
+inet/ah.t: ERROR: line 22: add rule netdev test-netdev egress ah hdrlength 11-23: This rule should not have failed.
+inet/ah.t: ERROR: line 23: add rule netdev test-netdev egress ah hdrlength != 11-23: This rule should not have failed.
+inet/ah.t: ERROR: line 24: add rule netdev test-netdev egress ah hdrlength {11, 23, 44 }: This rule should not have failed.
+inet/ah.t: ERROR: line 25: add rule netdev test-netdev egress ah hdrlength != {11, 23, 44 }: This rule should not have failed.
+inet/ah.t: ERROR: line 27: add rule netdev test-netdev egress ah reserved 22: This rule should not have failed.
+inet/ah.t: ERROR: line 28: add rule netdev test-netdev egress ah reserved != 233: This rule should not have failed.
+inet/ah.t: ERROR: line 29: add rule netdev test-netdev egress ah reserved 33-45: This rule should not have failed.
+inet/ah.t: ERROR: line 30: add rule netdev test-netdev egress ah reserved != 33-45: This rule should not have failed.
+inet/ah.t: ERROR: line 31: add rule netdev test-netdev egress ah reserved {23, 100}: This rule should not have failed.
+inet/ah.t: ERROR: line 32: add rule netdev test-netdev egress ah reserved != {23, 100}: This rule should not have failed.
+inet/ah.t: ERROR: line 34: add rule netdev test-netdev egress ah spi 111: This rule should not have failed.
+inet/ah.t: ERROR: line 35: add rule netdev test-netdev egress ah spi != 111: This rule should not have failed.
+inet/ah.t: ERROR: line 36: add rule netdev test-netdev egress ah spi 111-222: This rule should not have failed.
+inet/ah.t: ERROR: line 37: add rule netdev test-netdev egress ah spi != 111-222: This rule should not have failed.
+inet/ah.t: ERROR: line 38: add rule netdev test-netdev egress ah spi {111, 122}: This rule should not have failed.
+inet/ah.t: ERROR: line 39: add rule netdev test-netdev egress ah spi != {111, 122}: This rule should not have failed.
+inet/ah.t: ERROR: line 42: add rule netdev test-netdev egress ah sequence 123: This rule should not have failed.
+inet/ah.t: ERROR: line 43: add rule netdev test-netdev egress ah sequence != 123: This rule should not have failed.
+inet/ah.t: ERROR: line 44: add rule netdev test-netdev egress ah sequence {23, 25, 33}: This rule should not have failed.
+inet/ah.t: ERROR: line 45: add rule netdev test-netdev egress ah sequence != {23, 25, 33}: This rule should not have failed.
+inet/ah.t: ERROR: line 46: add rule netdev test-netdev egress ah sequence 23-33: This rule should not have failed.
+inet/ah.t: ERROR: line 47: add rule netdev test-netdev egress ah sequence != 23-33: This rule should not have failed.
+inet/ah.t: ERROR: line 47: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+inet/vmap.t: ERROR: line 3: I cannot create the chain 'egress'
+inet/vmap.t: ERROR: line 8: add rule netdev test-netdev egress iifname . ip protocol . th dport vmap { "eth0" . tcp . 22 : accept, "eth1" . udp . 67 : drop }: This rule should not have failed.
+inet/vmap.t: ERROR: line 9: add rule inet test-inet input ip saddr . @ih,32,32 { 1.1.1.1 . 0x14, 2.2.2.2 . 0x1e }: This rule should not have failed.
+inet/vmap.t: ERROR: line 10: add rule netdev test-netdev egress udp length . @th,160,128 vmap { 47-63 . 0xe373135363130333131303735353203 : accept }: This rule should not have failed.
+inet/vmap.t: ERROR: line 10: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+inet/ether-ip.t: ERROR: line 3: I cannot create the chain 'egress'
+inet/ether-ip.t: ERROR: line 8: add rule netdev test-netdev egress tcp dport 22 iiftype ether ip daddr 1.2.3.4 ether saddr 00:0f:54:0c:11:4 accept: This rule should not have failed.
+inet/ether-ip.t: ERROR: line 9: add rule netdev test-netdev egress tcp dport 22 ip daddr 1.2.3.4 ether saddr 00:0f:54:0c:11:04: This rule should not have failed.
+inet/ether-ip.t: ERROR: line 9: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+inet/esp.t: ERROR: line 3: I cannot create the chain 'egress'
+inet/esp.t: ERROR: line 10: add rule netdev test-netdev egress esp spi 100: This rule should not have failed.
+inet/esp.t: ERROR: line 11: add rule netdev test-netdev egress esp spi != 100: This rule should not have failed.
+inet/esp.t: ERROR: line 12: add rule netdev test-netdev egress esp spi 111-222: This rule should not have failed.
+inet/esp.t: ERROR: line 13: add rule netdev test-netdev egress esp spi != 111-222: This rule should not have failed.
+inet/esp.t: ERROR: line 14: add rule netdev test-netdev egress esp spi { 100, 102}: This rule should not have failed.
+inet/esp.t: ERROR: line 15: add rule netdev test-netdev egress esp spi != { 100, 102}: This rule should not have failed.
+inet/esp.t: ERROR: line 17: add rule netdev test-netdev egress esp sequence 22: This rule should not have failed.
+inet/esp.t: ERROR: line 18: add rule netdev test-netdev egress esp sequence 22-24: This rule should not have failed.
+inet/esp.t: ERROR: line 19: add rule netdev test-netdev egress esp sequence != 22-24: This rule should not have failed.
+inet/esp.t: ERROR: line 20: add rule netdev test-netdev egress esp sequence { 22, 24}: This rule should not have failed.
+inet/esp.t: ERROR: line 21: add rule netdev test-netdev egress esp sequence != { 22, 24}: This rule should not have failed.
+inet/esp.t: ERROR: line 21: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+inet/sctp.t: ERROR: line 3: I cannot create the chain 'egress'
+inet/sctp.t: ERROR: line 10: add rule netdev test-netdev egress sctp sport 23: This rule should not have failed.
+inet/sctp.t: ERROR: line 11: add rule netdev test-netdev egress sctp sport != 23: This rule should not have failed.
+inet/sctp.t: ERROR: line 12: add rule netdev test-netdev egress sctp sport 23-44: This rule should not have failed.
+inet/sctp.t: ERROR: line 13: add rule netdev test-netdev egress sctp sport != 23-44: This rule should not have failed.
+inet/sctp.t: ERROR: line 14: add rule netdev test-netdev egress sctp sport { 23, 24, 25}: This rule should not have failed.
+inet/sctp.t: ERROR: line 15: add rule netdev test-netdev egress sctp sport != { 23, 24, 25}: This rule should not have failed.
+inet/sctp.t: ERROR: line 17: add rule netdev test-netdev egress sctp dport 23: This rule should not have failed.
+inet/sctp.t: ERROR: line 18: add rule netdev test-netdev egress sctp dport != 23: This rule should not have failed.
+inet/sctp.t: ERROR: line 19: add rule netdev test-netdev egress sctp dport 23-44: This rule should not have failed.
+inet/sctp.t: ERROR: line 20: add rule netdev test-netdev egress sctp dport != 23-44: This rule should not have failed.
+inet/sctp.t: ERROR: line 21: add rule netdev test-netdev egress sctp dport { 23, 24, 25}: This rule should not have failed.
+inet/sctp.t: ERROR: line 22: add rule netdev test-netdev egress sctp dport != { 23, 24, 25}: This rule should not have failed.
+inet/sctp.t: ERROR: line 24: add rule netdev test-netdev egress sctp checksum 1111: This rule should not have failed.
+inet/sctp.t: ERROR: line 25: add rule netdev test-netdev egress sctp checksum != 11: This rule should not have failed.
+inet/sctp.t: ERROR: line 26: add rule netdev test-netdev egress sctp checksum 21-333: This rule should not have failed.
+inet/sctp.t: ERROR: line 27: add rule netdev test-netdev egress sctp checksum != 32-111: This rule should not have failed.
+inet/sctp.t: ERROR: line 28: add rule netdev test-netdev egress sctp checksum { 22, 33, 44}: This rule should not have failed.
+inet/sctp.t: ERROR: line 29: add rule netdev test-netdev egress sctp checksum != { 22, 33, 44}: This rule should not have failed.
+inet/sctp.t: ERROR: line 31: add rule netdev test-netdev egress sctp vtag 22: This rule should not have failed.
+inet/sctp.t: ERROR: line 32: add rule netdev test-netdev egress sctp vtag != 233: This rule should not have failed.
+inet/sctp.t: ERROR: line 33: add rule netdev test-netdev egress sctp vtag 33-45: This rule should not have failed.
+inet/sctp.t: ERROR: line 34: add rule netdev test-netdev egress sctp vtag != 33-45: This rule should not have failed.
+inet/sctp.t: ERROR: line 35: add rule netdev test-netdev egress sctp vtag {33, 55, 67, 88}: This rule should not have failed.
+inet/sctp.t: ERROR: line 36: add rule netdev test-netdev egress sctp vtag != {33, 55, 67, 88}: This rule should not have failed.
+inet/sctp.t: ERROR: line 39: add rule netdev test-netdev egress sctp chunk data exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 40: add rule netdev test-netdev egress sctp chunk init exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 41: add rule netdev test-netdev egress sctp chunk init-ack exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 42: add rule netdev test-netdev egress sctp chunk sack exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 43: add rule netdev test-netdev egress sctp chunk heartbeat exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 44: add rule netdev test-netdev egress sctp chunk heartbeat-ack exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 45: add rule netdev test-netdev egress sctp chunk abort exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 46: add rule netdev test-netdev egress sctp chunk shutdown exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 47: add rule netdev test-netdev egress sctp chunk shutdown-ack exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 48: add rule netdev test-netdev egress sctp chunk error exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 49: add rule netdev test-netdev egress sctp chunk cookie-echo exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 50: add rule netdev test-netdev egress sctp chunk cookie-ack exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 51: add rule netdev test-netdev egress sctp chunk ecne exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 52: add rule netdev test-netdev egress sctp chunk cwr exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 53: add rule netdev test-netdev egress sctp chunk shutdown-complete exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 54: add rule netdev test-netdev egress sctp chunk asconf-ack exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 55: add rule netdev test-netdev egress sctp chunk forward-tsn exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 56: add rule netdev test-netdev egress sctp chunk asconf exists: This rule should not have failed.
+inet/sctp.t: ERROR: line 59: add rule netdev test-netdev egress sctp chunk data type 0: This rule should not have failed.
+inet/sctp.t: ERROR: line 60: add rule netdev test-netdev egress sctp chunk init flags 23: This rule should not have failed.
+inet/sctp.t: ERROR: line 61: add rule netdev test-netdev egress sctp chunk init-ack length 42: This rule should not have failed.
+inet/sctp.t: ERROR: line 64: add rule netdev test-netdev egress sctp chunk data stream 1337: This rule should not have failed.
+inet/sctp.t: ERROR: line 65: add rule netdev test-netdev egress sctp chunk init initial-tsn 5: This rule should not have failed.
+inet/sctp.t: ERROR: line 66: add rule netdev test-netdev egress sctp chunk init-ack num-outbound-streams 3: This rule should not have failed.
+inet/sctp.t: ERROR: line 67: add rule netdev test-netdev egress sctp chunk sack a-rwnd 1: This rule should not have failed.
+inet/sctp.t: ERROR: line 68: add rule netdev test-netdev egress sctp chunk shutdown cum-tsn-ack 65535: This rule should not have failed.
+inet/sctp.t: ERROR: line 69: add rule netdev test-netdev egress sctp chunk ecne lowest-tsn 5: This rule should not have failed.
+inet/sctp.t: ERROR: line 70: add rule netdev test-netdev egress sctp chunk cwr lowest-tsn 8: This rule should not have failed.
+inet/sctp.t: ERROR: line 71: add rule netdev test-netdev egress sctp chunk asconf-ack seqno 12345: This rule should not have failed.
+inet/sctp.t: ERROR: line 72: add rule netdev test-netdev egress sctp chunk forward-tsn new-cum-tsn 31337: This rule should not have failed.
+inet/sctp.t: ERROR: line 73: add rule netdev test-netdev egress sctp chunk asconf seqno 12345: This rule should not have failed.
+inet/sctp.t: ERROR: line 73: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+inet/socket.t: ERROR: line 11: add rule ip sockip4 sockchain socket mark 0x00000005: This rule should not have failed.
+inet/socket.t: ERROR: line 13: add rule ip sockip4 sockchain socket wildcard 0: This rule should not have failed.
+inet/socket.t: ERROR: line 14: add rule ip sockip4 sockchain socket wildcard 1: This rule should not have failed.
+ip/objects.t: ERROR: line 37: add ct timeout ip test-ip4 cttime1 { protocol tcp; policy = { established:122 } ;}: I cannot add the ct timeout cttime1
+ip/objects.t: ERROR: line 39: add ct timeout ip test-ip4 cttime3 { protocol tcp; policy = { established:132, close:16, close_wait:16 } ; l3proto ip ;}: I cannot add the ct timeout cttime3
+ip/objects.t: ERROR: line 40: add ct timeout ip test-ip4 cttime4 { protocol udp; policy = { replied:14, unreplied:19 } ;}: I cannot add the ct timeout cttime4
+ip/objects.t: ERROR: line 43: add rule ip test-ip4 output ct timeout set "cttime1": This rule should not have failed.
+ip/objects.t: ERROR: line 46: add ct expectation ip test-ip4 ctexpect1 { protocol tcp; dport 1234; timeout 2m; size 12; }: I cannot add the ct expectation ctexpect1
+ip/objects.t: ERROR: line 50: add ct expectation ip test-ip4 ctexpect5 { protocol udp; dport 9876; timeout 2m; size 12; l3proto ip; }: I cannot add the ct expectation ctexpect5
+ip/objects.t: ERROR: line 52: add rule ip test-ip4 output ct expectation set "ctexpect1": This rule should not have failed.
+ip/objects.t: ERROR: line 55: add synproxy ip test-ip4 synproxy1 mss 1460 wscale 7: I cannot add the synproxy synproxy1
+ip/objects.t: ERROR: line 56: add synproxy ip test-ip4 synproxy2 mss 1460 wscale 7 timestamp sack-perm: I cannot add the synproxy synproxy2
+ip/objects.t: ERROR: line 58: add rule ip test-ip4 output synproxy name tcp dport map {443 : "synproxy1", 80 : "synproxy2"}: This rule should not have failed.
+ip/sets.t: ERROR: line 3: I cannot create the chain 'egress'
+ip/sets.t: ERROR: line 32: add rule netdev test-netdev egress ip saddr @set1 drop: This rule should not have failed.
+ip/sets.t: ERROR: line 33: add rule netdev test-netdev egress ip saddr != @set1 drop: This rule should not have failed.
+ip/sets.t: ERROR: line 34: add rule netdev test-netdev egress ip saddr @set2 drop: This rule should not have failed.
+ip/sets.t: ERROR: line 35: add rule netdev test-netdev egress ip saddr != @set2 drop: This rule should not have failed.
+ip/sets.t: ERROR: line 52: add rule netdev test-netdev egress ip saddr . ip daddr @set5 drop: This rule should not have failed.
+ip/sets.t: ERROR: line 53: add rule netdev test-netdev egress add @set5 { ip saddr . ip daddr }: This rule should not have failed.
+ip/sets.t: ERROR: line 56: add rule netdev test-netdev egress ip saddr { { 1.1.1.0, 3.3.3.0 }, 2.2.2.0 }: This rule should not have failed.
+ip/sets.t: ERROR: line 57: add rule netdev test-netdev egress ip saddr { { 1.1.1.0/24, 3.3.3.0/24 }, 2.2.2.0/24 }: This rule should not have failed.
+ip/sets.t: ERROR: line 60: add element ip test-ip4 set6 { 192.168.3.5, * }: This rule should not have failed.
+ip/sets.t: ERROR: line 61: add rule netdev test-netdev egress ip saddr @set6 drop: This rule should not have failed.
+ip/sets.t: ERROR: line 63: add rule ip test-ip4 input ip saddr vmap { 1.1.1.1 : drop, * : accept }: This rule should not have failed.
+ip/sets.t: ERROR: line 64: add rule ip test-ip4 input meta mark set ip saddr map { 1.1.1.1 : 0x00000001, * : 0x00000002 }: This rule should not have failed.
+ip/sets.t: ERROR: line 65: The chain egress does not exist in netdev test-netdev. I cannot delete it.
+ip/ip.t: ERROR: line 3: I cannot create the chain 'egress'
+ip/ip.t: ERROR: line 28: add rule netdev test-netdev egress ip dscp cs1: This rule should not have failed.
+ip/ip.t: ERROR: line 29: add rule netdev test-netdev egress ip dscp != cs1: This rule should not have failed.
+ip/ip.t: ERROR: line 30: add rule netdev test-netdev egress ip dscp 0x38: This rule should not have failed.
+ip/ip.t: ERROR: line 31: add rule netdev test-netdev egress ip dscp != 0x20: This rule should not have failed.
+ip/ip.t: ERROR: line 32: add rule netdev test-netdev egress ip dscp {cs0, cs1, cs2, cs3, cs4, cs5, cs6, cs7, af11, af12, af13, af21, af22, af23, af31, af32, af33, af41, af42, af43, ef}: This rule should not have failed.
+ip/ip.t: ERROR: line 34: add rule netdev test-netdev egress ip dscp != {cs0, cs3}: This rule should not have failed.
+ip/ip.t: ERROR: line 35: add rule netdev test-netdev egress ip dscp vmap { cs1 : continue , cs4 : accept } counter: This rule should not have failed.
+ip/ip.t: ERROR: line 37: add rule netdev test-netdev egress ip length 232: This rule should not have failed.
+ip/ip.t: ERROR: line 38: add rule netdev test-netdev egress ip length != 233: This rule should not have failed.
+ip/ip.t: ERROR: line 39: add rule netdev test-netdev egress ip length 333-435: This rule should not have failed.
+ip/ip.t: ERROR: line 40: add rule netdev test-netdev egress ip length != 333-453: This rule should not have failed.
+ip/ip.t: ERROR: line 41: add rule netdev test-netdev egress ip length { 333, 553, 673, 838}: This rule should not have failed.
+ip/ip.t: ERROR: line 42: add rule netdev test-netdev egress ip length != { 333, 553, 673, 838}: This rule should not have failed.
+ip/ip.t: ERROR: line 44: add rule netdev test-netdev egress ip id 22: This rule should not have failed.
+ip/ip.t: ERROR: line 45: add rule netdev test-netdev egress ip id != 233: This rule should not have failed.
+ip/ip.t: ERROR: line 46: add rule netdev test-netdev egress ip id 33-45: This rule should not have failed.
+ip/ip.t: ERROR: line 47: add rule netdev test-netdev egress ip id != 33-45: This rule should not have failed.
+ip/ip.t: ERROR: line 48: add rule netdev test-netdev egress ip id { 33, 55, 67, 88}: This rule should not have failed.
+ip/ip.t: ERROR: line 49: add rule netdev test-netdev egress ip id != { 33, 55, 67, 88}: This rule should not have failed.
+ip/ip.t: ERROR: line 51: add rule netdev test-netdev egress ip frag-off 222 accept: This rule should not have failed.
+ip/ip.t: ERROR: line 52: add rule netdev test-netdev egress ip frag-off != 233: This rule should not have failed.
+ip/ip.t: ERROR: line 53: add rule netdev test-netdev egress ip frag-off 33-45: This rule should not have failed.
+ip/ip.t: ERROR: line 54: add rule netdev test-netdev egress ip frag-off != 33-45: This rule should not have failed.
+ip/ip.t: ERROR: line 55: add rule netdev test-netdev egress ip frag-off { 33, 55, 67, 88}: This rule should not have failed.
+ip/ip.t: ERROR: line 56: add rule netdev test-netdev egress ip frag-off != { 33, 55, 67, 88}: This rule should not have failed. +ip/ip.t: ERROR: line 58: add rule netdev test-netdev egress ip ttl 0 drop: This rule should not have failed. +ip/ip.t: ERROR: line 59: add rule netdev test-netdev egress ip ttl 233: This rule should not have failed. +ip/ip.t: ERROR: line 60: add rule netdev test-netdev egress ip ttl 33-55: This rule should not have failed. +ip/ip.t: ERROR: line 61: add rule netdev test-netdev egress ip ttl != 45-50: This rule should not have failed. +ip/ip.t: ERROR: line 62: add rule netdev test-netdev egress ip ttl {43, 53, 45 }: This rule should not have failed. +ip/ip.t: ERROR: line 63: add rule netdev test-netdev egress ip ttl != {43, 53, 45 }: This rule should not have failed. +ip/ip.t: ERROR: line 65: add rule netdev test-netdev egress ip protocol tcp: This rule should not have failed. +ip/ip.t: ERROR: line 66: add rule netdev test-netdev egress ip protocol != tcp: This rule should not have failed. +ip/ip.t: ERROR: line 67: add rule netdev test-netdev egress ip protocol { icmp, esp, ah, comp, udp, udplite, tcp, dccp, sctp} accept: This rule should not have failed. +ip/ip.t: ERROR: line 68: add rule netdev test-netdev egress ip protocol != { icmp, esp, ah, comp, udp, udplite, tcp, dccp, sctp} accept: This rule should not have failed. +ip/ip.t: ERROR: line 70: add rule netdev test-netdev egress ip protocol 255: This rule should not have failed. +ip/ip.t: ERROR: line 73: add rule netdev test-netdev egress ip checksum 13172 drop: This rule should not have failed. +ip/ip.t: ERROR: line 74: add rule netdev test-netdev egress ip checksum 22: This rule should not have failed. +ip/ip.t: ERROR: line 75: add rule netdev test-netdev egress ip checksum != 233: This rule should not have failed. +ip/ip.t: ERROR: line 76: add rule netdev test-netdev egress ip checksum 33-45: This rule should not have failed. +ip/ip.t: ERROR: line 77: add rule netdev test-netdev egress ip checksum != 33-45: This rule should not have failed. +ip/ip.t: ERROR: line 78: add rule netdev test-netdev egress ip checksum { 33, 55, 67, 88}: This rule should not have failed. +ip/ip.t: ERROR: line 79: add rule netdev test-netdev egress ip checksum != { 33, 55, 67, 88}: This rule should not have failed. +ip/ip.t: ERROR: line 83: add rule netdev test-netdev egress ip saddr 192.168.2.0/24: This rule should not have failed. +ip/ip.t: ERROR: line 84: add rule netdev test-netdev egress ip saddr != 192.168.2.0/24: This rule should not have failed. +ip/ip.t: ERROR: line 85: add rule netdev test-netdev egress ip saddr 192.168.3.1 ip daddr 192.168.3.100: This rule should not have failed. +ip/ip.t: ERROR: line 86: add rule netdev test-netdev egress ip saddr != 1.1.1.1: This rule should not have failed. +ip/ip.t: ERROR: line 87: add rule netdev test-netdev egress ip saddr 1.1.1.1: This rule should not have failed. +ip/ip.t: ERROR: line 88: add rule netdev test-netdev egress ip daddr 192.168.0.1-192.168.0.250: This rule should not have failed. +ip/ip.t: ERROR: line 89: add rule netdev test-netdev egress ip daddr 10.0.0.0-10.255.255.255: This rule should not have failed. +ip/ip.t: ERROR: line 90: add rule netdev test-netdev egress ip daddr 172.16.0.0-172.31.255.255: This rule should not have failed. +ip/ip.t: ERROR: line 91: add rule netdev test-netdev egress ip daddr 192.168.3.1-192.168.4.250: This rule should not have failed. 
+ip/ip.t: ERROR: line 92: add rule netdev test-netdev egress ip daddr != 192.168.0.1-192.168.0.250: This rule should not have failed. +ip/ip.t: ERROR: line 93: add rule netdev test-netdev egress ip daddr { 192.168.5.1, 192.168.5.2, 192.168.5.3 } accept: This rule should not have failed. +ip/ip.t: ERROR: line 94: add rule netdev test-netdev egress ip daddr != { 192.168.5.1, 192.168.5.2, 192.168.5.3 } accept: This rule should not have failed. +ip/ip.t: ERROR: line 96: add rule netdev test-netdev egress ip daddr 192.168.1.2-192.168.1.55: This rule should not have failed. +ip/ip.t: ERROR: line 97: add rule netdev test-netdev egress ip daddr != 192.168.1.2-192.168.1.55: This rule should not have failed. +ip/ip.t: ERROR: line 98: add rule netdev test-netdev egress ip saddr 192.168.1.3-192.168.33.55: This rule should not have failed. +ip/ip.t: ERROR: line 99: add rule netdev test-netdev egress ip saddr != 192.168.1.3-192.168.33.55: This rule should not have failed. +ip/ip.t: ERROR: line 101: add rule netdev test-netdev egress ip daddr 192.168.0.1: This rule should not have failed. +ip/ip.t: ERROR: line 102: add rule netdev test-netdev egress ip daddr 192.168.0.1 drop: This rule should not have failed. +ip/ip.t: ERROR: line 103: add rule netdev test-netdev egress ip daddr 192.168.0.2: This rule should not have failed. +ip/ip.t: ERROR: line 105: add rule netdev test-netdev egress ip saddr & 0xff == 1: This rule should not have failed. +ip/ip.t: ERROR: line 106: add rule netdev test-netdev egress ip saddr & 0.0.0.255 < 0.0.0.127: This rule should not have failed. +ip/ip.t: ERROR: line 108: add rule netdev test-netdev egress ip saddr & 0xffff0000 == 0xffff0000: This rule should not have failed. +ip/ip.t: ERROR: line 110: add rule netdev test-netdev egress ip version 4 ip hdrlength 5: This rule should not have failed. +ip/ip.t: ERROR: line 111: add rule netdev test-netdev egress ip hdrlength 0: This rule should not have failed. +ip/ip.t: ERROR: line 112: add rule netdev test-netdev egress ip hdrlength 15: This rule should not have failed. +ip/ip.t: ERROR: line 113: add rule netdev test-netdev egress ip hdrlength vmap { 0-4 : drop, 5 : accept, 6 : continue } counter: This rule should not have failed. +ip/ip.t: ERROR: line 117: add rule netdev test-netdev egress iif "lo" ip daddr set 127.0.0.1: This rule should not have failed. +ip/ip.t: ERROR: line 118: add rule netdev test-netdev egress iif "lo" ip checksum set 0: This rule should not have failed. +ip/ip.t: ERROR: line 119: add rule netdev test-netdev egress iif "lo" ip id set 0: This rule should not have failed. +ip/ip.t: ERROR: line 120: add rule netdev test-netdev egress iif "lo" ip ecn set 1: This rule should not have failed. +ip/ip.t: ERROR: line 121: add rule netdev test-netdev egress iif "lo" ip ecn set ce: This rule should not have failed. +ip/ip.t: ERROR: line 122: add rule netdev test-netdev egress iif "lo" ip ttl set 23: This rule should not have failed. +ip/ip.t: ERROR: line 123: add rule netdev test-netdev egress iif "lo" ip protocol set 1: This rule should not have failed. +ip/ip.t: ERROR: line 125: add rule netdev test-netdev egress iif "lo" ip dscp set af23: This rule should not have failed. +ip/ip.t: ERROR: line 126: add rule netdev test-netdev egress iif "lo" ip dscp set cs0: This rule should not have failed. +ip/ip.t: ERROR: line 128: add rule netdev test-netdev egress ip saddr . ip daddr { 192.0.2.1 . 10.0.0.1-10.0.0.2 }: This rule should not have failed. +ip/ip.t: ERROR: line 129: add rule netdev test-netdev egress ip saddr . 
ip daddr vmap { 192.168.5.1-192.168.5.128 . 192.168.6.1-192.168.6.128 : accept }: This rule should not have failed. +ip/ip.t: ERROR: line 129: The chain egress does not exist in netdev test-netdev. I cannot delete it. +ip/snat.t: ERROR: line 17: add rule ip test-ip4 postrouting snat ip prefix to ip saddr map { 10.141.11.0/24 : 192.168.2.0/24 }: This rule should not have failed. +ip/meta.t: ERROR: line 16: add rule ip test-ip4 input meta sdif "lo" accept: This rule should not have failed. +ip/meta.t: ERROR: line 17: add rule ip test-ip4 input meta sdifname != "vrf1" accept: This rule should not have failed. +ip6/sets.t: ERROR: line 3: I cannot create the chain 'egress' +ip6/sets.t: ERROR: line 25: add rule netdev test-netdev egress ip6 saddr @set2 drop: This rule should not have failed. +ip6/sets.t: ERROR: line 26: add rule netdev test-netdev egress ip6 saddr != @set2 drop: This rule should not have failed. +ip6/sets.t: ERROR: line 42: add rule netdev test-netdev egress ip6 saddr . ip6 daddr @set5 drop: This rule should not have failed. +ip6/sets.t: ERROR: line 43: add rule netdev test-netdev egress add @set5 { ip6 saddr . ip6 daddr }: This rule should not have failed. +ip6/sets.t: ERROR: line 44: add rule ip6 test-ip6 input delete @set5 { ip6 saddr . ip6 daddr }: This rule should not have failed. +ip6/sets.t: ERROR: line 44: The chain egress does not exist in netdev test-netdev. I cannot delete it. +ip6/frag.t: ERROR: line 3: I cannot create the chain 'egress' +ip6/frag.t: ERROR: line 9: add rule netdev test-netdev egress frag nexthdr tcp: This rule should not have failed. +ip6/frag.t: ERROR: line 10: add rule netdev test-netdev egress frag nexthdr != icmp: This rule should not have failed. +ip6/frag.t: ERROR: line 11: add rule netdev test-netdev egress frag nexthdr {esp, ah, comp, udp, udplite, tcp, dccp, sctp}: This rule should not have failed. +ip6/frag.t: ERROR: line 12: add rule netdev test-netdev egress frag nexthdr != {esp, ah, comp, udp, udplite, tcp, dccp, sctp}: This rule should not have failed. +ip6/frag.t: ERROR: line 13: add rule netdev test-netdev egress frag nexthdr esp: This rule should not have failed. +ip6/frag.t: ERROR: line 14: add rule netdev test-netdev egress frag nexthdr ah: This rule should not have failed. +ip6/frag.t: ERROR: line 16: add rule netdev test-netdev egress frag reserved 22: This rule should not have failed. +ip6/frag.t: ERROR: line 17: add rule netdev test-netdev egress frag reserved != 233: This rule should not have failed. +ip6/frag.t: ERROR: line 18: add rule netdev test-netdev egress frag reserved 33-45: This rule should not have failed. +ip6/frag.t: ERROR: line 19: add rule netdev test-netdev egress frag reserved != 33-45: This rule should not have failed. +ip6/frag.t: ERROR: line 20: add rule netdev test-netdev egress frag reserved { 33, 55, 67, 88}: This rule should not have failed. +ip6/frag.t: ERROR: line 21: add rule netdev test-netdev egress frag reserved != { 33, 55, 67, 88}: This rule should not have failed. +ip6/frag.t: ERROR: line 23: add rule netdev test-netdev egress frag frag-off 22: This rule should not have failed. +ip6/frag.t: ERROR: line 24: add rule netdev test-netdev egress frag frag-off != 233: This rule should not have failed. +ip6/frag.t: ERROR: line 25: add rule netdev test-netdev egress frag frag-off 33-45: This rule should not have failed. +ip6/frag.t: ERROR: line 26: add rule netdev test-netdev egress frag frag-off != 33-45: This rule should not have failed. 
+ip6/frag.t: ERROR: line 27: add rule netdev test-netdev egress frag frag-off { 33, 55, 67, 88}: This rule should not have failed. +ip6/frag.t: ERROR: line 28: add rule netdev test-netdev egress frag frag-off != { 33, 55, 67, 88}: This rule should not have failed. +ip6/frag.t: ERROR: line 30: add rule netdev test-netdev egress frag reserved2 1: This rule should not have failed. +ip6/frag.t: ERROR: line 31: add rule netdev test-netdev egress frag more-fragments 0: This rule should not have failed. +ip6/frag.t: ERROR: line 32: add rule netdev test-netdev egress frag more-fragments 1: This rule should not have failed. +ip6/frag.t: ERROR: line 34: add rule netdev test-netdev egress frag id 1: This rule should not have failed. +ip6/frag.t: ERROR: line 35: add rule netdev test-netdev egress frag id 22: This rule should not have failed. +ip6/frag.t: ERROR: line 36: add rule netdev test-netdev egress frag id != 33: This rule should not have failed. +ip6/frag.t: ERROR: line 37: add rule netdev test-netdev egress frag id 33-45: This rule should not have failed. +ip6/frag.t: ERROR: line 38: add rule netdev test-netdev egress frag id != 33-45: This rule should not have failed. +ip6/frag.t: ERROR: line 39: add rule netdev test-netdev egress frag id { 33, 55, 67, 88}: This rule should not have failed. +ip6/frag.t: ERROR: line 40: add rule netdev test-netdev egress frag id != { 33, 55, 67, 88}: This rule should not have failed. +ip6/frag.t: ERROR: line 40: The chain egress does not exist in netdev test-netdev. I cannot delete it. +ip6/meta.t: ERROR: line 15: add rule ip6 test-ip6 input meta sdif "lo" accept: This rule should not have failed. +ip6/meta.t: ERROR: line 16: add rule ip6 test-ip6 input meta sdifname != "vrf1" accept: This rule should not have failed. +ip6/vmap.t: ERROR: line 3: I cannot create the chain 'egress' +ip6/vmap.t: ERROR: line 9: add rule netdev test-netdev egress ip6 saddr vmap { abcd::3 : accept }: This rule should not have failed. +ip6/vmap.t: ERROR: line 14: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234:1234:1234:1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 15: add rule netdev test-netdev egress ip6 saddr vmap { ::1234:1234:1234:1234:1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 16: add rule netdev test-netdev egress ip6 saddr vmap { 1234::1234:1234:1234:1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 17: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234::1234:1234:1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 18: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234::1234:1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 19: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234:1234::1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 20: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234:1234:1234::1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 21: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234:1234:1234:1234::1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 22: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234:1234:1234:1234:1234:: : accept}: This rule should not have failed. 
+ip6/vmap.t: ERROR: line 23: add rule netdev test-netdev egress ip6 saddr vmap { ::1234:1234:1234:1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 24: add rule netdev test-netdev egress ip6 saddr vmap { 1234::1234:1234:1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 25: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234::1234:1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 26: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234::1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 27: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234:1234::1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 28: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234:1234:1234::1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 29: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234:1234:1234:1234:: : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 30: add rule netdev test-netdev egress ip6 saddr vmap { ::1234:1234:1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 31: add rule netdev test-netdev egress ip6 saddr vmap { 1234::1234:1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 32: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234::1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 33: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234::1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 34: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234:1234::1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 35: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234:1234:1234:: : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 36: add rule netdev test-netdev egress ip6 saddr vmap { ::1234:1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 37: add rule netdev test-netdev egress ip6 saddr vmap { 1234::1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 38: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234::1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 39: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234::1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 40: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234:1234:: : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 41: add rule netdev test-netdev egress ip6 saddr vmap { ::1234:1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 42: add rule netdev test-netdev egress ip6 saddr vmap { 1234::1234:1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 43: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234::1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 44: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:1234:: : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 45: add rule netdev test-netdev egress ip6 saddr vmap { ::1234:1234 : accept}: This rule should not have failed. 
+ip6/vmap.t: ERROR: line 46: add rule netdev test-netdev egress ip6 saddr vmap { 1234::1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 47: add rule netdev test-netdev egress ip6 saddr vmap { 1234:1234:: : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 48: add rule netdev test-netdev egress ip6 saddr vmap { ::1234 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 49: add rule netdev test-netdev egress ip6 saddr vmap { 1234:: : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 50: add rule netdev test-netdev egress ip6 saddr vmap { ::/64 : accept}: This rule should not have failed. +ip6/vmap.t: ERROR: line 52: add rule netdev test-netdev egress ip6 saddr vmap {1234:1234:1234:1234:1234:1234:aaaa:: : accept, ::aaaa : drop}: This rule should not have failed. +ip6/vmap.t: ERROR: line 53: add rule netdev test-netdev egress ip6 saddr vmap {1234:1234:1234:1234:1234:1234:aaaa:::accept, ::bbbb : drop}: This rule should not have failed. +ip6/vmap.t: ERROR: line 54: add rule netdev test-netdev egress ip6 saddr vmap {1234:1234:1234:1234:1234:1234:aaaa:::accept,::cccc : drop}: This rule should not have failed. +ip6/vmap.t: ERROR: line 55: add rule netdev test-netdev egress ip6 saddr vmap {1234:1234:1234:1234:1234:1234:aaaa:::accept,::dddd: drop}: This rule should not have failed. +ip6/vmap.t: ERROR: line 58: The chain egress does not exist in netdev test-netdev. I cannot delete it. +netdev/fwd.t: ERROR: line 2: I cannot create the chain 'egress' +netdev/fwd.t: ERROR: line 6: add rule netdev test-netdev egress fwd to "lo": This rule should not have failed. +netdev/fwd.t: ERROR: line 7: add rule netdev test-netdev egress fwd to meta mark map { 0x00000001 : "lo", 0x00000002 : "lo"}: This rule should not have failed. +netdev/fwd.t: ERROR: line 9: add rule netdev test-netdev egress fwd ip to 192.168.2.200 device "lo": This rule should not have failed. +netdev/fwd.t: ERROR: line 9: The chain egress does not exist in netdev test-netdev. I cannot delete it. +netdev/reject.t: ERROR: line 5: add rule netdev test-netdev ingress reject with icmp host-unreachable: This rule should not have failed. +netdev/reject.t: ERROR: line 6: add rule netdev test-netdev ingress reject with icmp net-unreachable: This rule should not have failed. +netdev/reject.t: ERROR: line 7: add rule netdev test-netdev ingress reject with icmp prot-unreachable: This rule should not have failed. +netdev/reject.t: ERROR: line 8: add rule netdev test-netdev ingress reject with icmp port-unreachable: This rule should not have failed. +netdev/reject.t: ERROR: line 9: add rule netdev test-netdev ingress reject with icmp net-prohibited: This rule should not have failed. +netdev/reject.t: ERROR: line 10: add rule netdev test-netdev ingress reject with icmp host-prohibited: This rule should not have failed. +netdev/reject.t: ERROR: line 11: add rule netdev test-netdev ingress reject with icmp admin-prohibited: This rule should not have failed. +netdev/reject.t: ERROR: line 13: add rule netdev test-netdev ingress reject with icmpv6 no-route: This rule should not have failed. +netdev/reject.t: ERROR: line 14: add rule netdev test-netdev ingress reject with icmpv6 admin-prohibited: This rule should not have failed. +netdev/reject.t: ERROR: line 15: add rule netdev test-netdev ingress reject with icmpv6 addr-unreachable: This rule should not have failed. 
+netdev/reject.t: ERROR: line 16: add rule netdev test-netdev ingress reject with icmpv6 port-unreachable: This rule should not have failed. +netdev/reject.t: ERROR: line 17: add rule netdev test-netdev ingress reject with icmpv6 policy-fail: This rule should not have failed. +netdev/reject.t: ERROR: line 18: add rule netdev test-netdev ingress reject with icmpv6 reject-route: This rule should not have failed. +netdev/reject.t: ERROR: line 20: add rule netdev test-netdev ingress mark 12345 reject with tcp reset: This rule should not have failed. +netdev/reject.t: ERROR: line 22: add rule netdev test-netdev ingress reject: This rule should not have failed. +netdev/reject.t: ERROR: line 23: add rule netdev test-netdev ingress meta protocol ip reject: This rule should not have failed. +netdev/reject.t: ERROR: line 24: add rule netdev test-netdev ingress meta protocol ip6 reject: This rule should not have failed. +netdev/reject.t: ERROR: line 26: add rule netdev test-netdev ingress reject with icmpx host-unreachable: This rule should not have failed. +netdev/reject.t: ERROR: line 27: add rule netdev test-netdev ingress reject with icmpx no-route: This rule should not have failed. +netdev/reject.t: ERROR: line 28: add rule netdev test-netdev ingress reject with icmpx admin-prohibited: This rule should not have failed. +netdev/reject.t: ERROR: line 29: add rule netdev test-netdev ingress reject with icmpx port-unreachable: This rule should not have failed. +netdev/reject.t: ERROR: line 31: add rule netdev test-netdev ingress meta protocol ip reject with icmp host-unreachable: This rule should not have failed. +netdev/reject.t: ERROR: line 32: add rule netdev test-netdev ingress meta protocol ip6 reject with icmpv6 no-route: This rule should not have failed. +netdev/reject.t: ERROR: line 39: add rule netdev test-netdev ingress meta protocol ip reject with icmpx admin-prohibited: This rule should not have failed. +netdev/reject.t: ERROR: line 40: add rule netdev test-netdev ingress meta protocol ip6 reject with icmpx admin-prohibited: This rule should not have failed. +netdev/dup.t: ERROR: line 2: I cannot create the chain 'egress' +netdev/dup.t: ERROR: line 6: add rule netdev test-netdev egress dup to "lo": This rule should not have failed. +netdev/dup.t: ERROR: line 7: add rule netdev test-netdev egress dup to meta mark map { 0x00000001 : "lo", 0x00000002 : "lo"}: This rule should not have failed. +netdev/dup.t: ERROR: line 8: The chain egress does not exist in netdev test-netdev. I cannot delete it. 
diff --git a/SOURCES/run-tests.stderr.expect b/SOURCES/run-tests.stderr.expect
new file mode 100644
index 0000000..77cf01e
--- /dev/null
+++ b/SOURCES/run-tests.stderr.expect
@@ -0,0 +1,27 @@
+W: [FAILED] ././tests/shell/testcases/cache/0008_delete_by_handle_0
+W: [FAILED] ././tests/shell/testcases/cache/0010_implicit_chain_0
+W: [FAILED] ././tests/shell/testcases/chains/0021prio_0
+W: [FAILED] ././tests/shell/testcases/chains/0040mark_shift_0
+W: [FAILED] ././tests/shell/testcases/chains/0040mark_shift_1
+W: [FAILED] ././tests/shell/testcases/chains/0041chain_binding_0
+W: [FAILED] ././tests/shell/testcases/chains/0043chain_ingress_0
+W: [FAILED] ././tests/shell/testcases/flowtable/0013addafterdelete_0
+W: [FAILED] ././tests/shell/testcases/flowtable/0014addafterdelete_0
+W: [FAILED] ././tests/shell/testcases/listing/0013objects_0
+W: [FAILED] ././tests/shell/testcases/maps/0011vmap_0
+W: [FAILED] ././tests/shell/testcases/maps/typeof_integer_0
+W: [FAILED] ././tests/shell/testcases/maps/typeof_maps_0
+W: [FAILED] ././tests/shell/testcases/maps/typeof_raw_0
+W: [FAILED] ././tests/shell/testcases/nft-f/0017ct_timeout_obj_0
+W: [FAILED] ././tests/shell/testcases/nft-f/0018ct_expectation_obj_0
+W: [DUMP FAIL] ././tests/shell/testcases/optionals/comments_chain_0
+W: [FAILED] ././tests/shell/testcases/optionals/comments_objects_0
+W: [DUMP FAIL] ././tests/shell/testcases/optionals/comments_table_0
+W: [FAILED] ././tests/shell/testcases/owner/0001-flowtable-uaf
+W: [FAILED] ././tests/shell/testcases/sets/0024named_objects_0
+W: [FAILED] ././tests/shell/testcases/sets/0044interval_overlap_0
+W: [FAILED] ././tests/shell/testcases/sets/0046netmap_0
+W: [FAILED] ././tests/shell/testcases/sets/0063set_catchall_0
+W: [FAILED] ././tests/shell/testcases/sets/0064map_catchall_0
+W: [FAILED] ././tests/shell/testcases/sets/typeof_raw_0
+W: [FAILED] ././tests/shell/testcases/sets/typeof_sets_0
diff --git a/SPECS/nftables.spec b/SPECS/nftables.spec
index 1b83bcb..60d8cc3 100644
--- a/SPECS/nftables.spec
+++ b/SPECS/nftables.spec
@@ -1,121 +1,74 @@
-%define rpmversion 0.9.3
-%define specrelease 26
-%define libnftnl_ver 1.1.5-5
+%define nft_rpmversion 1.0.4
+%define nft_specrelease 3
+%define libnftnl_ver 1.2.2-1
 Name: nftables
-Version: %{rpmversion}
-Release: %{specrelease}%{?dist}%{?buildid}
+Version: %{nft_rpmversion}
+Release: %{nft_specrelease}%{?dist}%{?buildid}
 # Upstream released a 0.100 version, then 0.4. Need Epoch to get back on track.
 Epoch: 1
 Summary: Netfilter Tables userspace utillites
 License: GPLv2
-URL: http://netfilter.org/projects/nftables/
-Source0: http://ftp.netfilter.org/pub/nftables/nftables-%{version}.tar.bz2
+URL: https://netfilter.org/projects/nftables/
+Source0: %{url}/files/%{name}-%{version}.tar.bz2
 Source1: nftables.service
 Source2: nftables.conf
 Source3: main.nft
 Source4: router.nft
 Source5: nat.nft
+Source6: nft-test.stderr.expect
+Source7: run-tests.stderr.expect
+Source8: monitor-run-tests.stderr.expect
 
-Patch1: 0001-main-enforce-options-before-commands.patch
-Patch2: 0002-main-restore-debug.patch
-Patch3: 0003-monitor-Do-not-decompose-non-anonymous-sets.patch
-Patch4: 0004-monitor-Fix-output-for-ranges-in-anonymous-sets.patch
-Patch5: 0005-xfrm-spi-is-big-endian.patch
-Patch6: 0006-tests-shell-Search-diff-tool-once-and-for-all.patch
-Patch7: 0007-cache-Fix-for-doubled-output-after-reset-command.patch
-Patch8: 0008-netlink-Fix-leak-in-unterminated-string-deserializer.patch
-Patch9: 0009-netlink-Fix-leaks-in-netlink_parse_cmp.patch
-Patch10: 0010-netlink-Avoid-potential-NULL-pointer-deref-in-netlin.patch
-Patch11: 0011-tests-json_echo-Fix-for-Python3.patch
-Patch12: 0012-tests-json_echo-Support-testing-host-binaries.patch
-Patch13: 0013-tests-monitor-Support-running-individual-test-cases.patch
-Patch14: 0014-tests-monitor-Support-testing-host-s-nft-binary.patch
-Patch15: 0015-tests-py-Support-testing-host-binaries.patch
-Patch16: 0016-doc-nft.8-Mention-wildcard-interface-matching.patch
-Patch17: 0017-scanner-Extend-asteriskstring-definition.patch
-Patch18: 0018-parser-add-a-helper-for-concat-expression-handling.patch
-Patch19: 0019-include-resync-nf_tables.h-cache-copy.patch
-Patch20: 0020-src-Add-support-for-NFTNL_SET_DESC_CONCAT.patch
-Patch21: 0021-src-Add-support-for-concatenated-set-ranges.patch
-Patch22: 0022-parser_json-Support-ranges-in-concat-expressions.patch
-Patch23: 0023-doc-Document-notrack-statement.patch
-Patch24: 0024-JSON-Improve-performance-of-json_events_cb.patch
-Patch25: 0025-segtree-Fix-missing-expires-value-in-prefixes.patch
-Patch26: 0026-segtree-Use-expr_clone-in-get_set_interval_.patch
-Patch27: 0027-segtree-Merge-get_set_interval_find-and-get_set_inte.patch
-Patch28: 0028-tests-0034get_element_0-do-not-discard-stderr.patch
-Patch29: 0029-segtree-Fix-get-element-command-with-prefixes.patch
-Patch30: 0030-include-Resync-nf_tables.h-cache-copy.patch
-Patch31: 0031-src-Set-NFT_SET_CONCAT-flag-for-sets-with-concatenat.patch
-Patch32: 0032-src-store-expr-not-dtype-to-track-data-in-sets.patch
-Patch33: 0033-evaluate-Perform-set-evaluation-on-implicitly-declar.patch
-Patch34: 0034-evaluate-missing-datatype-definition-in-implicit_set.patch
-Patch35: 0035-mergesort-unbreak-listing-with-binops.patch
-Patch36: 0036-proto-add-sctp-crc32-checksum-fixup.patch
-Patch37: 0037-proto-Fix-ARP-header-field-ordering.patch
-Patch38: 0038-json-echo-Speedup-seqnum_to_json.patch
-Patch39: 0039-json-Fix-seqnum_to_json-functionality.patch
-Patch40: 0040-json-don-t-leave-dangling-pointers-on-hlist.patch
-Patch41: 0041-json-init-parser-state-for-every-new-buffer-file.patch
-Patch42: 0042-tests-Disable-tests-known-to-fail-on-RHEL8.patch
-Patch43: 0043-monitor-Fix-for-use-after-free-when-printing-map-ele.patch
-Patch44: 0044-tests-monitor-use-correct-nft-value-in-EXIT-trap.patch
-Patch45: 0045-evaluate-Reject-quoted-strings-containing-only-wildc.patch
-Patch46: 0046-src-Support-odd-sized-payload-matches.patch
-Patch47: 0047-src-Optimize-prefix-matches-on-byte-boundaries.patch
-Patch48: 0048-tests-py-Move-tcpopt.t-to-any-directory.patch
-Patch49: 0049-parser-merge-sack-perm-sack-permitted-and-maxseg-mss.patch
-Patch50: 0050-tcpopts-clean-up-parser-tcpopt.c-plumbing.patch
-Patch51: 0051-tcpopt-rename-noop-to-nop.patch
-Patch52: 0052-tcpopt-split-tcpopt_hdr_fields-into-per-option-enum.patch
-Patch53: 0053-tcpopt-allow-to-check-for-presence-of-any-tcp-option.patch
-Patch54: 0054-tcp-add-raw-tcp-option-match-support.patch
-Patch55: 0055-json-tcp-add-raw-tcp-option-match-support.patch
-Patch56: 0056-json-Simplify-non-tcpopt-exthdr-printing-a-bit.patch
-Patch57: 0057-scanner-introduce-start-condition-stack.patch
-Patch58: 0058-scanner-sctp-Move-to-own-scope.patch
-Patch59: 0059-exthdr-Implement-SCTP-Chunk-matching.patch
-Patch60: 0060-include-missing-sctp_chunk.h-in-Makefile.am.patch
-Patch61: 0061-doc-nft.8-Extend-monitor-description-by-trace.patch
-Patch62: 0062-tests-shell-Fix-bogus-testsuite-failure-with-100Hz.patch
-Patch63: 0063-parser_json-Fix-error-reporting-for-invalid-syntax.patch
-Patch64: 0064-parser_bison-Fix-for-implicit-declaration-of-isalnum.patch
-Patch65: 0065-parser_json-Fix-for-memleak-in-tcp-option-error-path.patch
-Patch66: 0066-json-Drop-pointless-assignment-in-exthdr_expr_json.patch
-Patch67: 0067-segtree-Fix-segfault-when-restoring-a-huge-interval-.patch
-Patch68: 0068-tests-cover-baecd1cf2685-segtree-Fix-segfault-when-r.patch
-Patch69: 0069-tests-shell-NFT-needs-to-be-invoked-unquoted.patch
-Patch70: 0070-tests-shell-better-parameters-for-the-interval-stack.patch
-Patch71: 0071-netlink-remove-unused-parameter-from-netlink_gen_stm.patch
-Patch72: 0072-src-support-for-restoring-element-counters.patch
-Patch73: 0073-evaluate-attempt-to-set_eval-flag-if-dynamic-updates.patch
-Patch74: 0074-evaluate-fix-inet-nat-with-no-layer-3-info.patch
-Patch75: 0075-tests-py-add-dnat-to-port-without-defining-destinati.patch
-Patch76: 0076-mnl-do-not-build-nftnl_set-element-list.patch
-Patch77: 0077-mnl-do-not-use-expr-identifier-to-fetch-device-name.patch
-Patch78: 0078-tests-shell-auto-removal-of-chain-hook-on-netns-remo.patch
-Patch79: 0079-rule-memleak-in-__do_add_setelems.patch
-Patch80: 0080-rule-fix-element-cache-update-in-__do_add_setelems.patch
-Patch81: 0081-src-rename-CMD_OBJ_SETELEM-to-CMD_OBJ_ELEMENTS.patch
-Patch82: 0082-src-add-CMD_OBJ_SETELEMS.patch
-Patch83: 0083-libnftables-call-nft_cmd_expand-only-with-CMD_ADD.patch
+Patch1: 0001-tests-shell-runtime-set-element-automerge.patch
+Patch2: 0002-rule-collapse-set-element-commands.patch
+Patch3: 0003-intervals-do-not-report-exact-overlaps-for-new-eleme.patch
+Patch4: 0004-intervals-do-not-empty-cache-for-maps.patch
+Patch5: 0005-intervals-Do-not-sort-cached-set-elements-over-and-o.patch
+Patch6: 0006-doc-Document-limitations-of-ipsec-expression-with-xf.patch
+Patch7: 0007-tests-py-Add-a-test-for-failing-ipsec-after-counter.patch
+Patch8: 0008-parser-add-missing-synproxy-scope-closure.patch
+Patch9: 0009-scanner-don-t-pop-active-flex-scanner-scope.patch
+Patch10: 0010-intervals-fix-crash-when-trying-to-remove-element-in.patch
+Patch11: 0011-intervals-check-for-EXPR_F_REMOVE-in-case-of-element.patch
+Patch12: 0012-netlink_delinearize-allow-postprocessing-on-concaten.patch
+Patch13: 0013-netlink_delinearize-postprocess-binary-ands-in-conca.patch
+Patch14: 0014-proto-track-full-stack-of-seen-l2-protocols-not-just.patch
+Patch15: 0015-debug-dump-the-l2-protocol-stack.patch
+Patch16: 0016-tests-add-a-test-case-for-ether-and-vlan-listing.patch
+Patch17: 0017-netlink_delinearize-also-postprocess-OP_AND-in-set-e.patch
+Patch18: 0018-evaluate-search-stacked-header-list-for-matching-pay.patch
+Patch19: 0019-src-allow-anon-set-concatenation-with-ether-and-vlan.patch
+Patch20: 0020-evaluate-set-eval-ctx-for-add-update-statements-with.patch
+Patch21: 0021-monitor-Sanitize-startup-race-condition.patch
+Patch22: 0022-netlink_delinearize-fix-decoding-of-concat-data-elem.patch
+Patch23: 0023-netlink_linearize-fix-timeout-with-map-updates.patch
+Patch24: 0024-tests-add-a-test-case-for-map-update-from-packet-pat.patch
+Patch25: 0025-owner-Fix-potential-array-out-of-bounds-access.patch
+Patch26: 0026-mnl-dump_nf_hooks-leaks-memory-in-error-path.patch
+Patch27: 0027-meta-parse_iso_date-returns-boolean.patch
+Patch28: 0028-netlink-Fix-for-potential-NULL-pointer-deref.patch
+Patch29: 0029-optimize-Do-not-return-garbage-from-stack.patch
+Patch30: 0030-optimize-Clarify-chain_optimize-array-allocations.patch
+Patch31: 0031-netlink_delinearize-Sanitize-concat-data-element-dec.patch
+Patch32: 0032-tests-monitor-Summarize-failures-per-test-case.patch
+Patch33: 0033-rule-check-address-family-in-set-collapse.patch
 
-BuildRequires: autogen
 BuildRequires: autoconf
 BuildRequires: automake
 BuildRequires: libtool
+BuildRequires: make
 BuildRequires: gcc
 BuildRequires: flex
 BuildRequires: bison
-BuildRequires: libmnl-devel
+BuildRequires: pkgconfig(libmnl) >= 1.0.4
 BuildRequires: gmp-devel
 BuildRequires: readline-devel
 BuildRequires: pkgconfig(libnftnl) >= %{libnftnl_ver}
 BuildRequires: systemd
 BuildRequires: asciidoc
-BuildRequires: iptables-devel
+BuildRequires: pkgconfig(xtables) >= 1.6.1
 BuildRequires: jansson-devel
 BuildRequires: python3-devel
@@ -142,12 +95,15 @@ The nftables python module provides an interface to libnftables via ctypes.
 
 %prep
 %autosetup -p1
+cp -a %{SOURCE6} ./tests/py/
+cp -a %{SOURCE7} ./tests/shell/
+cp -a %{SOURCE8} ./tests/monitor/run-tests.stderr.expect
 
 %build
 autoreconf -fi
 rm -Rf autom4te*.cache config.h.in~
 %configure --disable-silent-rules --with-json --with-xtables \
-	--enable-python --with-python-bin=%{__python3}
+	--enable-python --with-python-bin=%{__python3} --with-cli=readline
 make %{?_smp_mflags}
 
 %install
@@ -165,7 +121,7 @@
 cp -a %{SOURCE1} $RPM_BUILD_ROOT/%{_unitdir}/
 mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/sysconfig
 cp -a %{SOURCE2} $RPM_BUILD_ROOT/%{_sysconfdir}/sysconfig/
-rm $RPM_BUILD_ROOT/%{_sysconfdir}/nftables/*.nft
+rm $RPM_BUILD_ROOT/%{_datadir}/nftables/*.nft
 cp %{SOURCE3} %{SOURCE4} %{SOURCE5} \
	$RPM_BUILD_ROOT/%{_sysconfdir}/nftables/
@@ -217,6 +173,18 @@ touch -r %{SOURCE2} $RPM_BUILD_ROOT/%{python3_sitelib}/nftables/nftables.py
 %{python3_sitelib}/nftables/
 
 %changelog
+* Thu Sep 21 2023 Phil Sutter [1.0.4-3.el8]
+- spec: Rename variables to avoid a clash (Phil Sutter) [INTERNAL]
+- rule: check address family in set collapse (Phil Sutter) [RHEL-5160]
+
+* Thu Jul 20 2023 Phil Sutter [1.0.4-2.el8]
+- Add expected error records for testsuite runs (Phil Sutter) [2211076]
+- tests: monitor: Summarize failures per test case (Phil Sutter) [2211076]
+
+* Tue May 30 2023 Phil Sutter [1.0.4-1.el8]
+- Synchronize patch level with nftables-1.0.4-10.el9 (Phil Sutter) [2211076]
+- Rebase onto version 1.0.4 (Phil Sutter) [2211076]
+
 * Thu Apr 28 2022 Phil Sutter [0.9.3-26.el8]
 - libnftables: call nft_cmd_expand() only with CMD_ADD (Phil Sutter) [2073287]
 - src: add CMD_OBJ_SETELEMS (Phil Sutter) [2073287]
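Note on the *.stderr.expect sources added above: they record the test-case failures the respective drivers (tests/py/nft-test.py, tests/shell/run-tests.sh, tests/monitor/run-tests.sh) are already expected to report on this build, apparently so that a testsuite run can be judged against a known baseline rather than a clean pass. The hunks shown here only install the files into the test directories in %prep; the comparison step itself is not part of this diff. A minimal sketch of such a baseline check, assuming the shell suite is run from its own directory and writes its warnings to stderr (illustrative only, not the packaged %check logic):

    # hypothetical baseline comparison -- not taken from the spec above
    cd tests/shell
    ./run-tests.sh 2> run-tests.stderr.actual || true          # collect the driver's W: warnings
    diff -u run-tests.stderr.expect run-tests.stderr.actual    # flag only failures not in the baseline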