diff --git a/bcc-0.26.0-sync-with-latest-libbpf-repo.patch b/bcc-0.26.0-sync-with-latest-libbpf-repo.patch
new file mode 100644
index 0000000..3f77c5e
--- /dev/null
+++ b/bcc-0.26.0-sync-with-latest-libbpf-repo.patch
@@ -0,0 +1,363 @@
+From 34f77c4aaaa039fd2ef3d51b8b61db30fc34912f Mon Sep 17 00:00:00 2001
+From: Yonghong Song
+Date: Wed, 19 Apr 2023 23:46:53 -0700
+Subject: [PATCH] sync with latest libbpf repo
+
+Sync libbpf submodule up to the following commit:
+  44b0bc9ad70a ci: Regenerate latest vmlinux.h for old kernel CI tests.
+
+Signed-off-by: Yonghong Song
+---
+ src/cc/compat/linux/virtual_bpf.h | 141 ++++++++++++++++++++++++++----
+ src/cc/export/helpers.h           |   6 +-
+ 2 files changed, 126 insertions(+), 21 deletions(-)
+
+diff --git a/src/cc/compat/linux/virtual_bpf.h b/src/cc/compat/linux/virtual_bpf.h
+index be3a4627..a182123e 100644
+--- a/src/cc/compat/linux/virtual_bpf.h
++++ b/src/cc/compat/linux/virtual_bpf.h
+@@ -1034,6 +1034,7 @@ enum bpf_attach_type {
+ 	BPF_PERF_EVENT,
+ 	BPF_TRACE_KPROBE_MULTI,
+ 	BPF_LSM_CGROUP,
++	BPF_STRUCT_OPS,
+ 	__MAX_BPF_ATTACH_TYPE
+ };
+ 
+@@ -1109,7 +1110,7 @@ enum bpf_link_type {
+  */
+ #define BPF_F_STRICT_ALIGNMENT	(1U << 0)
+ 
+-/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the
++/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
+  * verifier will allow any alignment whatsoever. On platforms
+  * with strict alignment requirements for loads ands stores (such
+  * as sparc and mips) the verifier validates that all loads and
+@@ -1157,6 +1158,11 @@ enum bpf_link_type {
+  */
+ #define BPF_F_XDP_HAS_FRAGS	(1U << 5)
+ 
++/* If BPF_F_XDP_DEV_BOUND_ONLY is used in BPF_PROG_LOAD command, the loaded
++ * program becomes device-bound but can access XDP metadata.
++ */
++#define BPF_F_XDP_DEV_BOUND_ONLY	(1U << 6)
++
+ /* link_create.kprobe_multi.flags used in LINK_CREATE command for
+  * BPF_TRACE_KPROBE_MULTI attach type to create return probe.
+  */
+@@ -1262,6 +1268,9 @@ enum {
+ 
+ /* Create a map that is suitable to be an inner map with dynamic max entries */
+ 	BPF_F_INNER_MAP		= (1U << 12),
++
++/* Create a map that will be registered/unregesitered by the backed bpf_link */
++	BPF_F_LINK		= (1U << 13),
+ };
+ 
+ /* Flags for BPF_PROG_QUERY. */
+@@ -1399,6 +1408,11 @@ union bpf_attr {
+ 		__aligned_u64	fd_array;	/* array of FDs */
+ 		__aligned_u64	core_relos;
+ 		__u32		core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
++		/* output: actual total log contents size (including termintaing zero).
++		 * It could be both larger than original log_size (if log was
++		 * truncated), or smaller (if log buffer wasn't filled completely).
++		 */
++		__u32		log_true_size;
+ 	};
+ 
+ 	struct { /* anonymous struct used by BPF_OBJ_* commands */
+@@ -1484,6 +1498,11 @@ union bpf_attr {
+ 		__u32		btf_size;
+ 		__u32		btf_log_size;
+ 		__u32		btf_log_level;
++		/* output: actual total log contents size (including termintaing zero).
++		 * It could be both larger than original log_size (if log was
++		 * truncated), or smaller (if log buffer wasn't filled completely).
++		 */
++		__u32		btf_log_true_size;
+ 	};
+ 
+ 	struct {
+@@ -1503,7 +1522,10 @@ union bpf_attr {
+ 	} task_fd_query;
+ 
+ 	struct { /* struct used by BPF_LINK_CREATE command */
+-		__u32		prog_fd;	/* eBPF program to attach */
++		union {
++			__u32		prog_fd;	/* eBPF program to attach */
++			__u32		map_fd;		/* struct_ops to attach */
++		};
+ 		union {
+ 			__u32		target_fd;	/* object to attach to */
+ 			__u32		target_ifindex; /* target ifindex */
+@@ -1544,12 +1566,23 @@ union bpf_attr {
+ 
+ 	struct { /* struct used by BPF_LINK_UPDATE command */
+ 		__u32		link_fd;	/* link fd */
+-		/* new program fd to update link with */
+-		__u32		new_prog_fd;
++		union {
++			/* new program fd to update link with */
++			__u32		new_prog_fd;
++			/* new struct_ops map fd to update link with */
++			__u32		new_map_fd;
++		};
+ 		__u32		flags;		/* extra flags */
+-		/* expected link's program fd; is specified only if
+-		 * BPF_F_REPLACE flag is set in flags */
+-		__u32		old_prog_fd;
++		union {
++			/* expected link's program fd; is specified only if
++			 * BPF_F_REPLACE flag is set in flags.
++			 */
++			__u32		old_prog_fd;
++			/* expected link's map fd; is specified only
++			 * if BPF_F_REPLACE flag is set.
++			 */
++			__u32		old_map_fd;
++		};
+ 	} link_update;
+ 
+ 	struct {
+@@ -1643,17 +1676,17 @@ union bpf_attr {
+  * 	Description
+  * 		This helper is a "printk()-like" facility for debugging. It
+  * 		prints a message defined by format *fmt* (of size *fmt_size*)
+- * 		to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
++ * 		to file *\/sys/kernel/tracing/trace* from TraceFS, if
+  * 		available. It can take up to three additional **u64**
+  * 		arguments (as an eBPF helpers, the total number of arguments is
+  * 		limited to five).
+  *
+  * 		Each time the helper is called, it appends a line to the trace.
+- * 		Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
+- * 		open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
++ * 		Lines are discarded while *\/sys/kernel/tracing/trace* is
++ * 		open, use *\/sys/kernel/tracing/trace_pipe* to avoid this.
+  * 		The format of the trace is customizable, and the exact output
+  * 		one will get depends on the options set in
+- * 		*\/sys/kernel/debug/tracing/trace_options* (see also the
++ * 		*\/sys/kernel/tracing/trace_options* (see also the
+  * 		*README* file under the same directory). However, it usually
+  * 		defaults to something like:
+  *
+@@ -2002,6 +2035,9 @@ union bpf_attr {
+  * 			sending the packet. This flag was added for GRE
+  * 			encapsulation, but might be used with other protocols
+  * 			as well in the future.
++ * 		**BPF_F_NO_TUNNEL_KEY**
++ * 			Add a flag to tunnel metadata indicating that no tunnel
++ * 			key should be set in the resulting tunnel header.
+  *
+  * 		Here is a typical usage on the transmit path:
+  *
+@@ -2645,6 +2681,11 @@ union bpf_attr {
+  * 		  Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
+  * 		  L2 type as Ethernet.
+  *
++ * 		* **BPF_F_ADJ_ROOM_DECAP_L3_IPV4**,
++ * 		  **BPF_F_ADJ_ROOM_DECAP_L3_IPV6**:
++ * 		  Indicate the new IP header version after decapsulating the outer
++ * 		  IP header. Used when the inner and outer IP versions are different.
++ *
+  * 		A call to this helper is susceptible to change the underlying
+  * 		packet buffer. Therefore, at load time, all checks on pointers
+  * 		previously done by the verifier are invalidated and must be
+@@ -2789,7 +2830,7 @@ union bpf_attr {
+  *
+  * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
+  * 	Description
+- * 		For en eBPF program attached to a perf event, retrieve the
++ * 		For an eBPF program attached to a perf event, retrieve the
+  * 		value of the event counter associated to *ctx* and store it in
+  * 		the structure pointed by *buf* and of size *buf_size*. Enabled
+  * 		and running times are also stored in the structure (see
+@@ -3122,6 +3163,11 @@ union bpf_attr {
+  * 		**BPF_FIB_LOOKUP_OUTPUT**
+  * 			Perform lookup from an egress perspective (default is
+  * 			ingress).
++ * 		**BPF_FIB_LOOKUP_SKIP_NEIGH**
++ * 			Skip the neighbour table lookup. *params*->dmac
++ * 			and *params*->smac will not be set as output. A common
++ * 			use case is to call **bpf_redirect_neigh**\ () after
++ * 			doing **bpf_fib_lookup**\ ().
+  *
+  * 		*ctx* is either **struct xdp_md** for XDP programs or
+  * 		**struct sk_buff** tc cls_act programs.
+@@ -4952,6 +4998,12 @@ union bpf_attr {
+  * 		different maps if key/value layout matches across maps.
+  * 		Every bpf_timer_set_callback() can have different callback_fn.
+  *
++ * 		*flags* can be one of:
++ *
++ * 		**BPF_F_TIMER_ABS**
++ * 			Start the timer in absolute expire value instead of the
++ * 			default relative one.
++ *
+  * 	Return
+  * 		0 on success.
+  * 		**-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier
+@@ -5294,7 +5346,7 @@ union bpf_attr {
+  * 	Return
+  * 		Nothing. Always succeeds.
+  *
+- * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset, u64 flags)
++ * long bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr *src, u32 offset, u64 flags)
+  * 	Description
+  * 		Read *len* bytes from *src* into *dst*, starting from *offset*
+  * 		into *src*.
+@@ -5304,22 +5356,36 @@ union bpf_attr {
+  * 		of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
+  * 		*flags* is not 0.
+  *
+- * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
++ * long bpf_dynptr_write(const struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
+  * 	Description
+  * 		Write *len* bytes from *src* into *dst*, starting from *offset*
+  * 		into *dst*.
+- * 		*flags* is currently unused.
++ *
++ * 		*flags* must be 0 except for skb-type dynptrs.
++ *
++ * 		For skb-type dynptrs:
++ * 		    * All data slices of the dynptr are automatically
++ * 		      invalidated after **bpf_dynptr_write**\ (). This is
++ * 		      because writing may pull the skb and change the
++ * 		      underlying packet buffer.
++ *
++ * 		    * For *flags*, please see the flags accepted by
++ * 		      **bpf_skb_store_bytes**\ ().
+  * 	Return
+  * 		0 on success, -E2BIG if *offset* + *len* exceeds the length
+  * 		of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
+- * 		is a read-only dynptr or if *flags* is not 0.
++ * 		is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs,
++ * 		other errors correspond to errors returned by **bpf_skb_store_bytes**\ ().
+  *
+- * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)
++ * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u32 offset, u32 len)
+  * 	Description
+  * 		Get a pointer to the underlying dynptr data.
+  *
+  * 		*len* must be a statically known value. The returned data slice
+  * 		is invalidated whenever the dynptr is invalidated.
++ *
++ * 		skb and xdp type dynptrs may not use bpf_dynptr_data. They should
++ * 		instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr.
+  * 	Return
+  * 		Pointer to the underlying dynptr data, NULL if the dynptr is
+  * 		read-only, if the dynptr is invalid, or if the offset and length
+@@ -5415,7 +5481,7 @@ union bpf_attr {
+  * 		Drain samples from the specified user ring buffer, and invoke
+  * 		the provided callback for each such sample:
+  *
+- * 		long (\*callback_fn)(struct bpf_dynptr \*dynptr, void \*ctx);
++ * 		long (\*callback_fn)(const struct bpf_dynptr \*dynptr, void \*ctx);
+  *
+  * 		If **callback_fn** returns 0, the helper will continue to try
+  * 		and drain the next sample, up to a maximum of
+@@ -5765,6 +5831,7 @@ enum {
+ 	BPF_F_ZERO_CSUM_TX		= (1ULL << 1),
+ 	BPF_F_DONT_FRAGMENT		= (1ULL << 2),
+ 	BPF_F_SEQ_NUMBER		= (1ULL << 3),
++	BPF_F_NO_TUNNEL_KEY		= (1ULL << 4),
+ };
+ 
+ /* BPF_FUNC_skb_get_tunnel_key flags. */
+@@ -5804,6 +5871,8 @@ enum {
+ 	BPF_F_ADJ_ROOM_ENCAP_L4_UDP	= (1ULL << 4),
+ 	BPF_F_ADJ_ROOM_NO_CSUM_RESET	= (1ULL << 5),
+ 	BPF_F_ADJ_ROOM_ENCAP_L2_ETH	= (1ULL << 6),
++	BPF_F_ADJ_ROOM_DECAP_L3_IPV4	= (1ULL << 7),
++	BPF_F_ADJ_ROOM_DECAP_L3_IPV6	= (1ULL << 8),
+ };
+ 
+ enum {
+@@ -6339,6 +6408,9 @@ struct bpf_link_info {
+ 		struct {
+ 			__u32 ifindex;
+ 		} xdp;
++		struct {
++			__u32 map_id;
++		} struct_ops;
+ 	};
+ } __attribute__((aligned(8)));
+ 
+@@ -6735,6 +6807,7 @@ struct bpf_raw_tracepoint_args {
+ enum {
+ 	BPF_FIB_LOOKUP_DIRECT  = (1U << 0),
+ 	BPF_FIB_LOOKUP_OUTPUT  = (1U << 1),
++	BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
+ };
+ 
+ enum {
+@@ -6902,6 +6975,21 @@ struct bpf_list_node {
+ 	__u64 :64;
+ } __attribute__((aligned(8)));
+ 
++struct bpf_rb_root {
++	__u64 :64;
++	__u64 :64;
++} __attribute__((aligned(8)));
++
++struct bpf_rb_node {
++	__u64 :64;
++	__u64 :64;
++	__u64 :64;
++} __attribute__((aligned(8)));
++
++struct bpf_refcount {
++	__u32 :32;
++} __attribute__((aligned(4)));
++
+ struct bpf_sysctl {
+ 	__u32	write;		/* Sysctl is being read (= 0) or written (= 1).
+ 				 * Allows 1,2,4-byte read, but no write.
+@@ -7051,5 +7139,22 @@ struct bpf_core_relo {
+ 	enum bpf_core_relo_kind kind;
+ };
+ 
++/*
++ * Flags to control bpf_timer_start() behaviour.
++ *     - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
++ *       relative to current time.
++ */
++enum {
++	BPF_F_TIMER_ABS = (1ULL << 0),
++};
++
++/* BPF numbers iterator state */
++struct bpf_iter_num {
++	/* opaque iterator state; having __u64 here allows to preserve correct
++	 * alignment requirements in vmlinux.h, generated from BTF
++	 */
++	__u64 __opaque[1];
++} __attribute__((aligned(8)));
++
+ #endif /* _UAPI__LINUX_BPF_H__ */
+ )********"
+diff --git a/src/cc/export/helpers.h b/src/cc/export/helpers.h
+index d7b869e0..e989440a 100644
+--- a/src/cc/export/helpers.h
++++ b/src/cc/export/helpers.h
+@@ -1006,13 +1006,13 @@ static void (*bpf_ringbuf_submit_dynptr)(struct bpf_dynptr *ptr, __u64 flags) =
+   (void *)BPF_FUNC_ringbuf_submit_dynptr;
+ static void (*bpf_ringbuf_discard_dynptr)(struct bpf_dynptr *ptr, __u64 flags) =
+   (void *)BPF_FUNC_ringbuf_discard_dynptr;
+-static long (*bpf_dynptr_read)(void *dst, __u32 len, struct bpf_dynptr *src, __u32 offset,
++static long (*bpf_dynptr_read)(void *dst, __u32 len, const struct bpf_dynptr *src, __u32 offset,
+                                __u64 flags) =
+   (void *)BPF_FUNC_dynptr_read;
+-static long (*bpf_dynptr_write)(struct bpf_dynptr *dst, __u32 offset, void *src, __u32 len,
++static long (*bpf_dynptr_write)(const struct bpf_dynptr *dst, __u32 offset, void *src, __u32 len,
+                                 __u64 flags) =
+   (void *)BPF_FUNC_dynptr_write;
+-static void *(*bpf_dynptr_data)(struct bpf_dynptr *ptr, __u32 offset, __u32 len) =
++static void *(*bpf_dynptr_data)(const struct bpf_dynptr *ptr, __u32 offset, __u32 len) =
+   (void *)BPF_FUNC_dynptr_data;
+ static __s64 (*bpf_tcp_raw_gen_syncookie_ipv4)(struct iphdr *iph, struct tcphdr *th,
+                                                __u32 th_len) =
+-- 
+2.41.0
+
diff --git a/bcc.spec b/bcc.spec
index e023b83..5aeca4f 100644
--- a/bcc.spec
+++ b/bcc.spec
@@ -25,7 +25,7 @@
 
 Name:           bcc
 Version:        0.26.0
-Release:        3%{?dist}
+Release:        4%{?dist}
 Summary:        BPF Compiler Collection (BCC)
 License:        ASL 2.0
 URL:            https://github.com/iovisor/bcc
@@ -37,6 +37,7 @@ Patch3:         %%{name}-%%{version}-tools-compactsnoop.py-Fix-raw_tracepoint-In
 Patch4:         %%{name}-%%{version}-killsnoop-add-missing-s-and-T-options-to-the-synopsi.patch
 Patch5:         %%{name}-%%{version}-tools-funcslower-fix-printing-of-folded-stacks.patch
 Patch6:         %%{name}-%%{version}-tools-deadlock-Add-an-option-to-set-the-maximum-numb.patch
+Patch7:         %%{name}-%%{version}-sync-with-latest-libbpf-repo.patch
 
 # Arches will be included as upstream support is added and dependencies are
 # satisfied in the respective arches
@@ -260,6 +261,9 @@ cp -a libbpf-tools/tmp-install/bin/* %{buildroot}/%{_sbindir}/
 %endif
 
 %changelog
+* Wed Aug 09 2023 Jerome Marchand - 0.26.0-4
+- Fix tcpretrans (rhbz#2226967)
+
 * Fri May 12 2023 Jerome Marchand - 0.26.0-3
 - Rebuild with LLVM 16 (rhbz#2050112)
 - Fix compactsnoop (rhbz#2042236)