Rebase bcc to 0.30.0

Also fix the following:
- Really prevent the loading of compromised headers (CVE-2024-2314)
- Add python3-pyelftools dependency
- Exclude unsupported tools

Resolves: RHEL-32379
CVE: CVE-2024-2314

Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
Jerome Marchand 2024-04-10 15:48:49 +02:00
parent 7a29eb1782
commit 16b407598e
7 changed files with 110 additions and 979 deletions

.gitignore

@@ -22,3 +22,4 @@
 /bcc-0.27.0.tar.gz
 /bcc-0.28.0.tar.gz
 /bcc-0.29.1.tar.gz
+/bcc-0.30.0.tar.gz

Fix-ttysnoop.py-with-newer-kernels.patch

@@ -1,132 +0,0 @@
From 89126c7452c29736d38dc072a952b0b0c831fade Mon Sep 17 00:00:00 2001
From: Yonghong Song <yonghong.song@linux.dev>
Date: Mon, 29 Jan 2024 16:13:30 -0800
Subject: [PATCH] Fix ttysnoop.py with newer kernels
Jerome Marchand reported that ttysnoop.py won't work properly
with newer kernels (#4884). I did some investigation and found
that a kernel data structure change causes a verification failure.
The failure is caused by the following:
; kvec = from->kvec;
// R1=ptr_iov_iter()
15: (79) r1 = *(u64 *)(r1 +16) ; R1_w=scalar()
; count = kvec->iov_len;
16: (bf) r2 = r1 ; R1_w=scalar(id=1) R2_w=scalar(id=1)
17: (07) r2 += 8 ; R2_w=scalar()
18: (05) goto pc+3
;
22: (79) r2 = *(u64 *)(r2 +0)
R2 invalid mem access 'scalar'
So basically, loading 'iov_iter + 16' returns a scalar but the verifier
expects it to be a pointer.
In v6.4, we have
struct iovec
{
void __user *iov_base; /* BSD uses caddr_t (1003.1g requires void *) */
__kernel_size_t iov_len; /* Must be size_t (1003.1g) */
};
struct iov_iter {
u8 iter_type;
bool copy_mc;
bool nofault;
bool data_source;
bool user_backed;
union {
size_t iov_offset;
int last_offset;
};
union {
struct iovec __ubuf_iovec;
struct {
union {
const struct iovec *__iov;
const struct kvec *kvec;
const struct bio_vec *bvec;
struct xarray *xarray;
struct pipe_inode_info *pipe;
void __user *ubuf;
};
size_t count;
};
};
union {
unsigned long nr_segs;
struct {
unsigned int head;
unsigned int start_head;
};
loff_t xarray_start;
};
};
The kernel traversal chain will be
"struct iov_iter" -> "struct iovec __ubuf_iovec" -> "void __user *iov_base".
Since the "iov_base" type is a ptr to void, the kernel considers the
loaded value as a scalar which caused verification failure.
But for old kernel like 5.19, we do not have this issue.
struct iovec
{
void __user *iov_base; /* BSD uses caddr_t (1003.1g requires void *) */
__kernel_size_t iov_len; /* Must be size_t (1003.1g) */
};
struct iov_iter {
u8 iter_type;
bool nofault;
bool data_source;
bool user_backed;
size_t iov_offset;
size_t count;
union {
const struct iovec *iov;
const struct kvec *kvec;
const struct bio_vec *bvec;
struct xarray *xarray;
struct pipe_inode_info *pipe;
void __user *ubuf;
};
union {
unsigned long nr_segs;
struct {
unsigned int head;
unsigned int start_head;
};
loff_t xarray_start;
};
};
The kernel traversal chain will be
"struct iov_iter" -> "const struct iovec *iov"
Note that "const struct iovec *iov" is used since it is the *first* member
inside the union. The traversal stops once we hit a pointer.
So the kernel verifier returns a 'struct iovec' object (untrusted, so it cannot
be used as a parameter to a call) and the verifier can proceed.
To fix the problem, let us use bpf_probe_read_kernel() instead,
so ttysnoop.py can continue to work with newer kernels.
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
---
tools/ttysnoop.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tools/ttysnoop.py b/tools/ttysnoop.py
index 77f97b7c..aca09db4 100755
--- a/tools/ttysnoop.py
+++ b/tools/ttysnoop.py
@@ -162,8 +162,8 @@ PROBE_TTY_WRITE
*/
case CASE_ITER_IOVEC_NAME:
kvec = from->kvec;
- buf = kvec->iov_base;
- count = kvec->iov_len;
+ bpf_probe_read_kernel(&buf, sizeof(buf), &kvec->iov_base);
+ bpf_probe_read_kernel(&count, sizeof(count), &kvec->iov_len);
break;
CASE_ITER_UBUF_TEXT
/* TODO: Support more type */
--
2.43.0
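
The heart of the fix, pulled out of the diff: instead of letting the verifier
type-check a dereference that BTF resolves through the union (on v6.4+ layouts,
to a scalar), the bytes are copied explicitly. This is a fragment-level sketch
of the BCC program text, not standalone C; from, buf and count are the
variables of ttysnoop.py shown above:

/* Direct dereference, rejected on newer kernels:
 *     buf = kvec->iov_base;    // R2 invalid mem access 'scalar'
 * Explicit copy: bpf_probe_read_kernel() takes a plain kernel address,
 * so no BTF pointer-type tracking is involved and the program verifies. */
const struct kvec *kvec = from->kvec;
const void *buf;
size_t count;

bpf_probe_read_kernel(&buf, sizeof(buf), &kvec->iov_base);
bpf_probe_read_kernel(&count, sizeof(count), &kvec->iov_len);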

Sync-with-latest-libbpf-repo-4889.patch

@@ -1,727 +0,0 @@
From c0691e35cd65d5400f0b792d5eba81f8eae236dc Mon Sep 17 00:00:00 2001
From: yonghong-song <ys114321@gmail.com>
Date: Tue, 30 Jan 2024 09:14:30 -0800
Subject: [PATCH] Sync with latest libbpf repo (#4889)
Sync with latest libbpf repo.
The top libbpf commit is:
3b0973892891 sync: remove NETDEV_XSK_FLAGS_MASK which is not in bpf/bpf-next anymore
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
---
introspection/bps.c | 1 +
src/cc/compat/linux/virtual_bpf.h | 368 ++++++++++++++++++++++++++----
src/cc/libbpf | 2 +-
3 files changed, 326 insertions(+), 45 deletions(-)
diff --git a/introspection/bps.c b/introspection/bps.c
index 3956fbf2..8cdef54a 100644
--- a/introspection/bps.c
+++ b/introspection/bps.c
@@ -48,6 +48,7 @@ static const char * const prog_type_strings[] = {
[BPF_PROG_TYPE_LSM] = "lsm",
[BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
[BPF_PROG_TYPE_SYSCALL] = "syscall",
+ [BPF_PROG_TYPE_NETFILTER] = "netfilter",
};
static const char * const map_type_strings[] = {
diff --git a/src/cc/compat/linux/virtual_bpf.h b/src/cc/compat/linux/virtual_bpf.h
index a182123e..fcabe71a 100644
--- a/src/cc/compat/linux/virtual_bpf.h
+++ b/src/cc/compat/linux/virtual_bpf.h
@@ -20,6 +20,7 @@ R"********(
/* ld/ldx fields */
#define BPF_DW 0x18 /* double word (64-bit) */
+#define BPF_MEMSX 0x80 /* load with sign extension */
#define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
#define BPF_XADD 0xc0 /* exclusive add - legacy name */
@@ -847,6 +848,36 @@ union bpf_iter_link_info {
* Returns zero on success. On error, -1 is returned and *errno*
* is set appropriately.
*
+ * BPF_TOKEN_CREATE
+ * Description
+ * Create BPF token with embedded information about what
+ * BPF-related functionality it allows:
+ * - a set of allowed bpf() syscall commands;
+ * - a set of allowed BPF map types to be created with
+ * BPF_MAP_CREATE command, if BPF_MAP_CREATE itself is allowed;
+ * - a set of allowed BPF program types and BPF program attach
+ * types to be loaded with BPF_PROG_LOAD command, if
+ * BPF_PROG_LOAD itself is allowed.
+ *
+ * BPF token is created (derived) from an instance of BPF FS,
+ * assuming it has necessary delegation mount options specified.
+ * This BPF token can be passed as an extra parameter to various
+ * bpf() syscall commands to grant BPF subsystem functionality to
+ * unprivileged processes.
+ *
+ * When created, BPF token is "associated" with the owning
+ * user namespace of BPF FS instance (super block) that it was
+ * derived from, and subsequent BPF operations performed with
+ * BPF token would be performing capabilities checks (i.e.,
+ * CAP_BPF, CAP_PERFMON, CAP_NET_ADMIN, CAP_SYS_ADMIN) within
+ * that user namespace. Without BPF token, such capabilities
+ * have to be granted in init user namespace, making bpf()
+ * syscall incompatible with user namespace, for the most part.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
* NOTES
* eBPF objects (maps and programs) can be shared between processes.
*
@@ -901,6 +932,8 @@ enum bpf_cmd {
BPF_ITER_CREATE,
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
+ BPF_TOKEN_CREATE,
+ __MAX_BPF_CMD,
};
enum bpf_map_type {
@@ -932,7 +965,14 @@ enum bpf_map_type {
*/
BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
- BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
+ /* BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE is available to bpf programs
+ * attaching to a cgroup. The new mechanism (BPF_MAP_TYPE_CGRP_STORAGE +
+ * local percpu kptr) supports all BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
+ * functionality and more. So mark * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
+ * deprecated.
+ */
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK,
BPF_MAP_TYPE_SK_STORAGE,
@@ -944,6 +984,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_BLOOM_FILTER,
BPF_MAP_TYPE_USER_RINGBUF,
BPF_MAP_TYPE_CGRP_STORAGE,
+ __MAX_BPF_MAP_TYPE
};
/* Note that tracing related programs such as
@@ -987,6 +1028,8 @@ enum bpf_prog_type {
BPF_PROG_TYPE_LSM,
BPF_PROG_TYPE_SK_LOOKUP,
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
+ BPF_PROG_TYPE_NETFILTER,
+ __MAX_BPF_PROG_TYPE
};
enum bpf_attach_type {
@@ -1035,6 +1078,17 @@ enum bpf_attach_type {
BPF_TRACE_KPROBE_MULTI,
BPF_LSM_CGROUP,
BPF_STRUCT_OPS,
+ BPF_NETFILTER,
+ BPF_TCX_INGRESS,
+ BPF_TCX_EGRESS,
+ BPF_TRACE_UPROBE_MULTI,
+ BPF_CGROUP_UNIX_CONNECT,
+ BPF_CGROUP_UNIX_SENDMSG,
+ BPF_CGROUP_UNIX_RECVMSG,
+ BPF_CGROUP_UNIX_GETPEERNAME,
+ BPF_CGROUP_UNIX_GETSOCKNAME,
+ BPF_NETKIT_PRIMARY,
+ BPF_NETKIT_PEER,
__MAX_BPF_ATTACH_TYPE
};
@@ -1051,8 +1105,23 @@ enum bpf_link_type {
BPF_LINK_TYPE_PERF_EVENT = 7,
BPF_LINK_TYPE_KPROBE_MULTI = 8,
BPF_LINK_TYPE_STRUCT_OPS = 9,
+ BPF_LINK_TYPE_NETFILTER = 10,
+ BPF_LINK_TYPE_TCX = 11,
+ BPF_LINK_TYPE_UPROBE_MULTI = 12,
+ BPF_LINK_TYPE_NETKIT = 13,
+ __MAX_BPF_LINK_TYPE,
+};
+
+#define MAX_BPF_LINK_TYPE __MAX_BPF_LINK_TYPE
- MAX_BPF_LINK_TYPE,
+enum bpf_perf_event_type {
+ BPF_PERF_EVENT_UNSPEC = 0,
+ BPF_PERF_EVENT_UPROBE = 1,
+ BPF_PERF_EVENT_URETPROBE = 2,
+ BPF_PERF_EVENT_KPROBE = 3,
+ BPF_PERF_EVENT_KRETPROBE = 4,
+ BPF_PERF_EVENT_TRACEPOINT = 5,
+ BPF_PERF_EVENT_EVENT = 6,
};
/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
@@ -1101,7 +1170,12 @@ enum bpf_link_type {
*/
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
#define BPF_F_ALLOW_MULTI (1U << 1)
+/* Generic attachment flags. */
#define BPF_F_REPLACE (1U << 2)
+#define BPF_F_BEFORE (1U << 3)
+#define BPF_F_AFTER (1U << 4)
+#define BPF_F_ID (1U << 5)
+#define BPF_F_LINK BPF_F_LINK /* 1 << 13 */
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will perform strict alignment checking as if the kernel
@@ -1163,10 +1237,27 @@ enum bpf_link_type {
*/
#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
+/* The verifier internal test flag. Behavior is undefined */
+#define BPF_F_TEST_REG_INVARIANTS (1U << 7)
+
/* link_create.kprobe_multi.flags used in LINK_CREATE command for
* BPF_TRACE_KPROBE_MULTI attach type to create return probe.
*/
-#define BPF_F_KPROBE_MULTI_RETURN (1U << 0)
+enum {
+ BPF_F_KPROBE_MULTI_RETURN = (1U << 0)
+};
+
+/* link_create.uprobe_multi.flags used in LINK_CREATE command for
+ * BPF_TRACE_UPROBE_MULTI attach type to create return probe.
+ */
+enum {
+ BPF_F_UPROBE_MULTI_RETURN = (1U << 0)
+};
+
+/* link_create.netfilter.flags used in LINK_CREATE command for
+ * BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation.
+ */
+#define BPF_F_NETFILTER_IP_DEFRAG (1U << 0)
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
* the following extensions:
@@ -1271,6 +1362,15 @@ enum {
/* Create a map that will be registered/unregesitered by the backed bpf_link */
BPF_F_LINK = (1U << 13),
+
+/* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */
+ BPF_F_PATH_FD = (1U << 14),
+
+/* Flag for value_type_btf_obj_fd, the fd is available */
+ BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15),
+
+/* BPF token FD is passed in a corresponding command's token_fd field */
+ BPF_F_TOKEN_FD = (1U << 16),
};
/* Flags for BPF_PROG_QUERY. */
@@ -1344,6 +1444,15 @@ union bpf_attr {
* to using 5 hash functions).
*/
__u64 map_extra;
+
+ __s32 value_type_btf_obj_fd; /* fd pointing to a BTF
+ * type data for
+ * btf_vmlinux_value_type_id.
+ */
+ /* BPF token FD to use with BPF_MAP_CREATE operation.
+ * If provided, map_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 map_token_fd;
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -1413,23 +1522,39 @@ union bpf_attr {
* truncated), or smaller (if log buffer wasn't filled completely).
*/
__u32 log_true_size;
+ /* BPF token FD to use with BPF_PROG_LOAD operation.
+ * If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 prog_token_fd;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
__aligned_u64 pathname;
__u32 bpf_fd;
__u32 file_flags;
+ /* Same as dirfd in openat() syscall; see openat(2)
+ * manpage for details of path FD and pathname semantics;
+ * path_fd should accompanied by BPF_F_PATH_FD flag set in
+ * file_flags field, otherwise it should be set to zero;
+ * if BPF_F_PATH_FD flag is not set, AT_FDCWD is assumed.
+ */
+ __s32 path_fd;
};
struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
- __u32 target_fd; /* container object to attach to */
- __u32 attach_bpf_fd; /* eBPF program to attach */
+ union {
+ __u32 target_fd; /* target object to attach to or ... */
+ __u32 target_ifindex; /* target ifindex */
+ };
+ __u32 attach_bpf_fd;
__u32 attach_type;
__u32 attach_flags;
- __u32 replace_bpf_fd; /* previously attached eBPF
- * program to replace if
- * BPF_F_REPLACE is used
- */
+ __u32 replace_bpf_fd;
+ union {
+ __u32 relative_fd;
+ __u32 relative_id;
+ };
+ __u64 expected_revision;
};
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
@@ -1475,16 +1600,26 @@ union bpf_attr {
} info;
struct { /* anonymous struct used by BPF_PROG_QUERY command */
- __u32 target_fd; /* container object to query */
+ union {
+ __u32 target_fd; /* target object to query or ... */
+ __u32 target_ifindex; /* target ifindex */
+ };
__u32 attach_type;
__u32 query_flags;
__u32 attach_flags;
__aligned_u64 prog_ids;
- __u32 prog_cnt;
+ union {
+ __u32 prog_cnt;
+ __u32 count;
+ };
+ __u32 :32;
/* output: per-program attach_flags.
* not allowed to be set during effective query.
*/
__aligned_u64 prog_attach_flags;
+ __aligned_u64 link_ids;
+ __aligned_u64 link_attach_flags;
+ __u64 revision;
} query;
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
@@ -1503,6 +1638,11 @@ union bpf_attr {
* truncated), or smaller (if log buffer wasn't filled completely).
*/
__u32 btf_log_true_size;
+ __u32 btf_flags;
+ /* BPF token FD to use with BPF_BTF_LOAD operation.
+ * If provided, btf_flags should have BPF_F_TOKEN_FD flag set.
+ */
+ __s32 btf_token_fd;
};
struct {
@@ -1527,13 +1667,13 @@ union bpf_attr {
__u32 map_fd; /* struct_ops to attach */
};
union {
- __u32 target_fd; /* object to attach to */
- __u32 target_ifindex; /* target ifindex */
+ __u32 target_fd; /* target object to attach to or ... */
+ __u32 target_ifindex; /* target ifindex */
};
__u32 attach_type; /* attach type */
__u32 flags; /* extra flags */
union {
- __u32 target_btf_id; /* btf_id of target to attach to */
+ __u32 target_btf_id; /* btf_id of target to attach to */
struct {
__aligned_u64 iter_info; /* extra bpf_iter_link_info */
__u32 iter_info_len; /* iter_info length */
@@ -1561,6 +1701,35 @@ union bpf_attr {
*/
__u64 cookie;
} tracing;
+ struct {
+ __u32 pf;
+ __u32 hooknum;
+ __s32 priority;
+ __u32 flags;
+ } netfilter;
+ struct {
+ union {
+ __u32 relative_fd;
+ __u32 relative_id;
+ };
+ __u64 expected_revision;
+ } tcx;
+ struct {
+ __aligned_u64 path;
+ __aligned_u64 offsets;
+ __aligned_u64 ref_ctr_offsets;
+ __aligned_u64 cookies;
+ __u32 cnt;
+ __u32 flags;
+ __u32 pid;
+ } uprobe_multi;
+ struct {
+ union {
+ __u32 relative_fd;
+ __u32 relative_id;
+ };
+ __u64 expected_revision;
+ } netkit;
};
} link_create;
@@ -1604,6 +1773,11 @@ union bpf_attr {
__u32 flags; /* extra flags */
} prog_bind_map;
+ struct { /* struct used by BPF_TOKEN_CREATE command */
+ __u32 flags;
+ __u32 bpffs_fd;
+ } token_create;
+
} __attribute__((aligned(8)));
/* The description below is an attempt at providing documentation to eBPF
@@ -1879,7 +2053,9 @@ union bpf_attr {
* performed again, if the helper is used in combination with
* direct packet access.
* Return
- * 0 on success, or a negative error in case of failure.
+ * 0 on success, or a negative error in case of failure. Positive
+ * error indicates a potential drop or congestion in the target
+ * device. The particular positive error codes are not defined.
*
* u64 bpf_get_current_pid_tgid(void)
* Description
@@ -2612,8 +2788,8 @@ union bpf_attr {
* *bpf_socket* should be one of the following:
*
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
- * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
- * and **BPF_CGROUP_INET6_CONNECT**.
+ * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
+ * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
*
* This helper actually implements a subset of **setsockopt()**.
* It supports the following *level*\ s:
@@ -2851,8 +3027,8 @@ union bpf_attr {
* *bpf_socket* should be one of the following:
*
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
- * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
- * and **BPF_CGROUP_INET6_CONNECT**.
+ * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**,
+ * **BPF_CGROUP_INET6_CONNECT** and **BPF_CGROUP_UNIX_CONNECT**.
*
* This helper actually implements a subset of **getsockopt()**.
* It supports the same set of *optname*\ s that is supported by
@@ -3160,6 +3336,10 @@ union bpf_attr {
* **BPF_FIB_LOOKUP_DIRECT**
* Do a direct table lookup vs full lookup using FIB
* rules.
+ * **BPF_FIB_LOOKUP_TBID**
+ * Used with BPF_FIB_LOOKUP_DIRECT.
+ * Use the routing table ID present in *params*->tbid
+ * for the fib lookup.
* **BPF_FIB_LOOKUP_OUTPUT**
* Perform lookup from an egress perspective (default is
* ingress).
@@ -3168,6 +3348,11 @@ union bpf_attr {
* and *params*->smac will not be set as output. A common
* use case is to call **bpf_redirect_neigh**\ () after
* doing **bpf_fib_lookup**\ ().
+ * **BPF_FIB_LOOKUP_SRC**
+ * Derive and set source IP addr in *params*->ipv{4,6}_src
+ * for the nexthop. If the src addr cannot be derived,
+ * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this
+ * case, *params*->dmac and *params*->smac are not set either.
*
* *ctx* is either **struct xdp_md** for XDP programs or
* **struct sk_buff** tc cls_act programs.
@@ -4137,9 +4322,6 @@ union bpf_attr {
* **-EOPNOTSUPP** if the operation is not supported, for example
* a call from outside of TC ingress.
*
- * **-ESOCKTNOSUPPORT** if the socket type is not supported
- * (reuseport).
- *
* long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
* Description
* Helper is overloaded depending on BPF program type. This
@@ -4404,6 +4586,8 @@ union bpf_attr {
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
+ * Note: the user stack will only be populated if the *task* is
+ * the current task; all other tasks will return -EOPNOTSUPP.
* To achieve this, the helper needs *task*, which is a valid
* pointer to **struct task_struct**. To store the stacktrace, the
* bpf program provides *buf* with a nonnegative *size*.
@@ -4415,6 +4599,7 @@ union bpf_attr {
*
* **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack.
+ * The *task* must be the current task.
* **BPF_F_USER_BUILD_ID**
* Collect buildid+offset instead of ips for user stack,
* only valid if **BPF_F_USER_STACK** is also specified.
@@ -4718,9 +4903,9 @@ union bpf_attr {
* going through the CPU's backlog queue.
*
* The *flags* argument is reserved and must be 0. The helper is
- * currently only supported for tc BPF program types at the ingress
- * hook and for veth device types. The peer device must reside in a
- * different network namespace.
+ * currently only supported for tc BPF program types at the
+ * ingress hook and for veth and netkit target device types. The
+ * peer device must reside in a different network namespace.
* Return
* The helper returns **TC_ACT_REDIRECT** on success or
* **TC_ACT_SHOT** on error.
@@ -5003,6 +5188,8 @@ union bpf_attr {
* **BPF_F_TIMER_ABS**
* Start the timer in absolute expire value instead of the
* default relative one.
+ * **BPF_F_TIMER_CPU_PIN**
+ * Timer will be pinned to the CPU of the caller.
*
* Return
* 0 on success.
@@ -5022,9 +5209,14 @@ union bpf_attr {
* u64 bpf_get_func_ip(void *ctx)
* Description
* Get address of the traced function (for tracing and kprobe programs).
+ *
+ * When called for kprobe program attached as uprobe it returns
+ * probe address for both entry and return uprobe.
+ *
* Return
- * Address of the traced function.
+ * Address of the traced function for kprobe.
* 0 for kprobes placed within the function (not at the entry).
+ * Address of the probe for uprobe and return uprobe.
*
* u64 bpf_get_attach_cookie(void *ctx)
* Description
@@ -6165,6 +6357,19 @@ struct bpf_sock_tuple {
};
};
+/* (Simplified) user return codes for tcx prog type.
+ * A valid tcx program must return one of these defined values. All other
+ * return codes are reserved for future use. Must remain compatible with
+ * their TC_ACT_* counter-parts. For compatibility in behavior, unknown
+ * return codes are mapped to TCX_NEXT.
+ */
+enum tcx_action_base {
+ TCX_NEXT = -1,
+ TCX_PASS = 0,
+ TCX_DROP = 2,
+ TCX_REDIRECT = 7,
+};
+
struct bpf_xdp_sock {
__u32 queue_id;
};
@@ -6346,7 +6551,7 @@ struct bpf_map_info {
__u32 btf_id;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
- __u32 :32; /* alignment pad */
+ __u32 btf_vmlinux_id;
__u64 map_extra;
} __attribute__((aligned(8)));
@@ -6411,6 +6616,69 @@ struct bpf_link_info {
struct {
__u32 map_id;
} struct_ops;
+ struct {
+ __u32 pf;
+ __u32 hooknum;
+ __s32 priority;
+ __u32 flags;
+ } netfilter;
+ struct {
+ __aligned_u64 addrs;
+ __u32 count; /* in/out: kprobe_multi function count */
+ __u32 flags;
+ __u64 missed;
+ __aligned_u64 cookies;
+ } kprobe_multi;
+ struct {
+ __aligned_u64 path;
+ __aligned_u64 offsets;
+ __aligned_u64 ref_ctr_offsets;
+ __aligned_u64 cookies;
+ __u32 path_size; /* in/out: real path size on success, including zero byte */
+ __u32 count; /* in/out: uprobe_multi offsets/ref_ctr_offsets/cookies count */
+ __u32 flags;
+ __u32 pid;
+ } uprobe_multi;
+ struct {
+ __u32 type; /* enum bpf_perf_event_type */
+ __u32 :32;
+ union {
+ struct {
+ __aligned_u64 file_name; /* in/out */
+ __u32 name_len;
+ __u32 offset; /* offset from file_name */
+ __u64 cookie;
+ } uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
+ struct {
+ __aligned_u64 func_name; /* in/out */
+ __u32 name_len;
+ __u32 offset; /* offset from func_name */
+ __u64 addr;
+ __u64 missed;
+ __u64 cookie;
+ } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
+ struct {
+ __aligned_u64 tp_name; /* in/out */
+ __u32 name_len;
+ __u32 :32;
+ __u64 cookie;
+ } tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
+ struct {
+ __u64 config;
+ __u32 type;
+ __u32 :32;
+ __u64 cookie;
+ } event; /* BPF_PERF_EVENT_EVENT */
+ };
+ } perf_event;
+ struct {
+ __u32 ifindex;
+ __u32 attach_type;
+ } tcx;
+ struct {
+ __u32 ifindex;
+ __u32 attach_type;
+ } netkit;
};
} __attribute__((aligned(8)));
@@ -6707,6 +6975,7 @@ enum {
BPF_TCP_LISTEN,
BPF_TCP_CLOSING, /* Now a valid state */
BPF_TCP_NEW_SYN_RECV,
+ BPF_TCP_BOUND_INACTIVE,
BPF_TCP_MAX_STATES /* Leave at the end! */
};
@@ -6808,6 +7077,8 @@ enum {
BPF_FIB_LOOKUP_DIRECT = (1U << 0),
BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
+ BPF_FIB_LOOKUP_TBID = (1U << 3),
+ BPF_FIB_LOOKUP_SRC = (1U << 4),
};
enum {
@@ -6820,6 +7091,7 @@ enum {
BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
+ BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */
};
struct bpf_fib_lookup {
@@ -6854,6 +7126,9 @@ struct bpf_fib_lookup {
__u32 rt_metric;
};
+ /* input: source address to consider for lookup
+ * output: source address result from lookup
+ */
union {
__be32 ipv4_src;
__u32 ipv6_src[4]; /* in6_addr; network order */
@@ -6868,9 +7143,19 @@ struct bpf_fib_lookup {
__u32 ipv6_dst[4]; /* in6_addr; network order */
};
- /* output */
- __be16 h_vlan_proto;
- __be16 h_vlan_TCI;
+ union {
+ struct {
+ /* output */
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ };
+ /* input: when accompanied with the
+ * 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a
+ * specific routing table to use for the fib lookup.
+ */
+ __u32 tbid;
+ };
+
__u8 smac[6]; /* ETH_ALEN */
__u8 dmac[6]; /* ETH_ALEN */
};
@@ -6956,38 +7241,31 @@ struct bpf_spin_lock {
};
struct bpf_timer {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_dynptr {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_head {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_list_node {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[3];
} __attribute__((aligned(8)));
struct bpf_rb_root {
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[2];
} __attribute__((aligned(8)));
struct bpf_rb_node {
- __u64 :64;
- __u64 :64;
- __u64 :64;
+ __u64 __opaque[4];
} __attribute__((aligned(8)));
struct bpf_refcount {
- __u32 :32;
+ __u32 __opaque[1];
} __attribute__((aligned(4)));
struct bpf_sysctl {
@@ -7143,9 +7421,11 @@ struct bpf_core_relo {
* Flags to control bpf_timer_start() behaviour.
* - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
* relative to current time.
+ * - BPF_F_TIMER_CPU_PIN: Timer will be pinned to the CPU of the caller.
*/
enum {
BPF_F_TIMER_ABS = (1ULL << 0),
+ BPF_F_TIMER_CPU_PIN = (1ULL << 1),
};
/* BPF numbers iterator state */
--
2.43.0
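
Among the additions synced above are the BPF_TOKEN_CREATE command and the
token_create member of union bpf_attr. A minimal sketch of driving the new
command through the raw syscall, assuming system headers new enough to carry
these fields and a bpffs_fd opened on a BPF FS instance mounted with the
delegation options (both are assumptions; the mount setup is not shown):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: derive a BPF token from an already-opened bpffs fd with the
 * BPF_TOKEN_CREATE command documented in the header sync above.
 * Returns the new token fd, or -1 with errno set. */
static int bpf_token_create(int bpffs_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.token_create.bpffs_fd = bpffs_fd;
	attr.token_create.flags = 0;

	return syscall(__NR_bpf, BPF_TOKEN_CREATE, &attr, sizeof(attr));
}

The returned fd is then passed in the new *_token_fd fields, with the matching
BPF_F_TOKEN_FD flag set, so that an unprivileged process in the owning user
namespace can create maps or load programs.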

bcc-0.30.0-clang-fail-when-the-kheaders-ownership-is-wrong-4928.patch

@@ -0,0 +1,76 @@
From 32a47d9002269b391c0c7ff76aeb2c015deb4b59 Mon Sep 17 00:00:00 2001
From: Jerome Marchand <jmarchan@redhat.com>
Date: Fri, 17 May 2024 15:36:07 +0200
Subject: [PATCH] clang: fail when the kheaders ownership is wrong (#4928)
(#4985)
file_exists_and_ownedby() returns -1 when the file exists but its
ownership is unexpected, which is very misleading since anything
non-zero is interpreted as true and a function with such a name is
expected to return a boolean. So currently all this does is write a
warning message and continue as if nothing is wrong.
Make file_exists_and_ownedby() return false when the ownership is
wrong and have get_proc_kheaders() fail when this happens. Also have
all the *exists* functions return bool to avoid such issues in the
future.
Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
---
src/cc/frontends/clang/kbuild_helper.cc | 22 +++++++++++++++++-----
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/src/cc/frontends/clang/kbuild_helper.cc b/src/cc/frontends/clang/kbuild_helper.cc
index 9409e4cc..5d3ad9c2 100644
--- a/src/cc/frontends/clang/kbuild_helper.cc
+++ b/src/cc/frontends/clang/kbuild_helper.cc
@@ -140,20 +140,26 @@ int KBuildHelper::get_flags(const char *uname_machine, vector<string> *cflags) {
return 0;
}
-static inline int file_exists_and_ownedby(const char *f, uid_t uid)
+static inline bool file_exists(const char *f)
+{
+ struct stat buffer;
+ return (stat(f, &buffer) == 0);
+}
+
+static inline bool file_exists_and_ownedby(const char *f, uid_t uid)
{
struct stat buffer;
int ret = stat(f, &buffer) == 0;
if (ret) {
if (buffer.st_uid != uid) {
std::cout << "ERROR: header file ownership unexpected: " << std::string(f) << "\n";
- return -1;
+ return false;
}
}
return ret;
}
-static inline int proc_kheaders_exists(void)
+static inline bool proc_kheaders_exists(void)
{
return file_exists_and_ownedby(PROC_KHEADERS_PATH, 0);
}
@@ -231,8 +237,14 @@ int get_proc_kheaders(std::string &dirpath)
uname_data.release);
dirpath = std::string(dirpath_tmp);
- if (file_exists_and_ownedby(dirpath_tmp, 0))
- return 0;
+ if (file_exists(dirpath_tmp)) {
+ if (file_exists_and_ownedby(dirpath_tmp, 0))
+ return 0;
+ else
+ // The path exists, but is owned by a non-root user
+ // Something fishy is going on
+ return -EEXIST;
+ }
// First time so extract it
return extract_kheaders(dirpath, uname_data);
--
2.44.0
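
The problem described in the commit message reproduces in miniature: with the
old int convention, the -1 "exists but wrongly owned" result is truthy, so a
caller testing the return value takes the success path. A standalone
illustration; old_check is a hypothetical stand-in for the previous
file_exists_and_ownedby(), not code from bcc:

#include <stdio.h>

/* old_check mimics the former int convention:
 * 1 = exists and owned by uid, 0 = missing, -1 = exists but wrong owner. */
static int old_check(int exists, int owned)
{
	if (!exists)
		return 0;
	return owned ? 1 : -1;
}

int main(void)
{
	/* -1 is non-zero, so the compromised-headers case still passes the
	 * if(), which is exactly the behaviour CVE-2024-2314 addresses. */
	if (old_check(1, 0))
		printf("headers accepted despite wrong ownership\n");
	return 0;
}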

bcc.spec

@@ -24,15 +24,13 @@
 Name: bcc
-Version: 0.29.1
-Release: 2%{?dist}
+Version: 0.30.0
+Release: 1%{?dist}
 Summary: BPF Compiler Collection (BCC)
 License: Apache-2.0
 URL: https://github.com/iovisor/bcc
 Source0: %{url}/archive/v%{version}/%{name}-%{version}.tar.gz
-Patch0: libbpf-tools-Fix-bindsnoop-for-kernel-v6.6.patch
-Patch1: Fix-ttysnoop.py-with-newer-kernels.patch
-Patch2: Sync-with-latest-libbpf-repo-4889.patch
+Patch0: %{name}-%{version}-clang-fail-when-the-kheaders-ownership-is-wrong-4928.patch
# Arches will be included as upstream support is added and dependencies are
# satisfied in the respective arches
@@ -117,6 +115,7 @@ Summary: Command line tools for BPF Compiler Collection (BCC)
 Requires: bcc = %{version}-%{release}
 Requires: python3-%{name} = %{version}-%{release}
 Requires: python3-netaddr
+Requires: python3-pyelftools
%description tools
Command line tools for BPF Compiler Collection (BCC)
@@ -228,6 +227,22 @@ cp -a libbpf-tools/tmp-install/bin/* %{buildroot}/%{_sbindir}/
 %dir %{_datadir}/%{name}
 %{_datadir}/%{name}/tools/
 %{_datadir}/%{name}/introspection/
+%if 0%{?rhel} > 0
+# inject relies on BPF_KPROBE_OVERRIDE which is not set on RHEL
+%exclude %{_datadir}/%{name}/tools/inject
+%exclude %{_datadir}/%{name}/tools/doc/inject_example.txt
+%exclude %{_mandir}/man8/bcc-inject.8.gz
+# btrfs f2fs and zfs are not available on RHEL
+%exclude %{_datadir}/%{name}/tools/btrfs*
+%exclude %{_datadir}/%{name}/tools/doc/btrfs*
+%exclude %{_mandir}/man8/bcc-btrfs*
+%exclude %{_datadir}/%{name}/tools/zfs*
+%exclude %{_datadir}/%{name}/tools/doc/zfs*
+%exclude %{_mandir}/man8/bcc-zfs*
+%exclude %{_datadir}/%{name}/tools/f2fs*
+%exclude %{_datadir}/%{name}/tools/doc/f2fs*
+%exclude %{_mandir}/man8/bcc-f2fs*
+%endif
%{_mandir}/man8/*
%if %{with lua}
@@ -237,10 +252,22 @@ cp -a libbpf-tools/tmp-install/bin/* %{buildroot}/%{_sbindir}/
 %if %{with libbpf_tools}
 %files -n libbpf-tools
+%ifarch s390x
+%exclude %{_sbindir}/bpf-numamove
+%endif
+# RHEL doesn't provide btrfs or f2fs
+%exclude %{_sbindir}/bpf-btrfs*
+%exclude %{_sbindir}/bpf-f2fs*
 %{_sbindir}/bpf-*
 %endif
 %changelog
+* Thu Jun 13 2024 Jerome Marchand <jmarchan@redhat.com> - 0.30.0-1
+- Rebase to the latest version (RHEL-32379)
+- Really prevent the loading of compromised headers (CVE-2024-2314)
+- Add python3-pyelftools dependency
+- Exclude unsupported tools
+
 * Tue Feb 13 2024 Jerome Marchand <jmarchan@redhat.com> - 0.29.1-2
 - Don't use -no-pie flags.

libbpf-tools-Fix-bindsnoop-for-kernel-v6.6.patch

@@ -1,114 +0,0 @@
From abf7b251c1461dcbe0c1e75d1d0da71662c9fae1 Mon Sep 17 00:00:00 2001
From: Hengqi Chen <hengqi.chen@gmail.com>
Date: Sun, 17 Dec 2023 11:27:10 +0000
Subject: [PATCH] libbpf-tools: Fix bindsnoop for kernel v6.6+
The freebind field in struct inet_sock is gone in recent kernel
versions due to kernel refactoring work ([0]). The change
breaks the bindsnoop tool. Fix it in a CO-RE way.
This should close #4838.
[0]: https://lore.kernel.org/all/20230816081547.1272409-1-edumazet@google.com/
Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
---
libbpf-tools/bindsnoop.bpf.c | 8 +++--
libbpf-tools/core_fixes.bpf.h | 56 +++++++++++++++++++++++++++++++++++
2 files changed, 61 insertions(+), 3 deletions(-)
diff --git a/libbpf-tools/bindsnoop.bpf.c b/libbpf-tools/bindsnoop.bpf.c
index 41dce942..ead19c67 100644
--- a/libbpf-tools/bindsnoop.bpf.c
+++ b/libbpf-tools/bindsnoop.bpf.c
@@ -5,7 +5,9 @@
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
+
#include "bindsnoop.h"
+#include "core_fixes.bpf.h"
#define MAX_ENTRIES 10240
#define MAX_PORTS 1024
@@ -85,9 +87,9 @@ static int probe_exit(struct pt_regs *ctx, short ver)
if (filter_by_port && !port)
goto cleanup;
- opts.fields.freebind = BPF_CORE_READ_BITFIELD_PROBED(inet_sock, freebind);
- opts.fields.transparent = BPF_CORE_READ_BITFIELD_PROBED(inet_sock, transparent);
- opts.fields.bind_address_no_port = BPF_CORE_READ_BITFIELD_PROBED(inet_sock, bind_address_no_port);
+ opts.fields.freebind = get_inet_sock_freebind(inet_sock);
+ opts.fields.transparent = get_inet_sock_transparent(inet_sock);
+ opts.fields.bind_address_no_port = get_inet_sock_bind_address_no_port(inet_sock);
opts.fields.reuseaddress = BPF_CORE_READ_BITFIELD_PROBED(sock, __sk_common.skc_reuse);
opts.fields.reuseport = BPF_CORE_READ_BITFIELD_PROBED(sock, __sk_common.skc_reuseport);
event.opts = opts.data;
diff --git a/libbpf-tools/core_fixes.bpf.h b/libbpf-tools/core_fixes.bpf.h
index 84cb7f18..a4c84c02 100644
--- a/libbpf-tools/core_fixes.bpf.h
+++ b/libbpf-tools/core_fixes.bpf.h
@@ -249,4 +249,60 @@ static __always_inline __u64 get_sock_ident(struct sock *sk)
return (__u64)sk;
}
+/**
+ * During the kernel 6.6 development cycle, several bitfields in struct inet_sock were removed;
+ * they are placed in inet_sock::inet_flags instead ([0]).
+ *
+ * References:
+ * [0]: https://lore.kernel.org/all/20230816081547.1272409-1-edumazet@google.com/
+ */
+struct inet_sock___o {
+ __u8 freebind: 1;
+ __u8 transparent: 1;
+ __u8 bind_address_no_port: 1;
+};
+
+enum {
+ INET_FLAGS_FREEBIND___x = 11,
+ INET_FLAGS_TRANSPARENT___x = 15,
+ INET_FLAGS_BIND_ADDRESS_NO_PORT___x = 18,
+};
+
+struct inet_sock___x {
+ unsigned long inet_flags;
+};
+
+static __always_inline __u8 get_inet_sock_freebind(void *inet_sock)
+{
+ unsigned long inet_flags;
+
+ if (bpf_core_field_exists(struct inet_sock___o, freebind))
+ return BPF_CORE_READ_BITFIELD_PROBED((struct inet_sock___o *)inet_sock, freebind);
+
+ inet_flags = BPF_CORE_READ((struct inet_sock___x *)inet_sock, inet_flags);
+ return (1 << INET_FLAGS_FREEBIND___x) & inet_flags ? 1 : 0;
+}
+
+static __always_inline __u8 get_inet_sock_transparent(void *inet_sock)
+{
+ unsigned long inet_flags;
+
+ if (bpf_core_field_exists(struct inet_sock___o, transparent))
+ return BPF_CORE_READ_BITFIELD_PROBED((struct inet_sock___o *)inet_sock, transparent);
+
+ inet_flags = BPF_CORE_READ((struct inet_sock___x *)inet_sock, inet_flags);
+ return (1 << INET_FLAGS_TRANSPARENT___x) & inet_flags ? 1 : 0;
+}
+
+static __always_inline __u8 get_inet_sock_bind_address_no_port(void *inet_sock)
+{
+ unsigned long inet_flags;
+
+ if (bpf_core_field_exists(struct inet_sock___o, bind_address_no_port))
+ return BPF_CORE_READ_BITFIELD_PROBED((struct inet_sock___o *)inet_sock, bind_address_no_port);
+
+ inet_flags = BPF_CORE_READ((struct inet_sock___x *)inet_sock, inet_flags);
+ return (1 << INET_FLAGS_BIND_ADDRESS_NO_PORT___x) & inet_flags ? 1 : 0;
+}
+
#endif /* __CORE_FIXES_BPF_H */
--
2.43.0
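
The ___o/___x names rely on libbpf's type-flavor convention: during CO-RE
relocation everything from the triple underscore onward is stripped, so both
local definitions match the kernel's struct inet_sock and the right layout is
chosen at load time via bpf_core_field_exists(). A minimal sketch of the same
technique on an invented struct (foo, old_cnt and cnt are hypothetical names
for illustration, compiled as part of a BPF object that includes
bpf/bpf_core_read.h):

/* Suppose struct foo's member "old_cnt" was renamed to "cnt". Both
 * flavors relocate against the kernel's struct foo; libbpf ignores
 * the "___old"/"___new" suffixes when matching types. */
struct foo___old {
	int old_cnt;
};

struct foo___new {
	int cnt;
};

static __always_inline int get_foo_cnt(void *foo)
{
	if (bpf_core_field_exists(struct foo___old, old_cnt))
		return BPF_CORE_READ((struct foo___old *)foo, old_cnt);
	return BPF_CORE_READ((struct foo___new *)foo, cnt);
}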

sources

@@ -1 +1 @@
-SHA512 (bcc-0.29.1.tar.gz) = 9e60130ea602e19e6c6f88a8c17023cea5daf4c5bcc7af8816e9f5c662341136eb449a3fdf870ffad215495ac3bf895115c0d968d92ce79ebe2899b3e2464d24
+SHA512 (bcc-0.30.0.tar.gz) = 70478ca8c18e7f106c462513ca9af46f49b4ebcca6380a9393208fca88f83895a7396f918bf5d01dce1bc4a876bccb9b95aa56d426e55d384cf11c9baaa6a89b