import CS bcc-0.34.0-2.el9

This commit is contained in:
eabdullin 2025-09-15 11:46:30 +00:00
parent 854fb1250e
commit 52c5f07baa
7 changed files with 419 additions and 6 deletions

.bcc.metadata

@@ -1 +1 @@
-c2d19784f22483fc34242c29a73d1f16d98356f9 SOURCES/bcc-0.32.0.tar.gz
+a5fffd86f5faa8cac7433fdc9f34bdc198c1db9d SOURCES/bcc-0.34.0.tar.gz

.gitignore

@@ -1 +1 @@
-SOURCES/bcc-0.32.0.tar.gz
+SOURCES/bcc-0.34.0.tar.gz

SOURCES/bcc-0.34.0-libbpf-tools-klockstat-Allows-kprobe-fallback-to-wor.patch

@@ -0,0 +1,274 @@
From bf795413ca6c3ee09fb3fff118bec2e5a43d0acb Mon Sep 17 00:00:00 2001
From: Jerome Marchand <jmarchan@redhat.com>
Date: Sun, 13 Jul 2025 06:12:33 +0200
Subject: [PATCH] libbpf-tools/klockstat: Allows kprobe fallback to work with
lock debugging (#5359)
The klockstat tool falls back on kprobes when fentries are not
available, but the fallback doesn't work on debug kernels
(CONFIG_DEBUG_LOCK_ALLOC enabled).
Attach kprobes to the debug locking functions (*_nested) when they
exist, as is done with the fentry code. (A sketch of why lockdep
kernels hide the plain entry points follows this patch.)
Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
---
libbpf-tools/klockstat.bpf.c | 195 +++++++++++++++++++++++++++++++++++
libbpf-tools/klockstat.c | 35 +++++++
2 files changed, 230 insertions(+)
diff --git a/libbpf-tools/klockstat.bpf.c b/libbpf-tools/klockstat.bpf.c
index c53cdcdb..b2a94354 100644
--- a/libbpf-tools/klockstat.bpf.c
+++ b/libbpf-tools/klockstat.bpf.c
@@ -832,6 +832,201 @@ int BPF_KPROBE(kprobe_up_write, struct rw_semaphore *lock)
return 0;
}
+/* CONFIG_DEBUG_LOCK_ALLOC is enabled */
+
+SEC("kprobe/mutex_lock_nested")
+int BPF_KPROBE(kprobe_mutex_lock_nested, struct mutex *lock)
+{
+ u32 tid = (u32)bpf_get_current_pid_tgid();
+
+ bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
+ lock_contended(ctx, lock);
+ return 0;
+}
+
+SEC("kretprobe/mutex_lock_nested")
+int BPF_KRETPROBE(kprobe_mutex_lock_exit_nested, long ret)
+{
+ u32 tid = (u32)bpf_get_current_pid_tgid();
+ void **lock;
+
+ lock = bpf_map_lookup_elem(&locks, &tid);
+ if (!lock)
+ return 0;
+
+ bpf_map_delete_elem(&locks, &tid);
+ lock_acquired(*lock);
+ return 0;
+}
+
+SEC("kprobe/mutex_lock_interruptible_nested")
+int BPF_KPROBE(kprobe_mutex_lock_interruptible_nested, struct mutex *lock)
+{
+ u32 tid = (u32)bpf_get_current_pid_tgid();
+
+ bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
+ lock_contended(ctx, lock);
+ return 0;
+}
+
+SEC("kretprobe/mutex_lock_interruptible_nested")
+int BPF_KRETPROBE(kprobe_mutex_lock_interruptible_exit_nested, long ret)
+{
+ u32 tid = (u32)bpf_get_current_pid_tgid();
+ void **lock;
+
+ lock = bpf_map_lookup_elem(&locks, &tid);
+ if (!lock)
+ return 0;
+
+ bpf_map_delete_elem(&locks, &tid);
+
+ if (ret)
+ lock_aborted(*lock);
+ else
+ lock_acquired(*lock);
+ return 0;
+}
+
+SEC("kprobe/mutex_lock_killable_nested")
+int BPF_KPROBE(kprobe_mutex_lock_killable_nested, struct mutex *lock)
+{
+ u32 tid = (u32)bpf_get_current_pid_tgid();
+
+ bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
+ lock_contended(ctx, lock);
+ return 0;
+}
+
+SEC("kretprobe/mutex_lock_killable_nested")
+int BPF_KRETPROBE(kprobe_mutex_lock_killable_exit_nested, long ret)
+{
+ u32 tid = (u32)bpf_get_current_pid_tgid();
+ void **lock;
+
+ lock = bpf_map_lookup_elem(&locks, &tid);
+ if (!lock)
+ return 0;
+
+ bpf_map_delete_elem(&locks, &tid);
+
+ if (ret)
+ lock_aborted(*lock);
+ else
+ lock_acquired(*lock);
+ return 0;
+}
+
+SEC("kprobe/down_read_nested")
+int BPF_KPROBE(kprobe_down_read_nested, struct rw_semaphore *lock)
+{
+ u32 tid = (u32)bpf_get_current_pid_tgid();
+
+ bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
+ lock_contended(ctx, lock);
+ return 0;
+}
+
+SEC("kretprobe/down_read_nested")
+int BPF_KRETPROBE(kprobe_down_read_exit_nested, long ret)
+{
+ u32 tid = (u32)bpf_get_current_pid_tgid();
+ void **lock;
+
+ lock = bpf_map_lookup_elem(&locks, &tid);
+ if (!lock)
+ return 0;
+
+ bpf_map_delete_elem(&locks, &tid);
+
+ lock_acquired(*lock);
+ return 0;
+}
+
+SEC("kprobe/down_read_killable_nested")
+int BPF_KPROBE(kprobe_down_read_killable_nested, struct rw_semaphore *lock)
+{
+ u32 tid = (u32)bpf_get_current_pid_tgid();
+
+ bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
+ lock_contended(ctx, lock);
+ return 0;
+}
+
+SEC("kretprobe/down_read_killable_nested")
+int BPF_KRETPROBE(kprobe_down_read_killable_exit_nested, long ret)
+{
+ u32 tid = (u32)bpf_get_current_pid_tgid();
+ void **lock;
+
+ lock = bpf_map_lookup_elem(&locks, &tid);
+ if (!lock)
+ return 0;
+
+ bpf_map_delete_elem(&locks, &tid);
+
+ if (ret)
+ lock_aborted(*lock);
+ else
+ lock_acquired(*lock);
+ return 0;
+}
+
+SEC("kprobe/down_write_nested")
+int BPF_KPROBE(kprobe_down_write_nested, struct rw_semaphore *lock)
+{
+ u32 tid = (u32)bpf_get_current_pid_tgid();
+
+ bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
+ lock_contended(ctx, lock);
+ return 0;
+}
+
+SEC("kretprobe/down_write_nested")
+int BPF_KRETPROBE(kprobe_down_write_exit_nested, long ret)
+{
+ u32 tid = (u32)bpf_get_current_pid_tgid();
+ void **lock;
+
+ lock = bpf_map_lookup_elem(&locks, &tid);
+ if (!lock)
+ return 0;
+
+ bpf_map_delete_elem(&locks, &tid);
+
+ lock_acquired(*lock);
+ return 0;
+}
+
+SEC("kprobe/down_write_killable_nested")
+int BPF_KPROBE(kprobe_down_write_killable_nested, struct rw_semaphore *lock)
+{
+ u32 tid = (u32)bpf_get_current_pid_tgid();
+
+ bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
+ lock_contended(ctx, lock);
+ return 0;
+}
+
+SEC("kretprobe/down_write_killable_nested")
+int BPF_KRETPROBE(kprobe_down_write_killable_exit_nested, long ret)
+{
+ u32 tid = (u32)bpf_get_current_pid_tgid();
+ void **lock;
+
+ lock = bpf_map_lookup_elem(&locks, &tid);
+ if (!lock)
+ return 0;
+
+ bpf_map_delete_elem(&locks, &tid);
+
+ if (ret)
+ lock_aborted(*lock);
+ else
+ lock_acquired(*lock);
+ return 0;
+}
+
SEC("kprobe/rtnetlink_rcv_msg")
int BPF_KPROBE(kprobe_rtnetlink_rcv_msg, struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *ext)
diff --git a/libbpf-tools/klockstat.c b/libbpf-tools/klockstat.c
index ab5ed908..c3c9be89 100644
--- a/libbpf-tools/klockstat.c
+++ b/libbpf-tools/klockstat.c
@@ -879,6 +879,41 @@ static void enable_kprobes(struct klockstat_bpf *obj)
bpf_program__set_autoload(obj->progs.netlink_dump_exit, false);
bpf_program__set_autoload(obj->progs.sock_do_ioctl, false);
bpf_program__set_autoload(obj->progs.sock_do_ioctl_exit, false);
+
+ /* CONFIG_DEBUG_LOCK_ALLOC is on */
+ if (kprobe_exists("mutex_lock_nested")) {
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_exit, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_exit, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_exit, false);
+
+ bpf_program__set_autoload(obj->progs.kprobe_down_read, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_exit, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_killable, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_exit, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_write, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_exit, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_killable, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit, false);
+ } else {
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_exit_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_exit_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_exit_nested, false);
+
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_exit_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_exit_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_exit_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit_nested, false);
+ }
}
static void disable_nldump_ioctl_probes(struct klockstat_bpf *obj)
--
2.50.0
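
Why the plain kprobes miss here: with CONFIG_DEBUG_LOCK_ALLOC, the
kernel's locking headers turn the ordinary mutex entry points into
macros over the *_nested variants, so a symbol like mutex_lock never
exists for a kprobe to attach to. A simplified paraphrase of the
pattern in include/linux/mutex.h (a sketch, not the verbatim header):

struct mutex;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Lockdep build: the real function is mutex_lock_nested(); plain
 * mutex_lock() is only a macro, so "kprobe/mutex_lock" has nothing
 * to attach to. */
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#else
/* Normal build: mutex_lock() is a real, probeable function. */
extern void mutex_lock(struct mutex *lock);
#endif

The rwsem entry points gain *_nested variants under the same option,
so down_read()/down_write() probes likewise miss the nested call sites
on debug kernels, which is why the patch adds a *_nested twin for
every probed lock function.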

SOURCES/bcc-0.34.0-libbpf-tools-klockstat-Disable-_nested-kprobes-in-th.patch

@@ -0,0 +1,49 @@
From f372342fd4c1cd0c121500301833177fd6fb45ac Mon Sep 17 00:00:00 2001
From: Jerome Marchand <jmarchan@redhat.com>
Date: Fri, 18 Jul 2025 14:15:46 +0200
Subject: [PATCH] libbpf-tools/klockstat: Disable *_nested kprobes in the
fentry code
Commit 789e923f ("libbpf-tools/klockstat: Allows kprobe fallback to
work with lock debugging (#5359)") added new kprobes to be enabled on
debug kernels and disabled on normal kernels, but it forgot to disable
them in the fentry code along with the other kprobes.
Disable the *_nested kprobes in the fentry code. (The autoload
pattern being adjusted here is sketched after this patch.)
Fixes: 789e923f ("libbpf-tools/klockstat: Allows kprobe fallback to work with lock debugging (#5359)")
Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
---
libbpf-tools/klockstat.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/libbpf-tools/klockstat.c b/libbpf-tools/klockstat.c
index c3c9be89..59be05ac 100644
--- a/libbpf-tools/klockstat.c
+++ b/libbpf-tools/klockstat.c
@@ -804,6 +804,22 @@ static void enable_fentry(struct klockstat_bpf *obj)
bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit, false);
bpf_program__set_autoload(obj->progs.kprobe_up_write, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_exit_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_exit_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_exit_nested, false);
+
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_exit_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_exit_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_exit_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_nested, false);
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit_nested, false);
+
bpf_program__set_autoload(obj->progs.kprobe_rtnetlink_rcv_msg, false);
bpf_program__set_autoload(obj->progs.kprobe_rtnetlink_rcv_msg_exit, false);
bpf_program__set_autoload(obj->progs.kprobe_netlink_dump, false);
--
2.50.1
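
For context, both this fix and the previous patch lean on the same
libbpf mechanism: every program in a skeleton is loaded by default, so
any probe whose target symbol is absent from the running kernel must
have autoload switched off before the load call, or loading fails. A
minimal sketch of that dispatch, assuming the generated klockstat
skeleton and a kprobe_exists() helper like the one in bcc's
trace_helpers (error handling trimmed):

#include <stdbool.h>
#include <bpf/libbpf.h>
#include "klockstat.skel.h"	/* generated by bpftool gen skeleton */

bool kprobe_exists(const char *name);	/* from trace_helpers */

static struct klockstat_bpf *open_and_load(void)
{
	struct klockstat_bpf *obj = klockstat_bpf__open();

	if (!obj)
		return NULL;

	if (kprobe_exists("mutex_lock_nested")) {
		/* lockdep kernel: keep only the *_nested programs */
		bpf_program__set_autoload(obj->progs.kprobe_mutex_lock, false);
		/* ...and so on for every plain-symbol program... */
	} else {
		bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_nested, false);
		/* ...and so on for every *_nested program... */
	}

	if (klockstat_bpf__load(obj)) {	/* loads only autoloaded progs */
		klockstat_bpf__destroy(obj);
		return NULL;
	}
	return obj;
}

The bug fixed above is exactly a missed branch of this bookkeeping:
enable_fentry() left autoload on for kprobe programs it never intended
to attach.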

SOURCES/bcc-0.34.0-tools-biosnoop-Fix-biosnoop-pattern-option-5304.patch

@@ -0,0 +1,70 @@
From 06d96cee7d9f4d95025ac0988ac3b273ead0061e Mon Sep 17 00:00:00 2001
From: Jerome Marchand <jmarchan@redhat.com>
Date: Sun, 18 May 2025 01:12:30 +0200
Subject: [PATCH] tools/biosnoop: Fix biosnoop pattern option (#5304)
The change that added support for the block_io tracepoints replaced
the second argument of __trace_req_completion, 'req', with 'key', but
failed to update the code used with the -P option: inside the
"#ifdef INCLUDE_PATTERN" block, the uses of 'req' remained unchanged.
Of course, 'key' should be used there too.
Also remove the commented-out code related to rq_disk. (The
sequential/random heuristic involved is sketched after this patch.)
It fixes the following error:
$ biosnoop -P
/virtual/main.c:213:24: error: use of undeclared identifier 'req'
213 | data.pattern = req->__sector == *sector ? SEQUENTIAL : RANDOM;
| ^
/virtual/main.c:216:19: error: use of undeclared identifier 'req'
216 | last_sector = req->__sector + data.len / 512;
| ^
2 errors generated.
Traceback (most recent call last):
File "/usr/share/bcc/tools/biosnoop", line 335, in <module>
b = BPF(text=bpf_text)
File "/usr/lib/python3.13/site-packages/bcc/__init__.py", line 505, in __init__
raise Exception("Failed to compile BPF module %s" % (src_file or "<text>"))
Exception: Failed to compile BPF module <text>
Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
---
tools/biosnoop.py | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/tools/biosnoop.py b/tools/biosnoop.py
index 431cd4a2..045e1f9d 100755
--- a/tools/biosnoop.py
+++ b/tools/biosnoop.py
@@ -218,7 +218,6 @@ static int __trace_req_completion(void *ctx, struct hash_key key)
struct start_req_t *startp;
struct val_t *valp;
struct data_t data = {};
- //struct gendisk *rq_disk;
u64 ts;
// fetch timestamp and calculate delta
@@ -228,7 +227,6 @@ static int __trace_req_completion(void *ctx, struct hash_key key)
return 0;
}
ts = bpf_ktime_get_ns();
- //rq_disk = req->__RQ_DISK__;
data.delta = ts - startp->ts;
data.ts = ts / 1000;
data.qdelta = 0;
@@ -260,10 +258,10 @@ static int __trace_req_completion(void *ctx, struct hash_key key)
sector = last_sectors.lookup(&sector_key);
if (sector != 0) {
- data.pattern = req->__sector == *sector ? SEQUENTIAL : RANDOM;
+ data.pattern = key.sector == *sector ? SEQUENTIAL : RANDOM;
}
- last_sector = req->__sector + data.len / 512;
+ last_sector = key.sector + data.len / 512;
last_sectors.update(&sector_key, &last_sector);
#endif
--
2.49.0
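
The -P heuristic the fix restores is a simple adjacency test: remember
where the previous I/O on the disk ended, and classify the next one as
SEQUENTIAL if it starts exactly there. A standalone sketch of that
logic in plain C, mirroring the corrected lines (512-byte sectors, one
global where the real tool keys per disk):

#include <stdio.h>

enum pattern { RANDOM, SEQUENTIAL };

static unsigned long long last_sector;	/* end sector of previous request */

static enum pattern classify(unsigned long long sector, unsigned int len)
{
	/* no history yet counts as RANDOM, like the map-miss case above */
	enum pattern p = (last_sector && sector == last_sector)
		? SEQUENTIAL : RANDOM;

	last_sector = sector + len / 512;	/* where this request ends */
	return p;
}

int main(void)
{
	printf("%d\n", classify(1000, 4096));	/* 0: RANDOM, no history */
	printf("%d\n", classify(1008, 4096));	/* 1: SEQUENTIAL, 1000 + 8 */
	return 0;
}

The broken code computed this from 'req', which is no longer in scope
once the completion handler receives the hash key; substituting
key.sector keeps the same arithmetic.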

SPECS/bcc.spec

@@ -24,14 +24,16 @@
Name: bcc
-Version: 0.32.0
+Version: 0.34.0
Release: 2%{?dist}
Summary: BPF Compiler Collection (BCC)
License: ASL 2.0
URL: https://github.com/iovisor/bcc
Source0: %{url}/archive/v%{version}/%{name}-%{version}.tar.gz
Patch0: %{name}-%{version}-RHEL-Centos-tools-fix-alignment-in-tp_args-for-bio-t.patch
+Patch1: %{name}-%{version}-tools-biosnoop-Fix-biosnoop-pattern-option-5304.patch
+Patch2: %{name}-%{version}-libbpf-tools-klockstat-Allows-kprobe-fallback-to-wor.patch
+Patch3: %{name}-%{version}-libbpf-tools-klockstat-Disable-_nested-kprobes-in-th.patch
# Arches will be included as upstream support is added and dependencies are
# satisfied in the respective arches
@@ -191,7 +193,7 @@ rm -rf %{buildroot}%{_datadir}/%{name}/tools/old/
# We cannot run the test suit since it requires root and it makes changes to
# the machine (e.g, IP address)
-#%check
+# %%check
%if %{with libbpf_tools}
mkdir -p %{buildroot}/%{_sbindir}
@@ -244,6 +246,10 @@ cp -a libbpf-tools/tmp-install/bin/* %{buildroot}/%{_sbindir}/
%exclude %{_datadir}/%{name}/tools/criticalstat
%exclude %{_datadir}/%{name}/tools/doc/criticalstat_example.txt
%exclude %{_mandir}/man8/bcc-criticalstat.8.gz
+# slabratetop isn't supported on RHEL 9 kernel anymore
+%exclude %{_datadir}/%{name}/tools/slabratetop
+%exclude %{_datadir}/%{name}/tools/doc/slabratetop_example.txt
+%exclude %{_mandir}/man8/bcc-slabratetop.8.gz
%endif
%{_mandir}/man8/*
@@ -257,13 +263,27 @@ cp -a libbpf-tools/tmp-install/bin/* %{buildroot}/%{_sbindir}/
%ifarch s390x
%exclude %{_sbindir}/bpf-numamove
%endif
-# RHEL doesn't provide btrfs or f2fs
+# RHEL doesn't provide btrfs, f2fs, bcachefs or zfs
%exclude %{_sbindir}/bpf-btrfs*
%exclude %{_sbindir}/bpf-f2fs*
+%exclude %{_sbindir}/bpf-bcachefs*
+%exclude %{_sbindir}/bpf-zfs*
%{_sbindir}/bpf-*
%endif
%changelog
+* Mon Jul 21 2025 Jerome Marchand <jmarchan@redhat.com> - 0.34.0-2
+- Fix bpf-klockstat on aarch64 and ppc64le debug (RHEL-78619)
+- Remove macro in comment.
+* Wed Jun 04 2025 Jerome Marchand <jmarchan@redhat.com> - 0.34.0-1
+- Rebase to version 0.34.0 (RHEL-78920)
+- Rebuild with LLVM 20 (RHEL-81773)
+- Fix biosnoop pattern option (RHEL-90848)
+- Fix bpf-klockstat on aarch64 and ppc64le (RHEL-78619)
+- Remove unsupported bpf-bcachefs* and bpf-zfs* tools (RHEL-78166)
+- Remove unsupported slabratetop tool (RHEL-78162)
* Wed Jan 29 2025 Jerome Marchand <jmarchan@redhat.com> - 0.32.0-1
- Rebuild with libbpf 1.5.0