Fix bpf-klockstat on aarch64 and ppc64le debug
Also fix the spec file. There shouldn't be macros in comments. Resolves: RHEL-78619 Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
This commit is contained in:
parent
5884d2f569
commit
ba9d39adbf
@ -0,0 +1,274 @@
|
||||
From bf795413ca6c3ee09fb3fff118bec2e5a43d0acb Mon Sep 17 00:00:00 2001
|
||||
From: Jerome Marchand <jmarchan@redhat.com>
|
||||
Date: Sun, 13 Jul 2025 06:12:33 +0200
|
||||
Subject: [PATCH] libbpf-tools/klockstat: Allows kprobe fallback to work with
|
||||
lock debugging (#5359)
|
||||
|
||||
The klockstat tool falls back on kprobes when fentries are not
|
||||
available, but the fallback doesn't work on debug kernels
|
||||
(CONFIG_DEBUG_LOCK_ALLOC enabled).
|
||||
|
||||
Attach kprobes to the debug locking functions (*_nested) when they
|
||||
exist, as is done with the fentry code.
|
||||
|
||||
Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
|
||||
---
|
||||
libbpf-tools/klockstat.bpf.c | 195 +++++++++++++++++++++++++++++++++++
|
||||
libbpf-tools/klockstat.c | 35 +++++++
|
||||
2 files changed, 230 insertions(+)
|
||||
|
||||
diff --git a/libbpf-tools/klockstat.bpf.c b/libbpf-tools/klockstat.bpf.c
|
||||
index c53cdcdb..b2a94354 100644
|
||||
--- a/libbpf-tools/klockstat.bpf.c
|
||||
+++ b/libbpf-tools/klockstat.bpf.c
|
||||
@@ -832,6 +832,201 @@ int BPF_KPROBE(kprobe_up_write, struct rw_semaphore *lock)
|
||||
return 0;
|
||||
}
|
||||
|
||||
+/* CONFIG_DEBUG_LOCK_ALLOC is enabled */
|
||||
+
|
||||
+SEC("kprobe/mutex_lock_nested")
|
||||
+int BPF_KPROBE(kprobe_mutex_lock_nested, struct mutex *lock)
|
||||
+{
|
||||
+ u32 tid = (u32)bpf_get_current_pid_tgid();
|
||||
+
|
||||
+ bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
|
||||
+ lock_contended(ctx, lock);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+SEC("kretprobe/mutex_lock_nested")
|
||||
+int BPF_KRETPROBE(kprobe_mutex_lock_exit_nested, long ret)
|
||||
+{
|
||||
+ u32 tid = (u32)bpf_get_current_pid_tgid();
|
||||
+ void **lock;
|
||||
+
|
||||
+ lock = bpf_map_lookup_elem(&locks, &tid);
|
||||
+ if (!lock)
|
||||
+ return 0;
|
||||
+
|
||||
+ bpf_map_delete_elem(&locks, &tid);
|
||||
+ lock_acquired(*lock);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+SEC("kprobe/mutex_lock_interruptible_nested")
|
||||
+int BPF_KPROBE(kprobe_mutex_lock_interruptible_nested, struct mutex *lock)
|
||||
+{
|
||||
+ u32 tid = (u32)bpf_get_current_pid_tgid();
|
||||
+
|
||||
+ bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
|
||||
+ lock_contended(ctx, lock);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+SEC("kretprobe/mutex_lock_interruptible_nested")
|
||||
+int BPF_KRETPROBE(kprobe_mutex_lock_interruptible_exit_nested, long ret)
|
||||
+{
|
||||
+ u32 tid = (u32)bpf_get_current_pid_tgid();
|
||||
+ void **lock;
|
||||
+
|
||||
+ lock = bpf_map_lookup_elem(&locks, &tid);
|
||||
+ if (!lock)
|
||||
+ return 0;
|
||||
+
|
||||
+ bpf_map_delete_elem(&locks, &tid);
|
||||
+
|
||||
+ if (ret)
|
||||
+ lock_aborted(*lock);
|
||||
+ else
|
||||
+ lock_acquired(*lock);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+SEC("kprobe/mutex_lock_killable_nested")
|
||||
+int BPF_KPROBE(kprobe_mutex_lock_killable_nested, struct mutex *lock)
|
||||
+{
|
||||
+ u32 tid = (u32)bpf_get_current_pid_tgid();
|
||||
+
|
||||
+ bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
|
||||
+ lock_contended(ctx, lock);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+SEC("kretprobe/mutex_lock_killable_nested")
|
||||
+int BPF_KRETPROBE(kprobe_mutex_lock_killable_exit_nested, long ret)
|
||||
+{
|
||||
+ u32 tid = (u32)bpf_get_current_pid_tgid();
|
||||
+ void **lock;
|
||||
+
|
||||
+ lock = bpf_map_lookup_elem(&locks, &tid);
|
||||
+ if (!lock)
|
||||
+ return 0;
|
||||
+
|
||||
+ bpf_map_delete_elem(&locks, &tid);
|
||||
+
|
||||
+ if (ret)
|
||||
+ lock_aborted(*lock);
|
||||
+ else
|
||||
+ lock_acquired(*lock);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+SEC("kprobe/down_read_nested")
|
||||
+int BPF_KPROBE(kprobe_down_read_nested, struct rw_semaphore *lock)
|
||||
+{
|
||||
+ u32 tid = (u32)bpf_get_current_pid_tgid();
|
||||
+
|
||||
+ bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
|
||||
+ lock_contended(ctx, lock);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+SEC("kretprobe/down_read_nested")
|
||||
+int BPF_KRETPROBE(kprobe_down_read_exit_nested, long ret)
|
||||
+{
|
||||
+ u32 tid = (u32)bpf_get_current_pid_tgid();
|
||||
+ void **lock;
|
||||
+
|
||||
+ lock = bpf_map_lookup_elem(&locks, &tid);
|
||||
+ if (!lock)
|
||||
+ return 0;
|
||||
+
|
||||
+ bpf_map_delete_elem(&locks, &tid);
|
||||
+
|
||||
+ lock_acquired(*lock);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+SEC("kprobe/down_read_killable_nested")
|
||||
+int BPF_KPROBE(kprobe_down_read_killable_nested, struct rw_semaphore *lock)
|
||||
+{
|
||||
+ u32 tid = (u32)bpf_get_current_pid_tgid();
|
||||
+
|
||||
+ bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
|
||||
+ lock_contended(ctx, lock);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+SEC("kretprobe/down_read_killable_nested")
|
||||
+int BPF_KRETPROBE(kprobe_down_read_killable_exit_nested, long ret)
|
||||
+{
|
||||
+ u32 tid = (u32)bpf_get_current_pid_tgid();
|
||||
+ void **lock;
|
||||
+
|
||||
+ lock = bpf_map_lookup_elem(&locks, &tid);
|
||||
+ if (!lock)
|
||||
+ return 0;
|
||||
+
|
||||
+ bpf_map_delete_elem(&locks, &tid);
|
||||
+
|
||||
+ if (ret)
|
||||
+ lock_aborted(*lock);
|
||||
+ else
|
||||
+ lock_acquired(*lock);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+SEC("kprobe/down_write_nested")
|
||||
+int BPF_KPROBE(kprobe_down_write_nested, struct rw_semaphore *lock)
|
||||
+{
|
||||
+ u32 tid = (u32)bpf_get_current_pid_tgid();
|
||||
+
|
||||
+ bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
|
||||
+ lock_contended(ctx, lock);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+SEC("kretprobe/down_write_nested")
|
||||
+int BPF_KRETPROBE(kprobe_down_write_exit_nested, long ret)
|
||||
+{
|
||||
+ u32 tid = (u32)bpf_get_current_pid_tgid();
|
||||
+ void **lock;
|
||||
+
|
||||
+ lock = bpf_map_lookup_elem(&locks, &tid);
|
||||
+ if (!lock)
|
||||
+ return 0;
|
||||
+
|
||||
+ bpf_map_delete_elem(&locks, &tid);
|
||||
+
|
||||
+ lock_acquired(*lock);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+SEC("kprobe/down_write_killable_nested")
|
||||
+int BPF_KPROBE(kprobe_down_write_killable_nested, struct rw_semaphore *lock)
|
||||
+{
|
||||
+ u32 tid = (u32)bpf_get_current_pid_tgid();
|
||||
+
|
||||
+ bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
|
||||
+ lock_contended(ctx, lock);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
+SEC("kretprobe/down_write_killable_nested")
|
||||
+int BPF_KRETPROBE(kprobe_down_write_killable_exit_nested, long ret)
|
||||
+{
|
||||
+ u32 tid = (u32)bpf_get_current_pid_tgid();
|
||||
+ void **lock;
|
||||
+
|
||||
+ lock = bpf_map_lookup_elem(&locks, &tid);
|
||||
+ if (!lock)
|
||||
+ return 0;
|
||||
+
|
||||
+ bpf_map_delete_elem(&locks, &tid);
|
||||
+
|
||||
+ if (ret)
|
||||
+ lock_aborted(*lock);
|
||||
+ else
|
||||
+ lock_acquired(*lock);
|
||||
+ return 0;
|
||||
+}
|
||||
+
|
||||
SEC("kprobe/rtnetlink_rcv_msg")
|
||||
int BPF_KPROBE(kprobe_rtnetlink_rcv_msg, struct sk_buff *skb, struct nlmsghdr *nlh,
|
||||
struct netlink_ext_ack *ext)
|
||||
diff --git a/libbpf-tools/klockstat.c b/libbpf-tools/klockstat.c
|
||||
index ab5ed908..c3c9be89 100644
|
||||
--- a/libbpf-tools/klockstat.c
|
||||
+++ b/libbpf-tools/klockstat.c
|
||||
@@ -879,6 +879,41 @@ static void enable_kprobes(struct klockstat_bpf *obj)
|
||||
bpf_program__set_autoload(obj->progs.netlink_dump_exit, false);
|
||||
bpf_program__set_autoload(obj->progs.sock_do_ioctl, false);
|
||||
bpf_program__set_autoload(obj->progs.sock_do_ioctl_exit, false);
|
||||
+
|
||||
+ /* CONFIG_DEBUG_LOCK_ALLOC is on */
|
||||
+ if (kprobe_exists("mutex_lock_nested")) {
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_exit, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_exit, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_exit, false);
|
||||
+
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_read, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_exit, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_killable, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_exit, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_write, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_exit, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_killable, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit, false);
|
||||
+ } else {
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_exit_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_exit_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_exit_nested, false);
|
||||
+
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_exit_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_exit_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_exit_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit_nested, false);
|
||||
+ }
|
||||
}
|
||||
|
||||
static void disable_nldump_ioctl_probes(struct klockstat_bpf *obj)
|
||||
--
|
||||
2.50.0
|
||||
|
||||
@ -0,0 +1,49 @@
|
||||
From f372342fd4c1cd0c121500301833177fd6fb45ac Mon Sep 17 00:00:00 2001
|
||||
From: Jerome Marchand <jmarchan@redhat.com>
|
||||
Date: Fri, 18 Jul 2025 14:15:46 +0200
|
||||
Subject: [PATCH] libbpf-tools/klockstat: Disable *_nested kprobes in the
|
||||
fentry code
|
||||
|
||||
Commit 789e923f ("libbpf-tools/klockstat: Allows kprobe fallback to
|
||||
work with lock debugging (#5359)") adds new kprobes to be enabled on
|
||||
debug kernels and disabled on normal kernels. But it forgot to disable
|
||||
them in the fentry code, along with other kprobes.
|
||||
|
||||
Disable *_nested kprobes in the fentry code.
|
||||
|
||||
Fixes: 789e923f ("libbpf-tools/klockstat: Allows kprobe fallback to work with lock debugging (#5359)")
|
||||
Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
|
||||
---
|
||||
libbpf-tools/klockstat.c | 16 ++++++++++++++++
|
||||
1 file changed, 16 insertions(+)
|
||||
|
||||
diff --git a/libbpf-tools/klockstat.c b/libbpf-tools/klockstat.c
|
||||
index c3c9be89..59be05ac 100644
|
||||
--- a/libbpf-tools/klockstat.c
|
||||
+++ b/libbpf-tools/klockstat.c
|
||||
@@ -804,6 +804,22 @@ static void enable_fentry(struct klockstat_bpf *obj)
|
||||
bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit, false);
|
||||
bpf_program__set_autoload(obj->progs.kprobe_up_write, false);
|
||||
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_exit_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_interruptible_exit_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_mutex_lock_killable_exit_nested, false);
|
||||
+
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_exit_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_read_killable_exit_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_exit_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_nested, false);
|
||||
+ bpf_program__set_autoload(obj->progs.kprobe_down_write_killable_exit_nested, false);
|
||||
+
|
||||
bpf_program__set_autoload(obj->progs.kprobe_rtnetlink_rcv_msg, false);
|
||||
bpf_program__set_autoload(obj->progs.kprobe_rtnetlink_rcv_msg_exit, false);
|
||||
bpf_program__set_autoload(obj->progs.kprobe_netlink_dump, false);
|
||||
--
|
||||
2.50.1
|
||||
|
||||
11
bcc.spec
11
bcc.spec
@ -25,14 +25,15 @@
|
||||
|
||||
Name: bcc
|
||||
Version: 0.34.0
|
||||
Release: 1%{?dist}
|
||||
Release: 2%{?dist}
|
||||
Summary: BPF Compiler Collection (BCC)
|
||||
License: ASL 2.0
|
||||
URL: https://github.com/iovisor/bcc
|
||||
Source0: %{url}/archive/v%{version}/%{name}-%{version}.tar.gz
|
||||
Patch0: %%{name}-%%{version}-RHEL-Centos-tools-fix-alignment-in-tp_args-for-bio-t.patch
|
||||
Patch1: %%{name}-%%{version}-tools-biosnoop-Fix-biosnoop-pattern-option-5304.patch
|
||||
|
||||
Patch2: %%{name}-%%{version}-libbpf-tools-klockstat-Allows-kprobe-fallback-to-wor.patch
|
||||
Patch3: %%{name}-%%{version}-libbpf-tools-klockstat-Disable-_nested-kprobes-in-th.patch
|
||||
|
||||
# Arches will be included as upstream support is added and dependencies are
|
||||
# satisfied in the respective arches
|
||||
@ -192,7 +193,7 @@ rm -rf %{buildroot}%{_datadir}/%{name}/tools/old/
|
||||
|
||||
# We cannot run the test suit since it requires root and it makes changes to
|
||||
# the machine (e.g, IP address)
|
||||
#%check
|
||||
# %%check
|
||||
|
||||
%if %{with libbpf_tools}
|
||||
mkdir -p %{buildroot}/%{_sbindir}
|
||||
@ -271,6 +272,10 @@ cp -a libbpf-tools/tmp-install/bin/* %{buildroot}/%{_sbindir}/
|
||||
%endif
|
||||
|
||||
%changelog
|
||||
* Mon Jul 21 2025 Jerome Marchand <jmarchan@redhat.com> - 0.34.0-2
|
||||
- Fix bpf-klockstat on aarch64 and ppc64le debug (RHEL-78619)
|
||||
- Remove macro in comment.
|
||||
|
||||
* Wed Jun 04 2025 Jerome Marchand <jmarchan@redhat.com> - 0.34.0-1
|
||||
- Rebase to version 0.34.0 (RHEL-78920)
|
||||
- Rebuild with LLVM 20 (RHEL-81773)
|
||||
|
||||
Loading…
Reference in New Issue
Block a user