bcc/bcc-0.26.0-tools-readahead-Fix-Failed-to-attach-BPF-program-ent.patch

From 02fce045ce02fe81d8649ce63ce81d5cdf3e3a72 Mon Sep 17 00:00:00 2001
From: Rong Tao <rongtao@cestc.cn>
Date: Mon, 30 Jan 2023 17:39:35 +0800
Subject: [PATCH] tools/readahead: Fix: Failed to attach BPF program
 entry__do_page_cache_readahead

Since commit 56a4d67c264e ("mm/readahead: Switch to page_cache_ra_order") switched
do_page_cache_ra() to page_cache_ra_order() (v5.17), and commit bb3c579e25e5
("mm/filemap: Add filemap_alloc_folio") swapped __page_cache_alloc() for
filemap_alloc_folio() (since v5.15), the functions readahead.py hooks may no
longer be attachable on newer kernels.

Reproduce the error (Fedora 37, 6.1.7-200.fc37.aarch64):
$ sudo ./readahead.py
cannot attach kprobe, probe entry may not exist
Traceback (most recent call last):
File "/home/rongtao/Git/bcc/tools/./readahead.py", line 159, in <module>
b.attach_kprobe(event=ra_event, fn_name="entry__do_page_cache_readahead")
File "/usr/lib/python3.11/site-packages/bcc/__init__.py", line 840, in attach_kprobe
raise Exception("Failed to attach BPF program %s to kprobe %s" %
Exception: Failed to attach BPF program b'entry__do_page_cache_readahead' to kprobe b'do_page_cache_ra'
Signed-off-by: Rong Tao <rongtao@cestc.cn>
---
tools/readahead.py | 69 +++++++++++++++++++++++++++++++++++++---------
1 file changed, 56 insertions(+), 13 deletions(-)
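
Note: the crux of the fix is to check which readahead entry point actually
exists on the running kernel before attaching, instead of hard-coding one.
Below is a minimal standalone sketch of that selection step (illustrative
only, not part of the patch; it calls BPF.get_kprobe_functions() the same way
readahead.py does and attaches nothing):

    #!/usr/bin/env python3
    # Sketch: pick the first readahead symbol present on this kernel,
    # mirroring the fallback chain the patch adds to readahead.py.
    from bcc import BPF

    candidates = [b"__do_page_cache_readahead",  # older kernels
                  b"do_page_cache_ra",           # kernels where the old name is gone
                  b"page_cache_ra_order"]        # v5.17+, per the commit message

    ra_event = next((name.decode() for name in candidates
                     if BPF.get_kprobe_functions(name)), None)
    if ra_event is None:
        raise SystemExit("no readahead entry point found on this kernel")
    print("readahead.py would attach its kprobes to", ra_event)
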
diff --git a/tools/readahead.py b/tools/readahead.py
index f2afdcb3..adad2ea8 100755
--- a/tools/readahead.py
+++ b/tools/readahead.py
@@ -12,6 +12,7 @@
 #
 # 20-Aug-2020 Suchakra Sharma Ported from bpftrace to BCC
 # 17-Sep-2021 Hengqi Chen Migrated to kfunc
+# 30-Jan-2023 Rong Tao Support more kfunc/kprobe, introduce folio
 
 from __future__ import print_function
 from bcc import BPF
@@ -38,6 +39,7 @@ args = parser.parse_args()
 bpf_text = """
 #include <uapi/linux/ptrace.h>
 #include <linux/mm_types.h>
+#include <linux/mm.h>
 
 BPF_HASH(flag, u32, u8); // used to track if we are in do_page_cache_readahead()
 BPF_HASH(birth, struct page*, u64); // used to track timestamps of cache alloc'ed page
@@ -65,7 +67,7 @@ int exit__do_page_cache_readahead(struct pt_regs *ctx) {
 int exit__page_cache_alloc(struct pt_regs *ctx) {
     u32 pid;
     u64 ts;
-    struct page *retval = (struct page*) PT_REGS_RC(ctx);
+    struct page *retval = (struct page*) GET_RETVAL_PAGE;
     u32 zero = 0; // static key for accessing pages[0]
     pid = bpf_get_current_pid_tgid();
     u8 *f = flag.lookup(&pid);
@@ -111,6 +113,23 @@ KRETFUNC_PROBE(RA_FUNC)
     return 0;
 }
 
+KFUNC_PROBE(mark_page_accessed, struct page *arg0)
+{
+    u64 ts, delta;
+    u32 zero = 0; // static key for accessing pages[0]
+    u64 *bts = birth.lookup(&arg0);
+
+    if (bts != NULL) {
+        delta = bpf_ktime_get_ns() - *bts;
+        dist.atomic_increment(bpf_log2l(delta/1000000));
+        pages.atomic_increment(zero, -1);
+        birth.delete(&arg0); // remove the entry from hashmap
+    }
+    return 0;
+}
+"""
+
+bpf_text_kfunc_cache_alloc_ret_page = """
 KRETFUNC_PROBE(__page_cache_alloc, gfp_t gfp, struct page *retval)
 {
     u64 ts;
@@ -125,18 +144,22 @@ KRETFUNC_PROBE(__page_cache_alloc, gfp_t gfp, struct page *retval)
     }
     return 0;
 }
+"""
 
-KFUNC_PROBE(mark_page_accessed, struct page *arg0)
+bpf_text_kfunc_cache_alloc_ret_folio = """
+KRETFUNC_PROBE(filemap_alloc_folio, gfp_t gfp, unsigned int order,
+               struct folio *retval)
 {
-    u64 ts, delta;
+    u64 ts;
     u32 zero = 0; // static key for accessing pages[0]
-    u64 *bts = birth.lookup(&arg0);
+    u32 pid = bpf_get_current_pid_tgid();
+    u8 *f = flag.lookup(&pid);
+    struct page *page = folio_page(retval, 0);
 
-    if (bts != NULL) {
-        delta = bpf_ktime_get_ns() - *bts;
-        dist.atomic_increment(bpf_log2l(delta/1000000));
-        pages.atomic_increment(zero, -1);
-        birth.delete(&arg0); // remove the entry from hashmap
+    if (f != NULL && *f == 1) {
+        ts = bpf_ktime_get_ns();
+        birth.update(&page, &ts);
+        pages.atomic_increment(zero);
     }
     return 0;
 }
@@ -145,20 +168,40 @@ KFUNC_PROBE(mark_page_accessed, struct page *arg0)
 if BPF.support_kfunc():
     if BPF.get_kprobe_functions(b"__do_page_cache_readahead"):
         ra_func = "__do_page_cache_readahead"
-    else:
+    elif BPF.get_kprobe_functions(b"do_page_cache_ra"):
         ra_func = "do_page_cache_ra"
+    elif BPF.get_kprobe_functions(b"page_cache_ra_order"):
+        ra_func = "page_cache_ra_order"
+    else:
+        print("Not found any kfunc.")
+        exit()
     bpf_text += bpf_text_kfunc.replace("RA_FUNC", ra_func)
+    if BPF.get_kprobe_functions(b"__page_cache_alloc"):
+        bpf_text += bpf_text_kfunc_cache_alloc_ret_page
+    else:
+        bpf_text += bpf_text_kfunc_cache_alloc_ret_folio
     b = BPF(text=bpf_text)
 else:
     bpf_text += bpf_text_kprobe
-    b = BPF(text=bpf_text)
     if BPF.get_kprobe_functions(b"__do_page_cache_readahead"):
         ra_event = "__do_page_cache_readahead"
-    else:
+    elif BPF.get_kprobe_functions(b"do_page_cache_ra"):
         ra_event = "do_page_cache_ra"
+    elif BPF.get_kprobe_functions(b"page_cache_ra_order"):
+        ra_event = "page_cache_ra_order"
+    else:
+        print("Not found any kprobe.")
+        exit()
+    if BPF.get_kprobe_functions(b"__page_cache_alloc"):
+        cache_func = "__page_cache_alloc"
+        bpf_text = bpf_text.replace('GET_RETVAL_PAGE', 'PT_REGS_RC(ctx)')
+    else:
+        cache_func = "filemap_alloc_folio"
+        bpf_text = bpf_text.replace('GET_RETVAL_PAGE', 'folio_page((struct folio *)PT_REGS_RC(ctx), 0)')
+    b = BPF(text=bpf_text)
     b.attach_kprobe(event=ra_event, fn_name="entry__do_page_cache_readahead")
     b.attach_kretprobe(event=ra_event, fn_name="exit__do_page_cache_readahead")
-    b.attach_kretprobe(event="__page_cache_alloc", fn_name="exit__page_cache_alloc")
+    b.attach_kretprobe(event=cache_func, fn_name="exit__page_cache_alloc")
     b.attach_kprobe(event="mark_page_accessed", fn_name="entry_mark_page_accessed")
 
 # header
--
2.39.1