import crash-trace-command-3.0-1.el8

CentOS Sources 2023-03-28 10:03:45 +00:00 committed by root
parent 762ba5c1a7
commit 1cbfa0ed44
21 changed files with 106 additions and 1814 deletions

@@ -1 +1 @@
b92c7a1f6b69e5a2e3142b68c76f46e0ebcf204e SOURCES/crash-trace-command-2.0.tar.gz
b9636e856f96e77b2047e12c3d87991ec88b067c SOURCES/crash-trace-command-3.0.tar.gz

.gitignore

@@ -1 +1 @@
SOURCES/crash-trace-command-2.0.tar.gz
SOURCES/crash-trace-command-3.0.tar.gz

@@ -1,121 +0,0 @@
From 2ed07609b2a8ed19ce3dda7a50b18373a6a8bd5c Mon Sep 17 00:00:00 2001
From: Dave Anderson <anderson@redhat.com>
Date: Mon, 25 Mar 2019 11:48:39 -0400
Subject: [PATCH 1/3] Fixes for the "trace.so" extension module: (1) The
reader_page can be empty if it was never read, do not record it if it is
empty. Better yet, do not record any page that is empty. The struct
buffer_page "real_end" is not available in older kernels, so it needs to
be tested if it exists before we can use it. (2) In newer kernels, the
sp->type of kernel module symbols does not contain the symbol type
character unless the module's debuginfo data has been loaded into the
crash session. Writing a garbage type to the kallsyms file for trace-cmd
to read causes it to crash, so just always write an 'm'. (3) Add the
"trace dump -t <trace.dat>" option to the SYNOPSIS line of the help page.
(rostedt@goodmis.org)
---
trace.c | 32 +++++++++++++++++++++++++++++---
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/trace.c b/trace.c
index ad71951e8740..c26b6c7ec475 100644
--- a/trace.c
+++ b/trace.c
@@ -43,6 +43,11 @@ static int max_buffer_available;
*/
static int multiple_instances_available;
+/*
+ * buffer_page has "real_end"
+ */
+static int buffer_page_real_end_available;
+
#define koffset(struct, member) struct##_##member##_offset
static int koffset(trace_array, current_trace);
@@ -70,6 +75,7 @@ static int koffset(ring_buffer_per_cpu, entries);
static int koffset(buffer_page, read);
static int koffset(buffer_page, list);
static int koffset(buffer_page, page);
+static int koffset(buffer_page, real_end);
static int koffset(list_head, next);
@@ -229,6 +235,7 @@ static int init_offsets(void)
init_offset(buffer_page, read);
init_offset(buffer_page, list);
init_offset(buffer_page, page);
+ init_offset(buffer_page, real_end);
init_offset(list_head, next);
@@ -281,6 +288,7 @@ static void print_offsets(void)
print_offset(buffer_page, read);
print_offset(buffer_page, list);
print_offset(buffer_page, page);
+ print_offset(buffer_page, real_end);
print_offset(list_head, next);
@@ -295,6 +303,20 @@ static void print_offsets(void)
#undef print_offset
}
+static int buffer_page_has_data(ulong page)
+{
+ uint end;
+
+ if (!buffer_page_real_end_available)
+ return 1;
+
+ /* Only write pages with data in it */
+ read_value(end, page, buffer_page, real_end);
+ return end;
+out_fail:
+ return 0;
+}
+
static int ftrace_init_pages(struct ring_buffer_per_cpu *cpu_buffer,
unsigned nr_pages)
{
@@ -361,7 +383,8 @@ static int ftrace_init_pages(struct ring_buffer_per_cpu *cpu_buffer,
/* Setup linear pages */
- cpu_buffer->linear_pages[count++] = cpu_buffer->reader_page;
+ if (buffer_page_has_data(cpu_buffer->reader_page))
+ cpu_buffer->linear_pages[count++] = cpu_buffer->reader_page;
if (cpu_buffer->reader_page == cpu_buffer->commit_page)
goto done;
@@ -647,6 +670,8 @@ static int ftrace_init(void)
ftrace_trace_arrays = sym_ftrace_trace_arrays->value;
}
+ if (MEMBER_EXISTS("buffer_page", "real_end"))
+ buffer_page_real_end_available = 1;
if (MEMBER_EXISTS("trace_array", "current_trace")) {
encapsulated_current_trace = 1;
@@ -1809,7 +1834,7 @@ static void cmd_ftrace(void)
static char *help_ftrace[] = {
"trace",
"show or dump the tracing info",
-"[ <show [-c <cpulist>] [-f [no]<flagname>]> | <dump [-sm] <dest-dir>> ]",
+"[ <show [-c <cpulist>] [-f [no]<flagname>]> | <dump [-sm] <dest-dir>> ] | <dump -t <trace.dat> ]",
"trace",
" shows the current tracer and other informations.",
"",
@@ -2184,7 +2209,8 @@ static int save_proc_kallsyms(int fd)
if (!strncmp(sp->name, "_MODULE_", strlen("_MODULE_")))
continue;
- tmp_fprintf("%lx %c %s\t[%s]\n", sp->value, sp->type,
+ /* Currently sp->type for modules is not trusted */
+ tmp_fprintf("%lx %c %s\t[%s]\n", sp->value, 'm',
sp->name, lm->mod_name);
}
}
--
2.17.1
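
For reference, once this patch is applied the new "-t" form writes a trace-cmd readable file straight from a crash session. A minimal hypothetical session (the dump path is illustrative; trace-cmd itself is pulled in by the package's Requires, and on x86_64 the module installs under /usr/lib64/crash/extensions/):

    crash> extend /usr/lib64/crash/extensions/trace.so
    crash> trace dump -t /tmp/trace.dat
    crash> !trace-cmd report /tmp/trace.dat | head

As the getopt handling further down enforces, -t cannot be combined with -s or -m, which only apply to the directory-tree form "trace dump [-sm] <dest-dir>".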

@@ -0,0 +1,36 @@
From 153629a96b07a8ae96b0b28cce100fde9ea1398d Mon Sep 17 00:00:00 2001
From: HATAYAMA Daisuke <d.hatayama@fujitsu.com>
Date: Thu, 28 Jan 2021 00:08:35 -0500
Subject: [PATCH 1/2] Makefile: set DT_SONAME to trace.so
Fedora never requires SONAME versioning for plugins according to the
Packaging Guidelines:
https://docs.fedoraproject.org/en-US/packaging-guidelines/#_downstream_so_name_versioning
In cases where upstream ships unversioned .so library (so this is
not needed for plugins, drivers, etc.), the packager MUST try to
convince upstream to start versioning it.
On the other hand, fedpkg lint still prints a warning if the shared
library doesn't have DT_SONAME. To suppress this, we set the DT_SONAME
field to trace.so just in case.
Signed-off-by: HATAYAMA Daisuke <d.hatayama@fujitsu.com>
---
Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Makefile b/Makefile
index a9e0a21..b4573b4 100755
--- a/Makefile
+++ b/Makefile
@@ -28,4 +28,4 @@ INCDIR=/usr/include/crash
all: trace.so
trace.so: $(INCDIR)/defs.h trace.c
- gcc $(RPM_OPT_FLAGS) -Wall -I$(INCDIR) -nostartfiles -shared -rdynamic -o trace.so trace.c -fPIC -D$(TARGET) $(TARGET_CFLAGS)
+ gcc $(RPM_OPT_FLAGS) -Wall -I$(INCDIR) -nostartfiles -shared -rdynamic -Wl,-z,now -Wl,-soname,trace.so -o trace.so trace.c -fPIC -D$(TARGET) $(TARGET_CFLAGS)
--
2.29.2
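
As a quick local check, the effect of the added -Wl,-soname flag can be verified on a scratch build with readelf (a sketch; the exact dynamic-section formatting varies by binutils version):

    $ make
    $ readelf -d trace.so | grep SONAME
     0x000000000000000e (SONAME)             Library soname: [trace.so]

Note that -Wl,-z,now on the same line keeps the full-RELRO hardening that the old rhel8_build.patch (removed further down) used to add separately.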

@@ -0,0 +1,50 @@
From 17c35d075f067d80fee112ae18365b8defa2ca5a Mon Sep 17 00:00:00 2001
From: HATAYAMA Daisuke <d.hatayama@fujitsu.com>
Date: Fri, 19 Feb 2021 11:38:59 +0900
Subject: [PATCH 2/2] Makefile: fix build failure on aarch64 and ppc64le
Currently, there is a build failure on aarch64 and ppc64le as follows:
gcc -O2 -flto=auto -ffat-lto-objects -fexceptions -g -grecord-gcc-switches -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -fstack-protector-strong -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -m64 -mcpu=power8 -mtune=power8 -fasynchronous-unwind-tables -fstack-clash-protection -Wall -I/usr/include/crash -nostartfiles -shared -rdynamic -o trace.so trace.c -fPIC -D
gcc: error: macro name missing after '-D'
The failure is caused by a missing argument to the -D option, because
the TARGET variable is empty.
This commit fixes the issue by defining the TARGET variable properly
for aarch64 and ppc64le.
Signed-off-by: HATAYAMA Daisuke <d.hatayama@fujitsu.com>
---
Makefile | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/Makefile b/Makefile
index b4573b4..0d35d4f 100755
--- a/Makefile
+++ b/Makefile
@@ -6,6 +6,10 @@ ifeq ($(shell arch), ppc64)
TARGET=PPC64
TARGET_CFLAGS=-m64
endif
+ifeq ($(shell arch), ppc64le)
+ TARGET=PPC64
+ TARGET_CFLAGS=-m64
+endif
ifeq ($(shell arch), ia64)
TARGET=IA64
TARGET_CFLAGS=
@@ -22,6 +26,10 @@ ifeq ($(shell arch), s390)
TARGET=S390
TARGET_CFLAGS=
endif
+ifeq ($(shell arch), aarch64)
+ TARGET=ARM64
+ TARGET_CFLAGS=
+endif
INCDIR=/usr/include/crash
--
2.29.2
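
With these two stanzas, $(shell arch) resolves to a crash TARGET on every architecture the package builds for, so -D$(TARGET) always receives a macro name. On an aarch64 builder, for instance, the link rule from the previous patch would expand roughly to (illustrative, with $(RPM_OPT_FLAGS) left unexpanded):

    gcc $(RPM_OPT_FLAGS) -Wall -I/usr/include/crash -nostartfiles -shared -rdynamic \
        -Wl,-z,now -Wl,-soname,trace.so -o trace.so trace.c -fPIC -DARM64

and the "macro name missing after '-D'" error quoted in the commit message can no longer occur.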

@@ -1,139 +0,0 @@
From c4cdbe3c18bca496dc6b4af17e2637379528ad02 Mon Sep 17 00:00:00 2001
From: Valentin Schneider <valentin.schneider@arm.com>
Date: Thu, 3 Sep 2020 21:28:46 +0100
Subject: [PATCH 2/3] extensions/trace: Rename trace_buffer to array_buffer
This renaming is present from Linux v5.6 upwards, and was introduced by
commit:
1c5eb4481e01 ("tracing: Rename trace_buffer to array_buffer")
Rename the internal references from trace_buffer to array_buffer. Backwards
compatibility with older kernels is provided by checking whether struct
trace_array has an array_buffer field - should that not be the case, the
old naming scheme is used instead.
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
---
trace.c | 53 +++++++++++++++++++++++++++++++++++------------------
1 file changed, 35 insertions(+), 18 deletions(-)
diff --git a/trace.c b/trace.c
index c26b6c7ec475..2cea5b2d650a 100644
--- a/trace.c
+++ b/trace.c
@@ -31,9 +31,9 @@ static int per_cpu_buffer_sizes;
*/
static int encapsulated_current_trace;
/*
- * trace_buffer is supported
+ * array_buffer (trace_buffer pre v5.6) is supported
*/
-static int trace_buffer_available;
+static int array_buffer_available;
/*
* max_buffer is supported
*/
@@ -51,9 +51,9 @@ static int buffer_page_real_end_available;
#define koffset(struct, member) struct##_##member##_offset
static int koffset(trace_array, current_trace);
-static int koffset(trace_array, trace_buffer);
+static int koffset(trace_array, array_buffer);
static int koffset(trace_array, max_buffer);
-static int koffset(trace_buffer, buffer);
+static int koffset(array_buffer, buffer);
static int koffset(trace_array, buffer);
static int koffset(tracer, name);
@@ -117,7 +117,7 @@ static ulong max_tr_trace;
struct trace_instance {
char name[NAME_MAX + 1];
- ulong trace_buffer;
+ ulong array_buffer;
ulong max_buffer;
ulong ring_buffer;
unsigned pages;
@@ -174,8 +174,7 @@ static int write_and_check(int fd, void *data, size_t size)
static int init_offsets(void)
{
-#define init_offset(struct, member) do { \
- koffset(struct, member) = MEMBER_OFFSET(#struct, #member);\
+#define check_offset(struct, member) do { \
if (koffset(struct, member) < 0) { \
fprintf(fp, "failed to init the offset, struct:"\
#struct ", member:" #member); \
@@ -184,12 +183,29 @@ static int init_offsets(void)
} \
} while (0)
+#define init_offset(struct, member) do { \
+ koffset(struct, member) = MEMBER_OFFSET(#struct, #member); \
+ check_offset(struct, member); \
+ } while (0)
+
+#define init_offset_alternative(struct, member, alt_struct, alt_member) do { \
+ koffset(struct, member) = MEMBER_OFFSET(#alt_struct, #alt_member); \
+ check_offset(struct, member); \
+ } while (0)
+
if (encapsulated_current_trace)
init_offset(trace_array, current_trace);
- if (trace_buffer_available) {
- init_offset(trace_array, trace_buffer);
- init_offset(trace_buffer, buffer);
+ if (array_buffer_available) {
+ if (MEMBER_EXISTS("trace_array", "array_buffer")) {
+ init_offset(trace_array, array_buffer);
+ init_offset(array_buffer, buffer);
+ } else {
+ init_offset_alternative(trace_array, array_buffer,
+ trace_array, trace_buffer);
+ init_offset_alternative(array_buffer, buffer,
+ trace_buffer, buffer);
+ }
if (max_buffer_available)
init_offset(trace_array, max_buffer);
@@ -486,17 +502,17 @@ out_fail:
static int ftrace_init_trace(struct trace_instance *ti, ulong instance_addr)
{
- if (trace_buffer_available) {
- ti->trace_buffer = instance_addr +
- koffset(trace_array, trace_buffer);
- read_value(ti->ring_buffer, ti->trace_buffer,
- trace_buffer, buffer);
+ if (array_buffer_available) {
+ ti->array_buffer = instance_addr +
+ koffset(trace_array, array_buffer);
+ read_value(ti->ring_buffer, ti->array_buffer,
+ array_buffer, buffer);
if (max_buffer_available) {
ti->max_buffer = instance_addr +
koffset(trace_array, max_buffer);
read_value(ti->max_tr_ring_buffer, ti->max_buffer,
- trace_buffer, buffer);
+ array_buffer, buffer);
}
} else {
read_value(ti->ring_buffer, instance_addr, trace_array, buffer);
@@ -683,8 +699,9 @@ static int ftrace_init(void)
current_trace = sym_current_trace->value;
}
- if (MEMBER_EXISTS("trace_array", "trace_buffer")) {
- trace_buffer_available = 1;
+ if (MEMBER_EXISTS("trace_array", "array_buffer") ||
+ MEMBER_EXISTS("trace_array", "trace_buffer")) {
+ array_buffer_available = 1;
if (MEMBER_EXISTS("trace_array", "max_buffer"))
max_buffer_available = 1;
--
2.17.1

@@ -1,139 +0,0 @@
From 1d4357ba0e0e1ec8267c32855aec5fe5abbcfec8 Mon Sep 17 00:00:00 2001
From: Valentin Schneider <valentin.schneider@arm.com>
Date: Thu, 3 Sep 2020 21:28:47 +0100
Subject: [PATCH 3/3] extensions/trace: Rename ring_buffer to trace_buffer
This renaming is present from Linux v5.6 upwards, and was introduced by
commit:
13292494379f ("tracing: Make struct ring_buffer less ambiguous")
Rename the internal references from ring_buffer to trace_buffer. Backwards
compatibility with older kernels is provided by checking whether struct
trace_buffer has a buffer field of type struct ring_buffer - should that
not be the case, the newer naming scheme is used instead.
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
---
trace.c | 50 +++++++++++++++++++++++++++++---------------------
1 file changed, 29 insertions(+), 21 deletions(-)
diff --git a/trace.c b/trace.c
index 2cea5b2d650a..c33907f98b00 100644
--- a/trace.c
+++ b/trace.c
@@ -57,10 +57,10 @@ static int koffset(array_buffer, buffer);
static int koffset(trace_array, buffer);
static int koffset(tracer, name);
-static int koffset(ring_buffer, pages);
-static int koffset(ring_buffer, flags);
-static int koffset(ring_buffer, cpus);
-static int koffset(ring_buffer, buffers);
+static int koffset(trace_buffer, pages);
+static int koffset(trace_buffer, flags);
+static int koffset(trace_buffer, cpus);
+static int koffset(trace_buffer, buffers);
static int koffset(ring_buffer_per_cpu, cpu);
static int koffset(ring_buffer_per_cpu, pages);
@@ -119,7 +119,7 @@ struct trace_instance {
char name[NAME_MAX + 1];
ulong array_buffer;
ulong max_buffer;
- ulong ring_buffer;
+ ulong trace_buffer;
unsigned pages;
struct ring_buffer_per_cpu *buffers;
@@ -225,11 +225,19 @@ static int init_offsets(void)
else if (kernel_symbol_exists("ring_buffer_read"))
gdb_set_crash_scope(symbol_value("ring_buffer_read"), "ring_buffer_read");
- if (!per_cpu_buffer_sizes)
- init_offset(ring_buffer, pages);
- init_offset(ring_buffer, flags);
- init_offset(ring_buffer, cpus);
- init_offset(ring_buffer, buffers);
+ if (STREQ(MEMBER_TYPE_NAME("trace_buffer", "buffer"), "ring_buffer")) {
+ if (!per_cpu_buffer_sizes)
+ init_offset_alternative(trace_buffer, pages, ring_buffer, pages);
+ init_offset_alternative(trace_buffer, flags, ring_buffer, flags);
+ init_offset_alternative(trace_buffer, cpus, ring_buffer, cpus);
+ init_offset_alternative(trace_buffer, buffers, ring_buffer, buffers);
+ } else {
+ if (!per_cpu_buffer_sizes)
+ init_offset(trace_buffer, pages);
+ init_offset(trace_buffer, flags);
+ init_offset(trace_buffer, cpus);
+ init_offset(trace_buffer, buffers);
+ }
if (MEMBER_SIZE("ring_buffer_per_cpu", "pages") == sizeof(ulong)) {
lockless_ring_buffer = 1;
@@ -287,10 +295,10 @@ static void print_offsets(void)
print_offset(trace_array, buffer);
print_offset(tracer, name);
- print_offset(ring_buffer, pages);
- print_offset(ring_buffer, flags);
- print_offset(ring_buffer, cpus);
- print_offset(ring_buffer, buffers);
+ print_offset(trace_buffer, pages);
+ print_offset(trace_buffer, flags);
+ print_offset(trace_buffer, cpus);
+ print_offset(trace_buffer, buffers);
print_offset(ring_buffer_per_cpu, cpu);
print_offset(ring_buffer_per_cpu, pages);
@@ -445,12 +453,12 @@ static void ftrace_destroy_buffers(struct ring_buffer_per_cpu *buffers)
}
static int ftrace_init_buffers(struct ring_buffer_per_cpu *buffers,
- ulong ring_buffer, unsigned pages)
+ ulong trace_buffer, unsigned pages)
{
int i;
ulong buffers_array;
- read_value(buffers_array, ring_buffer, ring_buffer, buffers);
+ read_value(buffers_array, trace_buffer, trace_buffer, buffers);
for (i = 0; i < nr_cpu_ids; i++) {
if (!readmem(buffers_array + sizeof(ulong) * i, KVADDR,
@@ -505,7 +513,7 @@ static int ftrace_init_trace(struct trace_instance *ti, ulong instance_addr)
if (array_buffer_available) {
ti->array_buffer = instance_addr +
koffset(trace_array, array_buffer);
- read_value(ti->ring_buffer, ti->array_buffer,
+ read_value(ti->trace_buffer, ti->array_buffer,
array_buffer, buffer);
if (max_buffer_available) {
@@ -515,19 +523,19 @@ static int ftrace_init_trace(struct trace_instance *ti, ulong instance_addr)
array_buffer, buffer);
}
} else {
- read_value(ti->ring_buffer, instance_addr, trace_array, buffer);
- read_value(ti->pages, ti->ring_buffer, ring_buffer, pages);
+ read_value(ti->trace_buffer, instance_addr, trace_array, buffer);
+ read_value(ti->pages, ti->trace_buffer, trace_buffer, pages);
read_value(ti->max_tr_ring_buffer, max_tr_trace, trace_array, buffer);
if (ti->max_tr_ring_buffer)
- read_value(ti->max_tr_pages, ti->max_tr_ring_buffer, ring_buffer, pages);
+ read_value(ti->max_tr_pages, ti->max_tr_ring_buffer, trace_buffer, pages);
}
ti->buffers = calloc(sizeof(*ti->buffers), nr_cpu_ids);
if (ti->buffers == NULL)
goto out_fail;
- if (ftrace_init_buffers(ti->buffers, ti->ring_buffer,
+ if (ftrace_init_buffers(ti->buffers, ti->trace_buffer,
ti->pages) < 0)
goto out_fail;
--
2.17.1
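
For orientation, after patches 2/3 and 3/3 the extension's internal names follow the v5.6+ kernel layout and are mapped back onto older kernels by the MEMBER_EXISTS()/MEMBER_TYPE_NAME() probes shown above. A summary of the two renames (no new code, just the hunks restated):

    kernel        trace_array member    buffer struct type
    <  v5.6       trace_buffer          struct ring_buffer
    >= v5.6       array_buffer          struct trace_buffer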

@@ -1,13 +0,0 @@
--- crash-trace-command-2.0/Makefile.orig
+++ crash-trace-command-2.0/Makefile
@@ -22,6 +22,10 @@ ifeq ($(shell arch), s390)
TARGET=S390
TARGET_CFLAGS=
endif
+ifeq ($(shell arch), aarch64)
+ TARGET=ARM64
+ TARGET_CFLAGS=
+endif
INCDIR=/usr/include/crash

@@ -1,743 +0,0 @@
--- crash-trace-command-2.0/trace.c.orig
+++ crash-trace-command-2.0/trace.c
@@ -38,6 +38,10 @@ static int trace_buffer_available;
* max_buffer is supported
*/
static int max_buffer_available;
+/*
+ * multiple trace instances are supported
+ */
+static int multiple_instances_available;
#define koffset(struct, member) struct##_##member##_offset
@@ -78,6 +82,8 @@ static int koffset(ftrace_event_field, o
static int koffset(ftrace_event_field, size);
static int koffset(ftrace_event_field, is_signed);
+static int koffset(trace_array, name);
+
static int koffset(POINTER_SYM, POINTER) = 0;
struct ring_buffer_per_cpu {
@@ -101,16 +107,25 @@ struct ring_buffer_per_cpu {
};
static ulong global_trace;
-static ulong global_trace_buffer;
-static ulong global_max_buffer;
-static ulong global_ring_buffer;
-static unsigned global_pages;
-static struct ring_buffer_per_cpu *global_buffers;
-
static ulong max_tr_trace;
-static ulong max_tr_ring_buffer;
-static unsigned max_tr_pages;
-static struct ring_buffer_per_cpu *max_tr_buffers;
+
+struct trace_instance {
+ char name[NAME_MAX + 1];
+ ulong trace_buffer;
+ ulong max_buffer;
+ ulong ring_buffer;
+ unsigned pages;
+ struct ring_buffer_per_cpu *buffers;
+
+ ulong max_tr_ring_buffer;
+ unsigned max_tr_pages;
+ struct ring_buffer_per_cpu *max_tr_buffers;
+};
+
+static ulong ftrace_trace_arrays;
+static struct trace_instance global_trace_instance;
+static struct trace_instance *trace_instances = NULL;
+static int instance_count;
static ulong ftrace_events;
static ulong current_trace;
@@ -229,6 +244,9 @@ static int init_offsets(void)
init_offset(ftrace_event_field, size);
init_offset(ftrace_event_field, is_signed);
+ if (MEMBER_EXISTS("trace_array", "name"))
+ init_offset(trace_array, name);
+
return 0;
#undef init_offset
}
@@ -435,61 +453,140 @@ out_fail:
return -1;
}
-static int ftrace_int_global_trace(void)
+static int ftrace_init_trace(struct trace_instance *ti, ulong instance_addr)
{
if (trace_buffer_available) {
- global_trace_buffer = global_trace + koffset(trace_array, trace_buffer);
- read_value(global_ring_buffer, global_trace_buffer, trace_buffer, buffer);
+ ti->trace_buffer = instance_addr +
+ koffset(trace_array, trace_buffer);
+ read_value(ti->ring_buffer, ti->trace_buffer,
+ trace_buffer, buffer);
+
+ if (max_buffer_available) {
+ ti->max_buffer = instance_addr +
+ koffset(trace_array, max_buffer);
+ read_value(ti->max_tr_ring_buffer, ti->max_buffer,
+ trace_buffer, buffer);
+ }
} else {
- read_value(global_ring_buffer, global_trace, trace_array, buffer);
- read_value(global_pages, global_ring_buffer, ring_buffer, pages);
+ read_value(ti->ring_buffer, instance_addr, trace_array, buffer);
+ read_value(ti->pages, ti->ring_buffer, ring_buffer, pages);
+
+ read_value(ti->max_tr_ring_buffer, max_tr_trace, trace_array, buffer);
+ if (ti->max_tr_ring_buffer)
+ read_value(ti->max_tr_pages, ti->max_tr_ring_buffer, ring_buffer, pages);
}
- global_buffers = calloc(sizeof(*global_buffers), nr_cpu_ids);
- if (global_buffers == NULL)
+ ti->buffers = calloc(sizeof(*ti->buffers), nr_cpu_ids);
+ if (ti->buffers == NULL)
+ goto out_fail;
+
+ if (ftrace_init_buffers(ti->buffers, ti->ring_buffer,
+ ti->pages) < 0)
+ goto out_fail;
+
+ if (!ti->max_tr_ring_buffer)
+ return 0;
+
+ ti->max_tr_buffers = calloc(sizeof(*ti->max_tr_buffers), nr_cpu_ids);
+ if (ti->max_tr_buffers == NULL)
goto out_fail;
- if (ftrace_init_buffers(global_buffers, global_ring_buffer,
- global_pages) < 0)
+ if (ftrace_init_buffers(ti->max_tr_buffers, ti->max_tr_ring_buffer,
+ ti->max_tr_pages) < 0)
goto out_fail;
return 0;
out_fail:
- free(global_buffers);
+ free(ti->max_tr_buffers);
+ free(ti->buffers);
return -1;
}
-static int ftrace_int_max_tr_trace(void)
+static void ftrace_destroy_all_instance_buffers()
{
- if (trace_buffer_available) {
- if (!max_buffer_available)
- return 0;
+ int i;
- global_max_buffer = global_trace + koffset(trace_array, max_buffer);
- read_value(max_tr_ring_buffer, global_max_buffer, trace_buffer, buffer);
- } else {
- read_value(max_tr_ring_buffer, max_tr_trace, trace_array, buffer);
+ for (i = 0; i < instance_count; i++)
+ {
+ struct trace_instance *ti = &trace_instances[i];
- if (!max_tr_ring_buffer)
- return 0;
+ if (ti->max_tr_ring_buffer) {
+ ftrace_destroy_buffers(ti->max_tr_buffers);
+ free(ti->max_tr_buffers);
+ }
- read_value(max_tr_pages, max_tr_ring_buffer, ring_buffer, pages);
+ ftrace_destroy_buffers(ti->buffers);
+ free(ti->buffers);
}
+}
- max_tr_buffers = calloc(sizeof(*max_tr_buffers), nr_cpu_ids);
- if (max_tr_buffers == NULL)
- goto out_fail;
+static void ftrace_destroy_instances()
+{
+ ftrace_destroy_all_instance_buffers();
+ free(trace_instances);
+}
- if (ftrace_init_buffers(max_tr_buffers, max_tr_ring_buffer,
- max_tr_pages) < 0)
- goto out_fail;
+static int ftrace_init_instances()
+{
+ int i;
+ struct trace_instance *ti;
+ struct list_data list_data;
+ struct list_data *ld = &list_data;
+
+ if (!multiple_instances_available)
+ return 0;
+
+ BZERO(ld, sizeof(struct list_data));
+ ld->start = ftrace_trace_arrays;
+ ld->end = global_trace;
+ ld->flags = LIST_ALLOCATE;
+ instance_count = do_list(ld);
+
+ /* The do_list count includes the list_head, which is not a
+ * proper instance */
+ instance_count--;
+ if (instance_count <= 0)
+ return 0;
+
+ trace_instances = calloc(sizeof(struct trace_instance), instance_count);
+
+ /* We start i at 1 to skip over the list_head and continue to the last
+ * instance, which lies at index instance_count */
+ for (i = 1; i <= instance_count; i++)
+ {
+ ulong instance_ptr;
+ ulong name_addr;
+ int ret;
+
+ ti = &trace_instances[i-1];
+ instance_ptr = ld->list_ptr[i];
+ read_value(name_addr, instance_ptr, trace_array, name);
+ if (!name_addr)
+ {
+ console("Instance name is NULL\n");
+ }
+ else if (!read_string(name_addr, ti->name, sizeof(ti->name)))
+ {
+ console("Failed to read instance name at address %p\n", (void*)name_addr);
+ goto out_fail;
+ }
+
+ ret = ftrace_init_trace(ti, instance_ptr);
+ if (ret < 0)
+ goto out_fail;
+ }
+ FREEBUF(ld->list_ptr);
return 0;
out_fail:
- free(max_tr_buffers);
- max_tr_ring_buffer = 0;
+ /* We've already freed the current instance's trace buffer info, so
+ * we'll clear that out to avoid double freeing in
+ * ftrace_destroy_instances() */
+ BZERO(ti, sizeof(struct trace_instance));
+ ftrace_destroy_instances();
+
return -1;
}
@@ -504,7 +601,7 @@ static int ftrace_init_current_tracer(vo
} else {
read_value(addr, current_trace, POINTER_SYM, POINTER);
}
-
+
read_value(addr, addr, tracer, name);
read_string(addr, tmp, 128);
@@ -524,9 +621,11 @@ static int ftrace_init(void)
struct syment *sym_max_tr_trace;
struct syment *sym_ftrace_events;
struct syment *sym_current_trace;
+ struct syment *sym_ftrace_trace_arrays;
sym_global_trace = symbol_search("global_trace");
sym_ftrace_events = symbol_search("ftrace_events");
+ sym_ftrace_trace_arrays = symbol_search("ftrace_trace_arrays");
if (sym_global_trace == NULL || sym_ftrace_events == NULL)
return -1;
@@ -534,6 +633,13 @@ static int ftrace_init(void)
global_trace = sym_global_trace->value;
ftrace_events = sym_ftrace_events->value;
+ if (sym_ftrace_trace_arrays)
+ {
+ multiple_instances_available = 1;
+ ftrace_trace_arrays = sym_ftrace_trace_arrays->value;
+ }
+
+
if (MEMBER_EXISTS("trace_array", "current_trace")) {
encapsulated_current_trace = 1;
} else {
@@ -564,28 +670,31 @@ static int ftrace_init(void)
return -1;
print_offsets();
- if (ftrace_int_global_trace() < 0)
+ if (ftrace_init_trace(&global_trace_instance, global_trace) < 0)
goto out_0;
- ftrace_int_max_tr_trace();
+ if (ftrace_init_instances() < 0)
+ goto out_1;
if (ftrace_init_event_types() < 0)
- goto out_1;
+ goto out_2;
if (ftrace_init_current_tracer() < 0)
- goto out_2;
+ goto out_3;
return 0;
-out_2:
+out_3:
ftrace_destroy_event_types();
+out_2:
+ ftrace_destroy_instances();
out_1:
- if (max_tr_ring_buffer) {
- ftrace_destroy_buffers(max_tr_buffers);
- free(max_tr_buffers);
+ if (global_trace_instance.max_tr_ring_buffer) {
+ ftrace_destroy_buffers(global_trace_instance.max_tr_buffers);
+ free(global_trace_instance.max_tr_buffers);
}
- ftrace_destroy_buffers(global_buffers);
- free(global_buffers);
+ ftrace_destroy_buffers(global_trace_instance.buffers);
+ free(global_trace_instance.buffers);
out_0:
return -1;
}
@@ -595,13 +704,15 @@ static void ftrace_destroy(void)
free(current_tracer_name);
ftrace_destroy_event_types();
- if (max_tr_ring_buffer) {
- ftrace_destroy_buffers(max_tr_buffers);
- free(max_tr_buffers);
+ ftrace_destroy_instances();
+
+ if (global_trace_instance.max_tr_ring_buffer) {
+ ftrace_destroy_buffers(global_trace_instance.max_tr_buffers);
+ free(global_trace_instance.max_tr_buffers);
}
- ftrace_destroy_buffers(global_buffers);
- free(global_buffers);
+ ftrace_destroy_buffers(global_trace_instance.buffers);
+ free(global_trace_instance.buffers);
}
static int ftrace_dump_page(int fd, ulong page, void *page_tmp)
@@ -652,7 +763,8 @@ static int try_mkdir(const char *pathnam
return 0;
}
-static int ftrace_dump_buffers(const char *per_cpu_path)
+static int ftrace_dump_buffers(const char *per_cpu_path,
+ struct trace_instance *ti)
{
int i;
void *page_tmp;
@@ -664,7 +776,7 @@ static int ftrace_dump_buffers(const cha
return -1;
for (i = 0; i < nr_cpu_ids; i++) {
- struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i];
+ struct ring_buffer_per_cpu *cpu_buffer = &ti->buffers[i];
if (!cpu_buffer->kaddr)
continue;
@@ -679,7 +791,7 @@ static int ftrace_dump_buffers(const cha
if (fd < 0)
goto out_fail;
- ftrace_dump_buffer(fd, cpu_buffer, global_pages, page_tmp);
+ ftrace_dump_buffer(fd, cpu_buffer, ti->pages, page_tmp);
close(fd);
}
@@ -1015,8 +1127,6 @@ static void ftrace_destroy_event_types(v
free(ftrace_common_fields);
}
-#define TRACE_EVENT_FL_TRACEPOINT 0x40
-
static
int ftrace_get_event_type_name(ulong call, char *name, int len)
{
@@ -1024,34 +1134,35 @@ int ftrace_get_event_type_name(ulong cal
static int name_offset;
static int flags_offset;
static int tp_name_offset;
- uint flags;
+ static long tracepoint_flag;
+ uint flags;
ulong name_addr;
if (inited)
goto work;
- inited = 1;
- name_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "name"),
- MEMBER_OFFSET("trace_event_call", "name"));
- if (name_offset >= 0)
- goto work;
-
- name_offset = MAX(ANON_MEMBER_OFFSET("ftrace_event_call", "name"),
- ANON_MEMBER_OFFSET("trace_event_call", "name"));
- if (name_offset < 0)
- return -1;
+ name_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "tp"),
+ MEMBER_OFFSET("trace_event_call", "tp"));
+ if (name_offset >= 0) {
+ flags_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "flags"),
+ MEMBER_OFFSET("trace_event_call", "flags"));
+ if (flags_offset < 0)
+ return -1;
- flags_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "flags"),
- MEMBER_OFFSET("trace_event_call", "flags"));
- if (flags_offset < 0)
- return -1;
+ tp_name_offset = MEMBER_OFFSET("tracepoint", "name");
+ if (tp_name_offset < 0)
+ return -1;
- tp_name_offset = MEMBER_OFFSET("tracepoint", "name");
- if (tp_name_offset < 0)
- return -1;
+ if (!enumerator_value("TRACE_EVENT_FL_TRACEPOINT", &tracepoint_flag))
+ return -1;
- inited = 2;
+ inited = 2;
+ } else {
+ name_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "name"),
+ MEMBER_OFFSET("trace_event_call", "name"));
+ inited = 1;
+ }
work:
if (name_offset < 0)
@@ -1067,7 +1178,7 @@ work:
RETURN_ON_ERROR))
return -1;
- if (flags & TRACE_EVENT_FL_TRACEPOINT) {
+ if (flags & (uint)tracepoint_flag) {
if (!readmem(name_addr + tp_name_offset, KVADDR,
&name_addr, sizeof(name_addr),
"read tracepoint name", RETURN_ON_ERROR))
@@ -1476,26 +1587,72 @@ static int dump_kallsyms(const char *dum
static int trace_cmd_data_output(int fd);
+#define FTRACE_DUMP_SYMBOLS (1 << 0)
+#define FTRACE_DUMP_META_DATA (1 << 1)
+
+static int populate_ftrace_dir_tree(struct trace_instance *ti,
+ char *root, uint flags)
+{
+ char path[PATH_MAX];
+ int ret;
+
+ ret = mkdir(root, 0755);
+ if (ret < 0) {
+ if (errno == EEXIST)
+ error(INFO, "mkdir: %s exists\n", root);
+ return FALSE;
+ }
+
+ snprintf(path, sizeof(path), "%s/per_cpu", root);
+ if (try_mkdir(path, 0755) < 0)
+ return FALSE;
+
+ if (ftrace_dump_buffers(path, ti) < 0)
+ return FALSE;
+
+ if (flags & FTRACE_DUMP_META_DATA) {
+ /* Dump event types */
+ snprintf(path, sizeof(path), "%s/events", root);
+ if (try_mkdir(path, 0755) < 0)
+ return FALSE;
+
+ if (ftrace_dump_event_types(path) < 0)
+ return FALSE;
+
+ /* Dump pids with corresponding cmdlines */
+ if (dump_saved_cmdlines(root) < 0)
+ return FALSE;
+ }
+
+ if (flags & FTRACE_DUMP_SYMBOLS) {
+ /* Dump all symbols of the kernel */
+ dump_kallsyms(root);
+ }
+
+ return TRUE;
+}
+
static void ftrace_dump(int argc, char *argv[])
{
int c;
- int dump_meta_data = 0;
- int dump_symbols = 0;
+ int i;
+ uint flags = 0;
char *dump_tracing_dir;
- char path[PATH_MAX];
- int ret;
+ char instance_path[PATH_MAX];
while ((c = getopt(argc, argv, "smt")) != EOF) {
switch(c)
{
case 's':
- dump_symbols = 1;
+ flags |= FTRACE_DUMP_SYMBOLS;
break;
case 'm':
- dump_meta_data = 1;
+ flags |= FTRACE_DUMP_META_DATA;
break;
case 't':
- if (dump_symbols || dump_meta_data || argc - optind > 1)
+ if (flags & FTRACE_DUMP_SYMBOLS ||
+ flags & FTRACE_DUMP_META_DATA ||
+ argc - optind > 1)
cmd_usage(pc->curcmd, SYNOPSIS);
else {
char *trace_dat = "trace.dat";
@@ -1526,38 +1683,34 @@ static void ftrace_dump(int argc, char *
return;
}
- ret = mkdir(dump_tracing_dir, 0755);
- if (ret < 0) {
- if (errno == EEXIST)
- error(INFO, "mkdir: %s exists\n", dump_tracing_dir);
+ if (!populate_ftrace_dir_tree(&global_trace_instance, dump_tracing_dir, flags))
return;
- }
- snprintf(path, sizeof(path), "%s/per_cpu", dump_tracing_dir);
- if (try_mkdir(path, 0755) < 0)
+ if (!multiple_instances_available || instance_count == 0)
return;
- if (ftrace_dump_buffers(path) < 0)
+ /* Create an instances directory, and dump instance data in there */
+ snprintf(instance_path, sizeof(instance_path),
+ "%s/instances", dump_tracing_dir);
+ if (try_mkdir(instance_path, 0755) < 0)
return;
- if (dump_meta_data) {
- /* Dump event types */
- snprintf(path, sizeof(path), "%s/events", dump_tracing_dir);
- if (try_mkdir(path, 0755) < 0)
- return;
+ /* Don't care about the flags anymore */
+ flags = 0;
- if (ftrace_dump_event_types(path) < 0)
- return;
+ for (i = 0; i < instance_count; i++)
+ {
+ struct trace_instance *ti = &trace_instances[i];
+
+ snprintf(instance_path, sizeof(instance_path),
+ "%s/instances/%s", dump_tracing_dir,
+ ti->name);
- /* Dump pids with corresponding cmdlines */
- if (dump_saved_cmdlines(dump_tracing_dir) < 0)
- return;
+ if (populate_ftrace_dir_tree(ti, instance_path, flags) < 0)
+ break;
}
- if (dump_symbols) {
- /* Dump all symbols of the kernel */
- dump_kallsyms(dump_tracing_dir);
- }
+ return;
}
static void ftrace_show(int argc, char *argv[])
@@ -2161,26 +2314,69 @@ static int save_ftrace_cmdlines(int fd)
return tmp_file_flush(fd);
}
-static int save_res_data(int fd, int nr_cpu_buffers)
+/* From trace-cmd.h */
+enum {
+ TRACECMD_OPTION_DONE, /* 0 */
+ TRACECMD_OPTION_DATE, /* 1 */
+ TRACECMD_OPTION_CPUSTAT, /* 2 */
+ TRACECMD_OPTION_BUFFER, /* 3 */
+ TRACECMD_OPTION_TRACECLOCK, /* 4 */
+ TRACECMD_OPTION_UNAME, /* 5 */
+ TRACECMD_OPTION_HOOK, /* 6 */
+};
+
+static int write_options(int fd, unsigned long long *buffer_offsets)
{
- unsigned short option = 0;
+ int i;
+ unsigned short option;
- if (write_and_check(fd, &nr_cpu_buffers, 4))
- return -1;
+ if (!multiple_instances_available)
+ return 0;
if (write_and_check(fd, "options ", 10))
return -1;
+ option = TRACECMD_OPTION_BUFFER;
+ for (i = 0; i < instance_count; i++)
+ {
+ char *name = trace_instances[i].name;
+ size_t name_size = strlen(name) + 1; /* Name length + '\0' */
+ unsigned long long option_size = 8 + name_size;
+ unsigned long long offset;
+
+ offset = buffer_offsets ? buffer_offsets[i] : 0;
+ if (write_and_check(fd, &option, 2))
+ return -1;
+ if (write_and_check(fd, &option_size, 4))
+ return -1;
+ if (write_and_check(fd, &offset, 8))
+ return -1;
+ if (write_and_check(fd, name, name_size))
+ return -1;
+ }
+
+ option = TRACECMD_OPTION_DONE;
if (write_and_check(fd, &option, 2))
return -1;
+ return 0;
+}
+
+static int save_res_data(int fd, int nr_cpu_buffers, unsigned long long *buffer_offsets)
+{
+ if (write_and_check(fd, &nr_cpu_buffers, 4))
+ return -1;
+
+ if (write_options(fd, buffer_offsets))
+ return -1;
+
if (write_and_check(fd, "flyrecord", 10))
return -1;
return 0;
}
-static int save_record_data(int fd, int nr_cpu_buffers)
+static int save_record_data(int fd, int nr_cpu_buffers, struct trace_instance *ti)
{
int i, j;
unsigned long long offset, buffer_offset;
@@ -2192,7 +2388,7 @@ static int save_record_data(int fd, int
buffer_offset = offset;
for (i = 0; i < nr_cpu_ids; i++) {
- struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i];
+ struct ring_buffer_per_cpu *cpu_buffer = &ti->buffers[i];
unsigned long long buffer_size;
if (!cpu_buffer->kaddr)
@@ -2212,7 +2408,7 @@ static int save_record_data(int fd, int
lseek(fd, offset, SEEK_SET);
for (i = 0; i < nr_cpu_ids; i++) {
- struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i];
+ struct ring_buffer_per_cpu *cpu_buffer = &ti->buffers[i];
if (!cpu_buffer->kaddr)
continue;
@@ -2231,13 +2427,13 @@ static int save_record_data(int fd, int
return 0;
}
-static int __trace_cmd_data_output(int fd)
+static int get_nr_cpu_buffers(struct trace_instance *ti)
{
int i;
int nr_cpu_buffers = 0;
for (i = 0; i < nr_cpu_ids; i++) {
- struct ring_buffer_per_cpu *cpu_buffer = &global_buffers[i];
+ struct ring_buffer_per_cpu *cpu_buffer = &ti->buffers[i];
if (!cpu_buffer->kaddr)
continue;
@@ -2245,6 +2441,19 @@ static int __trace_cmd_data_output(int f
nr_cpu_buffers++;
}
+ return nr_cpu_buffers;
+}
+
+static int __trace_cmd_data_output(int fd)
+{
+ int nr_cpu_buffers;
+ unsigned long long global_res_data_offset;
+ unsigned long long *instance_offsets;
+
+ instance_offsets = calloc(sizeof(unsigned long long), instance_count);
+
+ nr_cpu_buffers = get_nr_cpu_buffers(&global_trace_instance);
+
if (save_initial_data(fd))
return -1;
if (save_header_files(fd))
@@ -2257,9 +2466,38 @@ static int __trace_cmd_data_output(int f
return -1;
if (save_ftrace_cmdlines(fd))
return -1;
- if (save_res_data(fd, nr_cpu_buffers))
+
+ /* We don't have the instance buffer offsets yet, so we'll write in 0s
+ * for now, and fix it up after we have that information available */
+ global_res_data_offset = lseek(fd, 0, SEEK_CUR);
+ if (save_res_data(fd, nr_cpu_buffers, NULL))
return -1;
- if (save_record_data(fd, nr_cpu_buffers))
+ if (save_record_data(fd, nr_cpu_buffers, &global_trace_instance))
+ return -1;
+
+ if (multiple_instances_available)
+ {
+ int i;
+
+ for (i = 0; i < instance_count; i++)
+ {
+ struct trace_instance *ti = &trace_instances[i];
+ nr_cpu_buffers = get_nr_cpu_buffers(ti);
+
+ /* Save off the instance offset for fixup later */
+ instance_offsets[i] = lseek(fd, 0, SEEK_CUR);
+
+ if (write_and_check(fd, "flyrecord", 10))
+ return -1;
+ if (save_record_data(fd, nr_cpu_buffers, ti))
+ return -1;
+ }
+ }
+
+ /* Fix up the global trace's options header with the instance offsets */
+ lseek(fd, global_res_data_offset, SEEK_SET);
+ nr_cpu_buffers = get_nr_cpu_buffers(&global_trace_instance);
+ if (save_res_data(fd, nr_cpu_buffers, instance_offsets))
return -1;
return 0;
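
Putting the hunks of this (now dropped) patch together, "trace dump [-sm] <dest-dir>" produced a layout along these lines, with instance buffers isolated under instances/ and the flags cleared so that only the global trace gets the -s/-m extras (a sketch; the per-CPU file naming is not visible in the hunks above):

    <dest-dir>/
        per_cpu/              one dump file per CPU with data (global buffer)
        events/               event format data, only with -m
        instances/<name>/
            per_cpu/          per-instance ring-buffer pages

    (The kallsyms and saved-cmdlines dumps, enabled by -s and -m, are written
     directly under <dest-dir>.)

In the trace.dat path, each instance is recorded as a TRACECMD_OPTION_BUFFER option whose offset is written as 0 at first and then patched in by re-running save_res_data() once the instance data offsets are known.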

@@ -1,26 +0,0 @@
--- crash-trace-command-2.0/trace.c.orig
+++ crash-trace-command-2.0/trace.c
@@ -96,7 +96,7 @@ struct ring_buffer_per_cpu {
ulong real_head_page;
int head_page_index;
- unsigned int nr_pages;
+ unsigned long nr_pages;
ulong *pages;
ulong *linear_pages;
@@ -430,7 +432,13 @@ static int ftrace_init_buffers(struct ri
buffer_read_value(overrun);
buffer_read_value(entries);
if (per_cpu_buffer_sizes) {
- buffer_read_value(nr_pages);
+ if (MEMBER_SIZE("ring_buffer_per_cpu", "nr_pages") == sizeof(unsigned int)) {
+ unsigned int tmp_nr_pages;
+ read_value(tmp_nr_pages, buffers[i].kaddr, ring_buffer_per_cpu, nr_pages);
+ buffers[i].nr_pages = (unsigned long) tmp_nr_pages;
+ } else {
+ buffer_read_value(nr_pages);
+ }
pages = buffers[i].nr_pages;
} else
buffers[i].nr_pages = pages;

@@ -1,43 +0,0 @@
Fix the trace.so extension for RHEL 7.6, which moved
ftrace_event_call.data into an anonymous union; the member's
previous offset changed, so the trace.so extension
module fails to load, reporting "no commands registered:
shared object unloaded".
--- crash-trace-command-2.0/trace.c.orig
+++ crash-trace-command-2.0/trace.c
@@ -853,8 +853,18 @@ static int syscall_get_enter_fields(ulon
inited = 1;
data_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "data"),
MEMBER_OFFSET("trace_event_call", "data"));
- if (data_offset < 0)
- return -1;
+ if (data_offset < 0) {
+ /*
+ * rhel-7.6 moved the .data member into an anonymous union.
+ */
+ if (MEMBER_EXISTS("ftrace_event_call", "rh_data") &&
+ MEMBER_EXISTS("ftrace_event_data", "data")) {
+ data_offset = MEMBER_OFFSET("ftrace_event_call", "rh_data") +
+ MEMBER_OFFSET("ftrace_event_data", "data");
+ inited = 2;
+ } else
+ return -1;
+ }
enter_fields_offset = MEMBER_OFFSET("syscall_metadata", "enter_fields");
if (enter_fields_offset < 0)
@@ -868,6 +878,12 @@ work:
"read ftrace_event_call data", RETURN_ON_ERROR))
return -1;
+ if (inited == 2) {
+ if (!readmem(metadata, KVADDR, &metadata, sizeof(metadata),
+ "read ftrace_event_call data (indirect rh_data)", RETURN_ON_ERROR))
+ return -1;
+ }
+
*fields = metadata + enter_fields_offset;
return 0;
}

@@ -1,11 +0,0 @@
--- crash-trace-command-2.0/trace.c.orig
+++ crash-trace-command-2.0/trace.c
@@ -1373,7 +1373,7 @@ static void ftrace_dump(int argc, char *
if (dump_symbols || dump_meta_data || argc - optind > 1)
cmd_usage(pc->curcmd, SYNOPSIS);
else {
- char *trace_dat;
+ char *trace_dat = "trace.dat";
int fd;
if (argc - optind == 0)

@@ -1,152 +0,0 @@
--- crash-trace-command-2.0/trace.c.orig
+++ crash-trace-command-2.0/trace.c
@@ -26,9 +26,21 @@ static int nr_cpu_ids;
*/
static int lockless_ring_buffer;
static int per_cpu_buffer_sizes;
+/*
+ * global and encapsulated current_trace are both supported
+ */
+static int encapsulated_current_trace;
+/*
+ * trace_buffer is supported
+ */
+static int trace_buffer_available;
#define koffset(struct, member) struct##_##member##_offset
+static int koffset(trace_array, current_trace);
+static int koffset(trace_array, trace_buffer);
+static int koffset(trace_array, max_buffer);
+static int koffset(trace_buffer, buffer);
static int koffset(trace_array, buffer);
static int koffset(tracer, name);
@@ -85,6 +97,8 @@ struct ring_buffer_per_cpu {
};
static ulong global_trace;
+static ulong global_trace_buffer;
+static ulong global_max_buffer;
static ulong global_ring_buffer;
static unsigned global_pages;
static struct ring_buffer_per_cpu *global_buffers;
@@ -144,8 +158,16 @@ static int init_offsets(void)
} \
} while (0)
+ if (encapsulated_current_trace)
+ init_offset(trace_array, current_trace);
- init_offset(trace_array, buffer);
+ if (trace_buffer_available) {
+ init_offset(trace_array, trace_buffer);
+ init_offset(trace_array, max_buffer);
+ init_offset(trace_buffer, buffer);
+ } else {
+ init_offset(trace_array, buffer);
+ }
init_offset(tracer, name);
if (MEMBER_EXISTS("ring_buffer_per_cpu", "nr_pages")) {
@@ -400,8 +422,13 @@ out_fail:
static int ftrace_int_global_trace(void)
{
- read_value(global_ring_buffer, global_trace, trace_array, buffer);
- read_value(global_pages, global_ring_buffer, ring_buffer, pages);
+ if (trace_buffer_available) {
+ global_trace_buffer = global_trace + koffset(trace_array, trace_buffer);
+ read_value(global_ring_buffer, global_trace_buffer, trace_buffer, buffer);
+ } else {
+ read_value(global_ring_buffer, global_trace, trace_array, buffer);
+ read_value(global_pages, global_ring_buffer, ring_buffer, pages);
+ }
global_buffers = calloc(sizeof(*global_buffers), nr_cpu_ids);
if (global_buffers == NULL)
@@ -420,12 +447,17 @@ out_fail:
static int ftrace_int_max_tr_trace(void)
{
- read_value(max_tr_ring_buffer, max_tr_trace, trace_array, buffer);
+ if (trace_buffer_available) {
+ global_max_buffer = global_trace + koffset(trace_array, max_buffer);
+ read_value(max_tr_ring_buffer, global_max_buffer, trace_buffer, buffer);
+ } else {
+ read_value(max_tr_ring_buffer, max_tr_trace, trace_array, buffer);
- if (!max_tr_ring_buffer)
- return 0;
+ if (!max_tr_ring_buffer)
+ return 0;
- read_value(max_tr_pages, max_tr_ring_buffer, ring_buffer, pages);
+ read_value(max_tr_pages, max_tr_ring_buffer, ring_buffer, pages);
+ }
max_tr_buffers = calloc(sizeof(*max_tr_buffers), nr_cpu_ids);
if (max_tr_buffers == NULL)
@@ -449,7 +481,12 @@ static int ftrace_init_current_tracer(vo
char tmp[128];
/* Get current tracer name */
- read_value(addr, current_trace, POINTER_SYM, POINTER);
+ if (encapsulated_current_trace) {
+ read_value(addr, global_trace, trace_array, current_trace);
+ } else {
+ read_value(addr, current_trace, POINTER_SYM, POINTER);
+ }
+
read_value(addr, addr, tracer, name);
read_string(addr, tmp, 128);
@@ -471,19 +508,33 @@ static int ftrace_init(void)
struct syment *sym_current_trace;
sym_global_trace = symbol_search("global_trace");
- sym_max_tr_trace = symbol_search("max_tr");
sym_ftrace_events = symbol_search("ftrace_events");
- sym_current_trace = symbol_search("current_trace");
- if (sym_global_trace == NULL || sym_max_tr_trace == NULL
- || sym_ftrace_events == NULL
- || sym_current_trace == NULL)
+ if (sym_global_trace == NULL || sym_ftrace_events == NULL)
return -1;
global_trace = sym_global_trace->value;
- max_tr_trace = sym_max_tr_trace->value;
ftrace_events = sym_ftrace_events->value;
- current_trace = sym_current_trace->value;
+
+ if (MEMBER_EXISTS("trace_array", "current_trace")) {
+ encapsulated_current_trace = 1;
+ } else {
+ sym_current_trace = symbol_search("current_trace");
+ if (sym_current_trace == NULL)
+ return -1;
+
+ current_trace = sym_current_trace->value;
+ }
+
+ if (MEMBER_EXISTS("trace_array", "trace_buffer")) {
+ trace_buffer_available = 1;
+ } else {
+ sym_max_tr_trace = symbol_search("max_tr");
+ if (sym_max_tr_trace == NULL)
+ return -1;
+
+ max_tr_trace = sym_max_tr_trace->value;
+ }
if (!try_get_symbol_data("nr_cpu_ids", sizeof(int), &nr_cpu_ids))
nr_cpu_ids = 1;
@@ -1453,6 +1504,7 @@ static void ftrace_show(int argc, char *
if ((file = popen(trace_cmd, "r"))) {
ret = fread(buf, 1, sizeof(buf), file);
buf[ret] = 0;
+ pclose(file);
}
if (!strstr(buf, "trace-cmd version")) {
if (env_trace_cmd)

@@ -1,265 +0,0 @@
--- crash-trace-command-2.0/trace.c.orig
+++ crash-trace-command-2.0/trace.c
@@ -34,6 +34,10 @@ static int encapsulated_current_trace;
* trace_buffer is supported
*/
static int trace_buffer_available;
+/*
+ * max_buffer is supported
+ */
+static int max_buffer_available;
#define koffset(struct, member) struct##_##member##_offset
@@ -154,6 +158,7 @@ static int init_offsets(void)
if (koffset(struct, member) < 0) { \
fprintf(fp, "failed to init the offset, struct:"\
#struct ", member:" #member); \
+ fprintf(fp, "\n"); \
return -1; \
} \
} while (0)
@@ -163,8 +168,10 @@ static int init_offsets(void)
if (trace_buffer_available) {
init_offset(trace_array, trace_buffer);
- init_offset(trace_array, max_buffer);
init_offset(trace_buffer, buffer);
+
+ if (max_buffer_available)
+ init_offset(trace_array, max_buffer);
} else {
init_offset(trace_array, buffer);
}
@@ -176,6 +183,9 @@ static int init_offsets(void)
fprintf(fp, "per cpu buffer sizes\n");
}
+ if (kernel_symbol_exists("ring_buffer_read"))
+ gdb_set_crash_scope(symbol_value("ring_buffer_read"), "ring_buffer_read");
+
if (!per_cpu_buffer_sizes)
init_offset(ring_buffer, pages);
init_offset(ring_buffer, flags);
@@ -205,7 +215,12 @@ static int init_offsets(void)
init_offset(list_head, next);
- init_offset(ftrace_event_call, list);
+ koffset(ftrace_event_call, list) = MAX(MEMBER_OFFSET("ftrace_event_call", "list"),
+ MEMBER_OFFSET("trace_event_call", "list"));
+ if (koffset(ftrace_event_call, list) < 0) {
+ fprintf(fp, "failed to init the offset, struct:[f]trace_event_call member:list)\n");
+ return -1; \
+ }
init_offset(ftrace_event_field, link);
init_offset(ftrace_event_field, name);
@@ -448,6 +463,9 @@ out_fail:
static int ftrace_int_max_tr_trace(void)
{
if (trace_buffer_available) {
+ if (!max_buffer_available)
+ return 0;
+
global_max_buffer = global_trace + koffset(trace_array, max_buffer);
read_value(max_tr_ring_buffer, global_max_buffer, trace_buffer, buffer);
} else {
@@ -528,6 +546,9 @@ static int ftrace_init(void)
if (MEMBER_EXISTS("trace_array", "trace_buffer")) {
trace_buffer_available = 1;
+
+ if (MEMBER_EXISTS("trace_array", "max_buffer"))
+ max_buffer_available = 1;
} else {
sym_max_tr_trace = symbol_search("max_tr");
if (sym_max_tr_trace == NULL)
@@ -710,7 +731,8 @@ static int syscall_get_enter_fields(ulon
goto work;
inited = 1;
- data_offset = MEMBER_OFFSET("ftrace_event_call", "data");
+ data_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "data"),
+ MEMBER_OFFSET("trace_event_call", "data"));
if (data_offset < 0)
return -1;
@@ -742,7 +764,8 @@ static int syscall_get_exit_fields_old(u
goto work;
inited = 1;
- data_offset = MEMBER_OFFSET("ftrace_event_call", "data");
+ data_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "data"),
+ MEMBER_OFFSET("trace_event_call", "data"));
if (data_offset < 0)
return -1;
@@ -803,18 +826,22 @@ int ftrace_get_event_type_fields(ulong c
goto work;
inited = 1;
- fields_offset = MEMBER_OFFSET("ftrace_event_call", "fields");
+ fields_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "fields"),
+ MEMBER_OFFSET("trace_event_call", "fields"));
- class_offset = MEMBER_OFFSET("ftrace_event_call", "class");
+ class_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "class"),
+ MEMBER_OFFSET("trace_event_call", "class"));
if (class_offset < 0)
goto work;
inited = 2;
- fields_offset = MEMBER_OFFSET("ftrace_event_class", "fields");
+ fields_offset = MAX(MEMBER_OFFSET("ftrace_event_class", "fields"),
+ MEMBER_OFFSET("trace_event_class", "fields"));
if (fields_offset < 0)
return -1;
- get_fields_offset = MEMBER_OFFSET("ftrace_event_class", "get_fields");
+ get_fields_offset = MAX(MEMBER_OFFSET("ftrace_event_class", "get_fields"),
+ MEMBER_OFFSET("trace_event_class", "get_fields"));
if ((sp = symbol_search("syscall_get_enter_fields")) != NULL)
syscall_get_enter_fields_value = sp->value;
if ((sp = symbol_search("syscall_get_exit_fields")) != NULL)
@@ -988,19 +1015,45 @@ static void ftrace_destroy_event_types(v
free(ftrace_common_fields);
}
+#define TRACE_EVENT_FL_TRACEPOINT 0x40
+
static
int ftrace_get_event_type_name(ulong call, char *name, int len)
{
static int inited;
static int name_offset;
+ static int flags_offset;
+ static int tp_name_offset;
+ uint flags;
ulong name_addr;
- if (!inited) {
- inited = 1;
- name_offset = MEMBER_OFFSET("ftrace_event_call", "name");
- }
+ if (inited)
+ goto work;
+ inited = 1;
+ name_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "name"),
+ MEMBER_OFFSET("trace_event_call", "name"));
+ if (name_offset >= 0)
+ goto work;
+
+ name_offset = MAX(ANON_MEMBER_OFFSET("ftrace_event_call", "name"),
+ ANON_MEMBER_OFFSET("trace_event_call", "name"));
+ if (name_offset < 0)
+ return -1;
+
+ flags_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "flags"),
+ MEMBER_OFFSET("trace_event_call", "flags"));
+ if (flags_offset < 0)
+ return -1;
+
+ tp_name_offset = MEMBER_OFFSET("tracepoint", "name");
+ if (tp_name_offset < 0)
+ return -1;
+
+ inited = 2;
+
+work:
if (name_offset < 0)
return -1;
@@ -1008,6 +1061,21 @@ int ftrace_get_event_type_name(ulong cal
"read ftrace_event_call name_addr", RETURN_ON_ERROR))
return -1;
+ if (inited == 2) {
+ if (!readmem(call + flags_offset, KVADDR, &flags,
+ sizeof(flags), "read ftrace_event_call flags",
+ RETURN_ON_ERROR))
+ return -1;
+
+ if (flags & TRACE_EVENT_FL_TRACEPOINT) {
+ if (!readmem(name_addr + tp_name_offset, KVADDR,
+ &name_addr, sizeof(name_addr),
+ "read tracepoint name", RETURN_ON_ERROR))
+ return -1;
+ }
+
+ }
+
if (!read_string(name_addr, name, len))
return -1;
@@ -1028,16 +1096,19 @@ int ftrace_get_event_type_system(ulong c
goto work;
inited = 1;
- sys_offset = MEMBER_OFFSET("ftrace_event_call", "system");
+ sys_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "system"),
+ MEMBER_OFFSET("trace_event_call", "system"));
if (sys_offset >= 0)
goto work;
- class_offset = MEMBER_OFFSET("ftrace_event_call", "class");
+ class_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "class"),
+ MEMBER_OFFSET("trace_event_call", "class"));
if (class_offset < 0)
return -1;
- sys_offset = MEMBER_OFFSET("ftrace_event_class", "system");
+ sys_offset = MAX(MEMBER_OFFSET("ftrace_event_class", "system"),
+ MEMBER_OFFSET("trace_event_class", "system"));
inited = 2;
work:
@@ -1109,7 +1180,8 @@ int ftrace_get_event_type_print_fmt(ulon
if (!inited) {
inited = 1;
- fmt_offset = MEMBER_OFFSET("ftrace_event_call", "print_fmt");
+ fmt_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "print_fmt"),
+ MEMBER_OFFSET("trace_event_call", "print_fmt"));
}
if (fmt_offset < 0) {
@@ -1132,11 +1204,13 @@ int ftrace_get_event_type_id(ulong call,
if (!inited) {
inited = 1;
- id_offset = MEMBER_OFFSET("ftrace_event_call", "id");
+ id_offset = MAX(MEMBER_OFFSET("ftrace_event_call", "id"),
+ MEMBER_OFFSET("trace_event_call", "id"));
if (id_offset < 0) {
/* id = call->event.type */
- int f1 = MEMBER_OFFSET("ftrace_event_call", "event");
+ int f1 = MAX(MEMBER_OFFSET("ftrace_event_call", "event"),
+ MEMBER_OFFSET("trace_event_call", "event"));
int f2 = MEMBER_OFFSET("trace_event", "type");
if (f1 >= 0 && f2 >= 0)
@@ -1495,7 +1569,6 @@ static void ftrace_show(int argc, char *
FILE *file;
size_t ret;
size_t nitems __attribute__ ((__unused__));
- char *unused __attribute__ ((__unused__));
/* check trace-cmd */
if (env_trace_cmd)
@@ -1519,8 +1592,9 @@ static void ftrace_show(int argc, char *
}
/* dump trace.dat to the temp file */
- unused = mktemp(tmp);
- fd = open(tmp, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ fd = mkstemp(tmp);
+ if (fd < 0)
+ return;
if (trace_cmd_data_output(fd) < 0)
goto out;

@@ -1,13 +0,0 @@
--- crash-trace-command-2.0/trace.c.orig
+++ crash-trace-command-2.0/trace.c
@@ -198,7 +198,9 @@ static int init_offsets(void)
fprintf(fp, "per cpu buffer sizes\n");
}
- if (kernel_symbol_exists("ring_buffer_read"))
+ if (machine_type("PPC64") && kernel_symbol_exists(".ring_buffer_read"))
+ gdb_set_crash_scope(symbol_value(".ring_buffer_read"), ".ring_buffer_read");
+ else if (kernel_symbol_exists("ring_buffer_read"))
gdb_set_crash_scope(symbol_value("ring_buffer_read"), "ring_buffer_read");
if (!per_cpu_buffer_sizes)

@@ -1,13 +0,0 @@
--- crash-trace-command-2.0/Makefile.orig
+++ crash-trace-command-2.0/Makefile
@@ -6,6 +6,10 @@ ifeq ($(shell arch), ppc64)
TARGET=PPC64
TARGET_CFLAGS=-m64
endif
+ifeq ($(shell arch), ppc64le)
+ TARGET=PPC64
+ TARGET_CFLAGS=-m64
+endif
ifeq ($(shell arch), ia64)
TARGET=IA64
TARGET_CFLAGS=

@@ -1,31 +0,0 @@
--- crash-trace-command-2.0/trace.c.orig
+++ crash-trace-command-2.0/trace.c
@@ -1536,23 +1535,21 @@ static struct command_table_entry comman
static int ftrace_initialized;
-int _init(void)
+void __attribute__((constructor))
+trace_init(void)
{
if (ftrace_init() < 0)
- return 0;
+ return;
ftrace_initialized = 1;
register_extension(command_table);
-
- return 1;
}
-int _fini(void)
+void __attribute__((destructor))
+trace_fini(void)
{
if (ftrace_initialized)
ftrace_destroy();
-
- return 1;
}
#define TRACE_CMD_FILE_VERSION_STRING "6"

@@ -1,9 +0,0 @@
diff -up crash-trace-command-2.0/Makefile.orig crash-trace-command-2.0/Makefile
--- crash-trace-command-2.0/Makefile.orig 2018-09-19 15:46:23.812160803 -0400
+++ crash-trace-command-2.0/Makefile 2018-09-19 15:47:12.489890130 -0400
@@ -36,4 +36,4 @@ INCDIR=/usr/include/crash
all: trace.so
trace.so: $(INCDIR)/defs.h trace.c
- gcc $(RPM_OPT_FLAGS) -Wall -I$(INCDIR) -nostartfiles -shared -rdynamic -o trace.so trace.c -fPIC -D$(TARGET) $(TARGET_CFLAGS)
+ gcc $(RPM_OPT_FLAGS) -Wall -I$(INCDIR) -nostartfiles -shared -rdynamic -o trace.so trace.c -fPIC -D$(TARGET) $(TARGET_CFLAGS) -Wl,-z,now

@@ -1,10 +0,0 @@
--- crash-trace-command-2.0/trace.c.orig
+++ crash-trace-command-2.0/trace.c
@@ -251,7 +251,6 @@ static int ftrace_init_pages(struct ring
cpu_buffer->linear_pages = calloc(sizeof(ulong), nr_pages + 1);
if (cpu_buffer->linear_pages == NULL) {
- free(cpu_buffer->pages);
return -1;
}

@@ -1,46 +0,0 @@
--- crash-trace-command-2.0/trace.c.orig
+++ crash-trace-command-2.0/trace.c
@@ -15,6 +15,7 @@
#include <stdio.h>
#include <ctype.h>
#include <setjmp.h>
+#include <stdlib.h>
static int verbose = 0;
@@ -892,7 +893,7 @@ out_fail:
static int ftrace_init_event_type(ulong call, struct event_type *aevent_type)
{
- ulong fields_head;
+ ulong fields_head = 0;
if (ftrace_get_event_type_fields(call, &fields_head) < 0)
return -1;
@@ -1443,6 +1444,8 @@ static void ftrace_show(int argc, char *
int fd;
FILE *file;
size_t ret;
+ size_t nitems __attribute__ ((__unused__));
+ char *unused __attribute__ ((__unused__));
/* check trace-cmd */
if (env_trace_cmd)
@@ -1465,7 +1468,7 @@ static void ftrace_show(int argc, char *
}
/* dump trace.dat to the temp file */
- mktemp(tmp);
+ unused = mktemp(tmp);
fd = open(tmp, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (trace_cmd_data_output(fd) < 0)
goto out;
@@ -1478,7 +1481,7 @@ static void ftrace_show(int argc, char *
ret = fread(buf, 1, sizeof(buf), file);
if (ret == 0)
break;
- fwrite(buf, 1, ret, fp);
+ nitems = fwrite(buf, 1, ret, fp);
}
pclose(file);
out:

@@ -1,14 +1,16 @@
#
# crash core analysis suite
#
%global reponame crash-trace
Summary: Trace extension module for the crash utility
Name: crash-trace-command
Version: 2.0
Release: 18%{?dist}
Version: 3.0
Release: 1%{?dist}
License: GPLv2
Group: Development/Debuggers
Source: https://github.com/crash-utility/crash-extensions/blob/master/%{name}-%{version}.tar.gz
URL: https://crash-utility.github.io/extensions.html
Source: https://github.com/fujitsu/crash-trace/archive/v%{version}/%{name}-%{version}.tar.gz
URL: https://github.com/fujitsu/crash-trace
# Vendor: Fujitsu Limited
# Packager: Qiao Nuohan <qiaonuohan@cn.fujitsu.com>
ExclusiveOS: Linux
@@ -18,51 +20,24 @@ BuildRequires: zlib-devel lzo-devel snappy-devel
BuildRequires: crash-devel >= 7.2.0-2
Requires: trace-cmd
Requires: crash >= 7.2.0-2
Patch0: trace_compiler_warnings.patch
Patch1: replace_obsolete_init_and_fini.patch
Patch2: sigsegv_on_calloc_failure.patch
Patch3: initialize_trace_dat.patch
Patch4: ARM64.patch
Patch5: linux_3.10_support.patch
Patch6: ppc64le.patch
Patch7: linux_4.2_support.patch
Patch8: TRACE_EVENT_FL_TRACEPOINT_flag.patch
Patch9: big_endian_nr_pages.patch
Patch10: ppc64_ring_buffer_read.patch
Patch11: rhel8_build.patch
Patch12: ftrace_event_call_rh_data.patch
Patch13: 0001-Fixes-for-the-trace.so-extension-module.patch
Patch14: 0002-extensions-trace-Rename-trace_buffer-to-array_buffer.patch
Patch15: 0003-extensions-trace-Rename-ring_buffer-to-trace_buffer.patch
Patch0: 0001-Makefile-set-DT_SONAME-to-trace.so.patch
Patch1: 0002-Makefile-fix-build-failure-on-aarch64-and-ppc64le.patch
%description
Command for reading ftrace data from a dumpfile.
%prep
%setup -q -n %{name}-%{version}
%patch0 -p1 -b trace_compiler_warnings.patch
%patch1 -p1 -b replace_obsolete_init_and_fini.patch
%patch2 -p1 -b sigsegv_on_calloc_failure.patch
%patch3 -p1 -b initialize_trace_dat.patch
%patch4 -p1 -b ARM64.patch
%patch5 -p1 -b linux_3.10_support.patch
%patch6 -p1 -b ppc64le.patch
%patch7 -p1 -b linux_4.2_support.patch
%patch8 -p1 -b TRACE_EVENT_FL_TRACEPOINT_flag.patch
%patch9 -p1 -b big_endian_nr_pages.patch
%patch10 -p1 -b ppc64_ring_buffer_read.patch
%patch11 -p1 -b rhel8_build.patch
%patch12 -p1 -b ftrace_event_call_rh_data.patch
%patch13 -p1
%patch14 -p1
%patch15 -p1
%setup -q -n %{reponame}-%{version}
%patch0 -p1 -b 0001-Makefile-set-DT_SONAME-to-trace.so.patch
%patch1 -p1 -b 0002-Makefile-fix-build-failure-on-aarch64-and-ppc64le.patch
%build
make
%install
mkdir -p %{buildroot}%{_libdir}/crash/extensions/
cp %{_builddir}/%{name}-%{version}/trace.so %{buildroot}%{_libdir}/crash/extensions/
cp %{_builddir}/%{reponame}-%{version}/trace.so %{buildroot}%{_libdir}/crash/extensions/
%clean
rm -rf %{buildroot}
@@ -73,6 +48,11 @@ rm -rf %{buildroot}
%doc COPYING
%changelog
* Fri Nov 18 2022 Lianbo Jiang <lijiang@redhat.com> - 3.0-1
- Rebase to upstream v3.0
- Update to the latest upstream commit
Resolves: rhbz#2119709
* Mon Feb 08 2021 Lianbo Jiang <lijiang@redhat.com> - 2.0-18
- Rename trace_buffer to array_buffer
- Rename ring_buffer to trace_buffer