crash/crash.patch
anderson aa209cddd0 - Updated source package to crash-3.10.tar.gz, containing IBM's final ppc64
processor support for RHEL4
- Fixes potential "bt -a" hang on dumpfile where netdump IPI interrupted an
    x86 process while executing the instructions just after it had entered
    the kernel for a syscall, but before calling the handler. BZ #139437
- Update to handle backtraces in dumpfiles generated on IA64 with the INIT
    switch (functionality introduced in the RHEL3-U5 kernel). BZ #139429
- Fix for handling ia64 and x86_64 machines booted with maxcpus=1 on an SMP
    kernel. BZ #139435
- Update to handle backtraces in dumpfiles generated on x86_64 from the NMI
    exception stack (functionality introduced in the RHEL3-U5 kernel).
- "kmem -[sS]" beefed up to more accurately verify slab cache chains and
    report errors found.
- Fix for ia64 INIT switch-generated backtrace handling when
    init_handler_platform() is inlined into ia64_init_handler(); properly
    handles both RHEL3 and RHEL4 kernel patches. BZ #138350
- Update to enhance ia64 gdb disassembly output so as to symbolically
    display call targets from kernel module text without requiring module
    debuginfo data.
2005-02-10 18:39:42 +00:00
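
A rough sketch of how the user-visible additions above are invoked (the option forms
are taken from the help.c, memory.c and ia64.c hunks below; the cache names and
numeric values are made up for illustration, not from a real session):

    crash> kmem -s list                       list all slab cache names and addresses
    crash> kmem -I size-64,size-128 -s        verify cache chains, skipping the named caches
    $ crash --machdep phys_start=0x4000000 ...        ia64: override the kernel phys_start
    $ crash --machdep init_stack_size=0x4000 ...      ia64: override the ia64_init_stack size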

--- crash/memory.c.orig 2005-02-09 16:13:25.000000000 -0500
+++ crash/memory.c 2005-01-06 14:56:46.000000000 -0500
@@ -88,7 +88,9 @@
static void kmem_search(struct meminfo *);
static void kmem_cache_init(void);
static ulong max_cpudata_limit(ulong, ulong *);
+static int ignore_cache(struct meminfo *, char *);
static char *is_kmem_cache_addr(ulong, char *);
+static void kmem_cache_list(void);
static void dump_kmem_cache(struct meminfo *);
static void dump_kmem_cache_percpu_v1(struct meminfo *);
static void dump_kmem_cache_percpu_v2(struct meminfo *);
@@ -104,6 +106,8 @@
static void dump_slab(struct meminfo *);
static void dump_slab_percpu_v1(struct meminfo *);
static void dump_slab_percpu_v2(struct meminfo *);
+static int verify_slab_v1(struct meminfo *, ulong, int);
+static int verify_slab_v2(struct meminfo *, ulong, int);
static void gather_slab_free_list(struct meminfo *);
static void gather_slab_free_list_percpu(struct meminfo *);
static void gather_cpudata_list_v1(struct meminfo *);
@@ -3321,7 +3325,10 @@
if (vt->flags & KMEM_CACHE_UNAVAIL)
error(FATAL,
"kmem cache slab subsystem not available\n");
- vt->dump_kmem_cache(&meminfo);
+ if (STREQ(meminfo.reqname, "list"))
+ kmem_cache_list();
+ else
+ vt->dump_kmem_cache(&meminfo);
}
if (Sflag == 1) {
@@ -3329,7 +3336,10 @@
error(FATAL,
"kmem cache slab subsystem not available\n");
meminfo.flags = VERBOSE;
- vt->dump_kmem_cache(&meminfo);
+ if (STREQ(meminfo.reqname, "list"))
+ kmem_cache_list();
+ else
+ vt->dump_kmem_cache(&meminfo);
}
if (vflag == 1)
@@ -5967,6 +5977,66 @@
}
/*
+ * Same functionality as the function above, except that it just
+ * dumps all slab cache names and their addresses.
+ */
+static void
+kmem_cache_list(void)
+{
+ ulong cache, cache_cache, name;
+ long next_offset, name_offset;
+ char *cache_buf;
+ char buf[BUFSIZE];
+
+ if (vt->flags & KMEM_CACHE_UNAVAIL) {
+ error(INFO, "kmem cache slab subsystem not available\n");
+ return;
+ }
+
+ name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
+ OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name);
+ next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
+ OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp);
+
+ cache = cache_cache = symbol_value("cache_cache");
+
+ cache_buf = GETBUF(SIZE(kmem_cache_s));
+
+ do {
+ readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s),
+ "kmem_cache_s buffer", FAULT_ON_ERROR);
+
+ if (vt->kmem_cache_namelen) {
+ BCOPY(cache_buf+name_offset, buf,
+ vt->kmem_cache_namelen);
+ } else {
+ name = ULONG(cache_buf + name_offset);
+ if (!read_string(name, buf, BUFSIZE-1)) {
+ if (vt->flags &
+ (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
+ error(FATAL,
+ "cannot read kmem_cache_s.name string at %lx\n",
+ name);
+ else
+ error(FATAL,
+ "cannot read kmem_cache_s.c_name string at %lx\n",
+ name);
+ }
+ }
+
+ fprintf(fp, "%lx %s\n", cache, buf);
+
+ cache = ULONG(cache_buf + next_offset);
+
+ if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
+ cache -= next_offset;
+
+ } while (cache != cache_cache);
+
+ FREEBUF(cache_buf);
+}
+
+/*
* Translate an address to its physical page number, verify that the
* page in fact belongs to the slab subsystem, and if so, return the
* name of the cache to which it belongs.
@@ -6040,11 +6110,14 @@
readmem(page+OFFSET(page_prev),
KVADDR, &slab, sizeof(void *),
"page.prev", FAULT_ON_ERROR);
-
else if (VALID_MEMBER(page_list_prev))
readmem(page+OFFSET(page_list_prev),
KVADDR, &slab, sizeof(void *),
"page.list.prev", FAULT_ON_ERROR);
+ else if (VALID_MEMBER(page_lru))
+ readmem(page+OFFSET(page_lru)+OFFSET(list_head_prev),
+ KVADDR, &slab, sizeof(void *),
+ "page.lru.prev", FAULT_ON_ERROR);
else
error(FATAL, "unknown definition of struct page?\n");
@@ -6231,6 +6304,41 @@
return max_limit;
}
+/*
+ * Determine whether the current slab cache is contained in
+ * the comma-separated list from a "kmem -I list1,list2 ..."
+ * command entry.
+ */
+static int
+ignore_cache(struct meminfo *si, char *name)
+{
+ int i, argc;
+ char *p1;
+ char *arglist[MAXARGS];
+ char buf[BUFSIZE];
+
+ if (!si->ignore)
+ return FALSE;
+
+ strcpy(buf, si->ignore);
+
+ p1 = buf;
+ while (*p1) {
+ if (*p1 == ',')
+ *p1 = ' ';
+ p1++;
+ }
+
+ argc = parse_line(buf, arglist);
+
+ for (i = 0; i < argc; i++) {
+ if (STREQ(name, arglist[i]))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
/*
* dump_kmem_cache() displays basic information about kmalloc() slabs.
@@ -6390,8 +6498,8 @@
if (reqname && !STREQ(reqname, buf))
goto next_cache;
- if (STREQ(si->ignore, buf)) {
- fprintf(fp, "%lx %-19s [IGNORED]\n", si->cache, buf);
+ if (ignore_cache(si, buf)) {
+ fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf);
goto next_cache;
}
@@ -6588,16 +6696,13 @@
if (reqname && !STREQ(reqname, buf))
goto next_cache;
- if (STREQ(si->ignore, buf)) {
- fprintf(fp, "%lx %-19s [IGNORED]\n", si->cache, buf);
+ if (ignore_cache(si, buf)) {
+ fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf);
goto next_cache;
}
si->curname = buf;
- if (CRASHDEBUG(1))
- fprintf(fp, "cache: %lx %s\n", si->cache, si->curname);
-
readmem(si->cache+OFFSET(kmem_cache_s_objsize),
KVADDR, &tmp_val, sizeof(uint),
"objsize", FAULT_ON_ERROR);
@@ -6804,16 +6909,13 @@
if (reqname && !STREQ(reqname, buf))
goto next_cache;
- if (STREQ(si->ignore, buf)) {
- fprintf(fp, "%lx %-19s [IGNORED]\n", si->cache, buf);
+ if (ignore_cache(si, buf)) {
+ fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf);
goto next_cache;
}
si->curname = buf;
- if (CRASHDEBUG(3))
- fprintf(fp, "cache: %lx %s\n", si->cache, si->curname);
-
readmem(si->cache+OFFSET(kmem_cache_s_objsize),
KVADDR, &tmp_val, sizeof(uint),
"objsize", FAULT_ON_ERROR);
@@ -6883,7 +6985,7 @@
case KMEM_SLAB_ADDR:
fprintf(fp, " %lx ",
(ulong)si->spec_addr);
- fprintf(fp, "(slab_s)\n");
+ fprintf(fp, "(slab)\n");
break;
case KMEM_ON_SLAB:
@@ -7073,6 +7175,8 @@
#define SLAB_CHAINS (3)
+static char *slab_chain_name_v1[] = {"full", "partial", "free"};
+
static void
do_slab_chain_percpu_v1(long cmd, struct meminfo *si)
{
@@ -7080,6 +7184,7 @@
int list_borked;
char *slab_s_buf;
ulong specified_slab;
+ ulong last;
ulong slab_chains[SLAB_CHAINS];
list_borked = 0;
@@ -7096,6 +7201,12 @@
slab_chains[2] = si->cache + OFFSET(kmem_cache_s_slabs_free);
}
+ if (CRASHDEBUG(1)) {
+ fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
+ fprintf(fp, "full: %lx partial: %lx free: %lx ]\n",
+ slab_chains[0], slab_chains[1], slab_chains[2]);
+ }
+
switch (cmd)
{
case SLAB_GET_COUNTS:
@@ -7112,9 +7223,16 @@
if (!slab_chains[s])
continue;
- readmem(slab_chains[s],
- KVADDR, &si->slab, sizeof(ulong),
- "first slab", FAULT_ON_ERROR);
+ if (!readmem(slab_chains[s],
+ KVADDR, &si->slab, sizeof(ulong),
+ "first slab", QUIET|RETURN_ON_ERROR)) {
+ error(INFO,
+ "%s: %s list: bad slab pointer: %lx\n",
+ si->curname, slab_chain_name_v1[s],
+ slab_chains[s]);
+ list_borked = 1;
+ continue;
+ }
if (slab_data_saved(si)) {
FREEBUF(slab_s_buf);
@@ -7124,11 +7242,19 @@
if (si->slab == slab_chains[s])
continue;
+ last = slab_chains[s];
+
do {
if (received_SIGINT()) {
FREEBUF(slab_s_buf);
restart(0);
}
+
+ if (!verify_slab_v1(si, last, s)) {
+ list_borked = 1;
+ continue;
+ }
+ last = si->slab - OFFSET(slab_s_list);
readmem(si->slab, KVADDR, slab_s_buf,
SIZE(slab_s), "slab_s buffer",
@@ -7159,9 +7285,7 @@
error(NOTE,
"%s: slab chain inconsistency: %s list\n",
si->curname,
- s == 0 ? "full" :
- s == 1 ? "partial" :
- "free");
+ slab_chain_name_v1[s]);
list_borked = 1;
}
}
@@ -7170,7 +7294,8 @@
}
FREEBUF(slab_s_buf);
- save_slab_data(si);
+ if (!list_borked)
+ save_slab_data(si);
break;
case SLAB_WALKTHROUGH:
@@ -7183,14 +7308,24 @@
continue;
if (!specified_slab) {
- readmem(slab_chains[s],
- KVADDR, &si->slab, sizeof(ulong),
- "slabs", FAULT_ON_ERROR);
- }
+ if (!readmem(slab_chains[s],
+ KVADDR, &si->slab, sizeof(ulong),
+ "slabs", QUIET|RETURN_ON_ERROR)) {
+ error(INFO,
+ "%s: %s list: bad slab pointer: %lx\n",
+ si->curname,
+ slab_chain_name_v1[s],
+ slab_chains[s]);
+ list_borked = 1;
+ continue;
+ }
+ last = slab_chains[s];
+ } else
+ last = 0;
if (si->slab == slab_chains[s])
continue;
-
+
if (CRASHDEBUG(1)) {
fprintf(fp, "search cache: [%s] ", si->curname);
if (si->flags & ADDRESS_SPECIFIED)
@@ -7201,6 +7336,12 @@
do {
if (received_SIGINT())
restart(0);
+
+ if (!verify_slab_v1(si, last, s)) {
+ list_borked = 1;
+ continue;
+ }
+ last = si->slab - OFFSET(slab_s_list);
dump_slab_percpu_v1(si);
@@ -7214,7 +7355,7 @@
si->slab -= OFFSET(slab_s_list);
- } while (si->slab != slab_chains[s]);
+ } while (si->slab != slab_chains[s] && !list_borked);
}
break;
@@ -7222,8 +7363,94 @@
}
/*
+ * Try to preclude any attempt to translate a bogus slab structure.
+ */
+
+static int
+verify_slab_v1(struct meminfo *si, ulong last, int s)
+{
+ char slab_s_buf[BUFSIZE];
+ struct kernel_list_head *list_head;
+ unsigned int inuse;
+ ulong s_mem;
+ char *list;
+ int errcnt;
+
+ list = slab_chain_name_v1[s];
+
+ errcnt = 0;
+
+ if (!readmem(si->slab, KVADDR, slab_s_buf,
+ SIZE(slab_s), "slab_s buffer", QUIET|RETURN_ON_ERROR)) {
+ error(INFO, "%s: %s list: bad slab pointer: %lx\n",
+ si->curname, list, si->slab);
+ return FALSE;
+ }
+
+ list_head = (struct kernel_list_head *)
+ (slab_s_buf + OFFSET(slab_s_list));
+
+ if (!IS_KVADDR((ulong)list_head->next) ||
+ !accessible((ulong)list_head->next)) {
+ error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n",
+ si->curname, list, si->slab,
+ (ulong)list_head->next);
+ errcnt++;
+ }
+
+ if (last && (last != (ulong)list_head->prev)) {
+ error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n",
+ si->curname, list, si->slab,
+ (ulong)list_head->prev);
+ errcnt++;
+ }
+
+ inuse = UINT(slab_s_buf + OFFSET(slab_s_inuse));
+ if (inuse > si->c_num) {
+ error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n",
+ si->curname, list, si->slab, inuse);
+ errcnt++;
+ }
+ switch (s)
+ {
+ case 0: /* full -- or one singular list, so we can't test this */
+ break;
+
+ case 1: /* partial */
+ if ((inuse == 0) || (inuse == si->c_num)) {
+ error(INFO,
+ "%s: %s list: slab: %lx bad inuse counter: %ld\n",
+ si->curname, list, si->slab, inuse);
+ errcnt++;
+ }
+ break;
+
+ case 2: /* free */
+ if (inuse > 0) {
+ error(INFO,
+ "%s: %s list: slab: %lx bad inuse counter: %ld\n",
+ si->curname, list, si->slab, inuse);
+ errcnt++;
+ }
+ break;
+ }
+
+ s_mem = ULONG(slab_s_buf + OFFSET(slab_s_s_mem));
+ if (!IS_KVADDR(s_mem) || !accessible(s_mem)) {
+ error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n",
+ si->curname, list, si->slab, s_mem);
+ errcnt++;
+ }
+
+ return(errcnt ? FALSE : TRUE);
+}
+
+/*
* Updated for 2.6 slab substructure.
*/
+
+static char *slab_chain_name_v2[] = {"partial", "full", "free"};
+
static void
do_slab_chain_percpu_v2(long cmd, struct meminfo *si)
{
@@ -7231,6 +7458,7 @@
int list_borked;
char *slab_buf;
ulong specified_slab;
+ ulong last;
ulong slab_chains[SLAB_CHAINS];
list_borked = 0;
@@ -7244,6 +7472,12 @@
slab_chains[2] = si->cache + OFFSET(kmem_cache_s_lists) +
OFFSET(kmem_list3_slabs_free);
+ if (CRASHDEBUG(1)) {
+ fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
+ fprintf(fp, "partial: %lx full: %lx free: %lx ]\n",
+ slab_chains[0], slab_chains[1], slab_chains[2]);
+ }
+
switch (cmd)
{
case SLAB_GET_COUNTS:
@@ -7259,9 +7493,17 @@
if (!slab_chains[s])
continue;
- readmem(slab_chains[s],
- KVADDR, &si->slab, sizeof(ulong),
- "first slab", FAULT_ON_ERROR);
+ if (!readmem(slab_chains[s],
+ KVADDR, &si->slab, sizeof(ulong),
+ "first slab", QUIET|RETURN_ON_ERROR)) {
+ error(INFO,
+ "%s: %s list: bad slab pointer: %lx\n",
+ si->curname,
+ slab_chain_name_v2[s],
+ slab_chains[s]);
+ list_borked = 1;
+ continue;
+ }
if (slab_data_saved(si)) {
FREEBUF(slab_buf);
@@ -7271,11 +7513,19 @@
if (si->slab == slab_chains[s])
continue;
+ last = slab_chains[s];
+
do {
if (received_SIGINT()) {
FREEBUF(slab_buf);
restart(0);
}
+
+ if (!verify_slab_v2(si, last, s)) {
+ list_borked = 1;
+ continue;
+ }
+ last = si->slab - OFFSET(slab_list);
readmem(si->slab, KVADDR, slab_buf,
SIZE(slab), "slab buffer",
@@ -7306,9 +7556,7 @@
error(NOTE,
"%s: slab chain inconsistency: %s list\n",
si->curname,
- s == 0 ? "full" :
- s == 1 ? "partial" :
- "free");
+ slab_chain_name_v2[s]);
list_borked = 1;
}
}
@@ -7317,7 +7565,8 @@
}
FREEBUF(slab_buf);
- save_slab_data(si);
+ if (!list_borked)
+ save_slab_data(si);
break;
case SLAB_WALKTHROUGH:
@@ -7330,11 +7579,21 @@
continue;
if (!specified_slab) {
- readmem(slab_chains[s],
- KVADDR, &si->slab, sizeof(ulong),
- "slabs", FAULT_ON_ERROR);
- }
-
+ if (!readmem(slab_chains[s],
+ KVADDR, &si->slab, sizeof(ulong),
+ "slabs", QUIET|RETURN_ON_ERROR)) {
+ error(INFO,
+ "%s: %s list: bad slab pointer: %lx\n",
+ si->curname,
+ slab_chain_name_v2[s],
+ slab_chains[s]);
+ list_borked = 1;
+ continue;
+ }
+ last = slab_chains[s];
+ } else
+ last = 0;
+
if (si->slab == slab_chains[s])
continue;
@@ -7349,6 +7608,12 @@
if (received_SIGINT())
restart(0);
+ if (!verify_slab_v2(si, last, s)) {
+ list_borked = 1;
+ continue;
+ }
+ last = si->slab - OFFSET(slab_list);
+
dump_slab_percpu_v2(si);
if (si->found) {
@@ -7361,13 +7626,98 @@
si->slab -= OFFSET(slab_list);
- } while (si->slab != slab_chains[s]);
+ } while (si->slab != slab_chains[s] && !list_borked);
}
break;
}
}
+/*
+ * Try to preclude any attempt to translate a bogus slab structure.
+ */
+static int
+verify_slab_v2(struct meminfo *si, ulong last, int s)
+{
+ char slab_buf[BUFSIZE];
+ struct kernel_list_head *list_head;
+ unsigned int inuse;
+ ulong s_mem;
+ char *list;
+ int errcnt;
+
+ list = slab_chain_name_v2[s];
+
+ errcnt = 0;
+
+ if (!readmem(si->slab, KVADDR, slab_buf,
+ SIZE(slab), "slab buffer", QUIET|RETURN_ON_ERROR)) {
+ error(INFO, "%s: %s list: bad slab pointer: %lx\n",
+ si->curname, list, si->slab);
+ return FALSE;
+ }
+
+ list_head = (struct kernel_list_head *)(slab_buf + OFFSET(slab_list));
+ if (!IS_KVADDR((ulong)list_head->next) ||
+ !accessible((ulong)list_head->next)) {
+ error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n",
+ si->curname, list, si->slab,
+ (ulong)list_head->next);
+ errcnt++;
+ }
+
+ if (last && (last != (ulong)list_head->prev)) {
+ error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n",
+ si->curname, list, si->slab,
+ (ulong)list_head->prev);
+ errcnt++;
+ }
+
+ inuse = UINT(slab_buf + OFFSET(slab_inuse));
+ if (inuse > si->c_num) {
+ error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n",
+ si->curname, list, si->slab, inuse);
+ errcnt++;
+ }
+ switch (s)
+ {
+ case 0: /* partial */
+ if ((inuse == 0) || (inuse == si->c_num)) {
+ error(INFO,
+ "%s: %s list: slab: %lx bad inuse counter: %ld\n",
+ si->curname, list, si->slab, inuse);
+ errcnt++;
+ }
+ break;
+
+ case 1: /* full */
+ if (inuse != si->c_num) {
+ error(INFO,
+ "%s: %s list: slab: %lx bad inuse counter: %ld\n",
+ si->curname, list, si->slab, inuse);
+ errcnt++;
+ }
+ break;
+
+ case 2: /* free */
+ if (inuse > 0) {
+ error(INFO,
+ "%s: %s list: slab: %lx bad inuse counter: %ld\n",
+ si->curname, list, si->slab, inuse);
+ errcnt++;
+ }
+ break;
+ }
+
+ s_mem = ULONG(slab_buf + OFFSET(slab_s_mem));
+ if (!IS_KVADDR(s_mem) || !accessible(s_mem)) {
+ error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n",
+ si->curname, list, si->slab, s_mem);
+ errcnt++;
+ }
+
+ return(errcnt ? FALSE : TRUE);
+}
/*
* If it's a dumpfile, save the essential slab data to avoid re-reading
@@ -7579,7 +7929,7 @@
if (INSLAB_PERCPU(si->slab, si) &&
(si->spec_addr >= si->slab) &&
(si->spec_addr < (si->slab+tmp))) {
- if (si->spec_addr >= (si->slab + SIZE(slab_s)))
+ if (si->spec_addr >= (si->slab + SIZE(slab)))
si->found = KMEM_BUFCTL_ADDR;
else
si->found = KMEM_SLAB_ADDR;
--- crash/help.c.orig 2005-02-09 16:13:25.000000000 -0500
+++ crash/help.c 2005-01-28 15:26:13.000000000 -0500
@@ -3578,8 +3578,9 @@
" -li walks through the inactive_list and verifies nr_inactive_pages.",
" -La same as -la, but also dumps each page in the active_list.",
" -Li same as -li, but also dumps each page in the inactive_list.",
-" slab-name when used with -s or -S, limits the command to only the slab",
-" of name \"slab-cache\".",
+" slab-name when used with -s or -S, limits the command to only the slab cache",
+" of name \"slab-name\". If the slab-name argument is \"list\", then",
+" all slab cache names and addresses are listed.",
" -P declares that the following address argument is a physical address.",
" address when used without any flag, the address can be a kernel virtual,",
" or physical address; a search is made through the symbol table,",
@@ -5257,8 +5258,8 @@
" ",
" These are the current prerequisites: ",
"",
-" o At this point, x86, ia64, x86_64, alpha and ppc-based kernels are ",
-" supported. Other architectures will be addressed in the future.",
+" o At this point, x86, ia64, x86_64, alpha and ppc64-based kernels are ",
+" supported. Other architectures may be addressed in the future.",
"",
" o One size fits all -- the utility can be run on any Linux kernel version ",
" from 2.2.5-15 through 2.6.*.",
--- crash/task.c.orig 2005-02-09 16:13:25.000000000 -0500
+++ crash/task.c 2005-01-28 15:26:13.000000000 -0500
@@ -2842,6 +2842,10 @@
fprintf(fp, "(HARDWARE RESET)");
else if (machdep->flags & SYSRQ)
fprintf(fp, "(SYSRQ)");
+ else if (machdep->flags & INIT)
+ fprintf(fp, "(INIT)");
+ else if (kt->cpu_flags[tc->processor] & NMI)
+ fprintf(fp, "(NMI)");
else if (tc->task == tt->panic_task)
fprintf(fp, "(PANIC)");
else
--- crash/kernel.c.orig 2005-02-09 16:13:25.000000000 -0500
+++ crash/kernel.c 2005-01-28 15:26:13.000000000 -0500
@@ -3252,6 +3252,9 @@
for (i = 0; i < NR_CPUS; i++)
fprintf(fp, "%s%.*lx ", (i % 4) == 0 ? "\n " : "",
LONG_PRLEN, kt->__per_cpu_offset[i]);
+ fprintf(fp, "\n cpu_flags[NR_CPUS]:");
+ for (i = 0; i < NR_CPUS; i++)
+ fprintf(fp, "%lx ", kt->cpu_flags[i]);
fprintf(fp, "\n");
}
--- crash/ia64.c.orig 2005-02-09 16:13:25.000000000 -0500
+++ crash/ia64.c 2005-02-09 16:12:05.000000000 -0500
@@ -56,6 +56,7 @@
static struct line_number_hook ia64_line_number_hooks[];
static ulong ia64_get_stackbase(ulong);
static ulong ia64_get_stacktop(ulong);
+static void parse_cmdline_arg(void);
struct unw_frame_info;
static void dump_unw_frame_info(struct unw_frame_info *);
@@ -112,12 +113,11 @@
machdep->last_ptbl_read = 0;
machdep->verify_paddr = ia64_verify_paddr;
machdep->ptrs_per_pgd = PTRS_PER_PGD;
- if (machdep->cmdline_arg)
- machdep->machspec->phys_start =
- htol(machdep->cmdline_arg, RETURN_ON_ERROR, NULL);
- else
- machdep->machspec->phys_start = UNKNOWN_PHYS_START;
- machdep->flags |= DEVMEMRD;
+ machdep->machspec->phys_start = UNKNOWN_PHYS_START;
+ if (machdep->cmdline_arg)
+ parse_cmdline_arg();
+ if (ACTIVE())
+ machdep->flags |= DEVMEMRD;
break;
case PRE_GDB:
@@ -218,6 +218,108 @@
}
}
+/*
+ * --machdep <addr> defaults to the physical start location.
+ *
+ * Otherwise, it must be an "item=value" string, separated
+ * by commas if more than one is passed in.
+ */
+
+void
+parse_cmdline_arg(void)
+{
+ int i, c, errflag;
+ char *p;
+ char buf[BUFSIZE];
+ char *arglist[MAXARGS];
+ ulong value;
+ struct machine_specific *ms;
+
+ ms = &ia64_machine_specific;
+
+ if (!strstr(machdep->cmdline_arg, "=")) {
+ errflag = 0;
+ value = htol(machdep->cmdline_arg,
+ RETURN_ON_ERROR|QUIET, &errflag);
+ if (!errflag) {
+ ms->phys_start = value;
+ error(NOTE, "setting phys_start to: 0x%lx\n",
+ ms->phys_start);
+ } else
+ error(WARNING, "ignoring --machdep option: %s\n\n",
+ machdep->cmdline_arg);
+ return;
+ }
+
+ strcpy(buf, machdep->cmdline_arg);
+
+ for (p = buf; *p; p++) {
+ if (*p == ',')
+ *p = ' ';
+ }
+
+ c = parse_line(buf, arglist);
+
+ for (i = 0; i < c; i++) {
+ errflag = 0;
+
+ if (STRNEQ(arglist[i], "phys_start=")) {
+ p = arglist[i] + strlen("phys_start=");
+ if (strlen(p)) {
+ value = htol(p, RETURN_ON_ERROR|QUIET,
+ &errflag);
+ if (!errflag) {
+ ms->phys_start = value;
+ error(NOTE,
+ "setting phys_start to: 0x%lx\n",
+ ms->phys_start);
+ continue;
+ }
+ }
+ } else if (STRNEQ(arglist[i], "init_stack_size=")) {
+ p = arglist[i] + strlen("init_stack_size=");
+ if (strlen(p)) {
+ value = stol(p, RETURN_ON_ERROR|QUIET,
+ &errflag);
+ if (!errflag) {
+ ms->ia64_init_stack_size = (int)value;
+ error(NOTE,
+ "setting init_stack_size to: 0x%x (%d)\n",
+ ms->ia64_init_stack_size,
+ ms->ia64_init_stack_size);
+ continue;
+ }
+ }
+ }
+
+ error(WARNING, "ignoring --machdep option: %s\n", arglist[i]);
+ }
+
+ if (c)
+ fprintf(fp, "\n");
+}
+
+
+int
+ia64_in_init_stack(ulong addr)
+{
+ ulong init_stack_addr;
+
+ if (!symbol_exists("ia64_init_stack"))
+ return FALSE;
+
+ /*
+ * ia64_init_stack could be aliased to region 5
+ */
+ init_stack_addr = ia64_VTOP(symbol_value("ia64_init_stack"));
+ addr = ia64_VTOP(addr);
+ if ((addr < init_stack_addr) ||
+ (addr >= (init_stack_addr+machdep->machspec->ia64_init_stack_size)))
+ return FALSE;
+
+ return TRUE;
+}
+
void
ia64_dump_machdep_table(ulong arg)
{
@@ -309,6 +411,8 @@
fprintf(fp, "%sSYSRQ", others++ ? "|" : "");
if (machdep->flags & DEVMEMRD)
fprintf(fp, "%sDEVMEMRD", others++ ? "|" : "");
+ if (machdep->flags & INIT)
+ fprintf(fp, "%sINIT", others++ ? "|" : "");
fprintf(fp, ")\n");
fprintf(fp, " kvbase: %lx\n", machdep->kvbase);
fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base);
@@ -446,6 +550,9 @@
else
fprintf(fp, "%lx\n", (ulong)ms->unwind_debug);
+ fprintf(fp, " ia64_init_stack_size: %d\n",
+ ms->ia64_init_stack_size);
+
if (verbose)
ia64_display_memmap();
}
@@ -1077,9 +1184,9 @@
{
char buf1[BUFSIZE];
char buf2[BUFSIZE];
- char *colon, *p1;
+ char *colon, *p1, *p2;
int argc;
- int revise, stop_bit;
+ int revise_bracket, stop_bit;
char *argv[MAXARGS];
ulong value;
@@ -1105,25 +1212,24 @@
strcpy(buf1, inbuf);
argc = parse_line(buf1, argv);
- revise = stop_bit = 0;
+ revise_bracket = stop_bit = 0;
if ((FIRSTCHAR(argv[argc-1]) == '<') &&
(LASTCHAR(argv[argc-1]) == '>')) {
- revise = TRUE;
+ revise_bracket = TRUE;
stop_bit = FALSE;
} else if ((FIRSTCHAR(argv[argc-1]) == '<') &&
strstr(argv[argc-1], ">;;")) {
- revise = TRUE;
+ revise_bracket = TRUE;
stop_bit = TRUE;
}
- if (revise) {
+ if (revise_bracket) {
p1 = rindex(inbuf, '<');
- while ((p1 > inbuf) && !STRNEQ(p1, " 0x"))
+ while ((p1 > inbuf) && !STRNEQ(p1, "0x"))
p1--;
- if (!STRNEQ(p1, " 0x"))
+ if (!STRNEQ(p1, "0x"))
return FALSE;
- p1++;
if (!extract_hex(p1, &value, NULLCHAR, TRUE))
return FALSE;
@@ -1133,10 +1239,46 @@
stop_bit ? ";;" : "");
sprintf(p1, "%s", buf1);
- }
- console(" %s", inbuf);
+ } else if (STRNEQ(argv[argc-2], "br.call.") &&
+ STRNEQ(argv[argc-1], "b0=0x")) {
+ /*
+ * Update module function calls of these formats:
+ *
+ * br.call.sptk.many b0=0xa0000000003d5e40;;
+ * br.call.sptk.many b0=0xa00000000001dfc0
+ *
+ * to show a bracketed function name if the destination
+ * address is a known symbol with no offset.
+ */
+ if ((p1 = strstr(argv[argc-1], ";;")) &&
+ (p2 = strstr(inbuf, ";;\n"))) {
+ *p1 = NULLCHAR;
+ p1 = &argv[argc-1][3];
+
+ if (extract_hex(p1, &value, NULLCHAR, TRUE)) {
+ sprintf(buf1, " <%s>;;\n",
+ value_to_symstr(value, buf2,
+ pc->output_radix));
+ if (IS_MODULE_VADDR(value) &&
+ !strstr(buf2, "+"))
+ sprintf(p2, buf1);
+ }
+ } else {
+ p1 = &argv[argc-1][3];
+ p2 = &LASTCHAR(inbuf);
+ if (extract_hex(p1, &value, '\n', TRUE)) {
+ sprintf(buf1, " <%s>\n",
+ value_to_symstr(value, buf2,
+ pc->output_radix));
+ if (IS_MODULE_VADDR(value) &&
+ !strstr(buf2, "+"))
+ sprintf(p2, buf1);
+ }
+ }
+ }
+ console(" %s", inbuf);
return TRUE;
}
@@ -2540,6 +2682,13 @@
"unimpl_pa_mask", FAULT_ON_ERROR);
}
}
+
+ if (symbol_exists("ia64_init_stack") && !ms->ia64_init_stack_size)
+ ms->ia64_init_stack_size = get_array_length("ia64_init_stack",
+ NULL, 0);
+
+ if (DUMPFILE() && ia64_in_init_stack(SWITCH_STACK_ADDR(CURRENT_TASK())))
+ machdep->flags |= INIT;
}
/*
--- crash/x86_64.c.orig 2005-02-09 16:13:25.000000000 -0500
+++ crash/x86_64.c 2005-01-28 15:26:13.000000000 -0500
@@ -64,6 +64,7 @@
static void x86_64_init_kernel_pgd(void);
static void x86_64_cpu_pda_init(void);
static void x86_64_ist_init(void);
+static void x86_64_post_init(void);
struct machine_specific x86_64_machine_specific = { 0 };
@@ -172,6 +173,7 @@
break;
case POST_INIT:
+ x86_64_post_init();
break;
}
}
@@ -482,6 +484,55 @@
}
}
+static void
+x86_64_post_init(void)
+{
+ int c, i, clues;
+ struct machine_specific *ms;
+ ulong *up;
+ struct syment *spt, *spc;
+ ulong offset;
+
+ /*
+ * Check whether each cpu was stopped by an NMI.
+ */
+ ms = machdep->machspec;
+
+ for (c = 0; c < kt->cpus; c++) {
+ if (ms->stkinfo.ebase[c][NMI_STACK] == 0)
+ break;
+
+ if (!readmem(ms->stkinfo.ebase[c][NMI_STACK],
+ KVADDR, ms->irqstack,
+ ms->stkinfo.esize,
+ "NMI exception stack contents",
+ RETURN_ON_ERROR|QUIET))
+ continue;
+
+ for (i = clues = 0; i < (ms->stkinfo.esize)/sizeof(ulong); i++){
+ up = (ulong *)(&ms->irqstack[i*sizeof(ulong)]);
+
+ if (!is_kernel_text(*up) ||
+ !(spt = value_search(*up, &offset)))
+ continue;
+
+ if (STREQ(spt->name, "try_crashdump") ||
+ STREQ(spt->name, "die_nmi"))
+ clues++;
+
+ if ((STREQ(spt->name, "nmi_watchdog_tick") ||
+ STREQ(spt->name, "default_do_nmi"))) {
+ spc = x86_64_function_called_by((*up)-5);
+ if (spc && STREQ(spc->name, "die_nmi"))
+ clues += 2;
+ }
+ }
+
+ if (clues >= 2)
+ kt->cpu_flags[c] |= NMI;
+ }
+}
+
/*
* No x86_64 swapper_pg_dir; initialize the vt->kernel_pgd[NR_CPUS] array
* with the lazily-sync'd init_level4_pgt page address. The level4 page
@@ -1128,7 +1179,8 @@
return BACKTRACE_ENTRY_DISPLAYED;
}
- if (!offset && !(bt->flags & BT_EXCEPTION_FRAME)) {
+ if (!offset && !(bt->flags & BT_EXCEPTION_FRAME) &&
+ !(bt->flags & BT_START)) {
if (STREQ(name, "child_rip")) {
if (symbol_exists("kernel_thread"))
name = "kernel_thread";
@@ -1164,6 +1216,8 @@
if (bt->flags & BT_EXCEPTION_FRAME)
rsp = bt->stkptr;
+ else if (bt->flags & BT_START)
+ rsp = bt->stkptr;
else
rsp = bt->stackbase + (stkindex * sizeof(long));
@@ -1320,6 +1374,10 @@
irq_eframe = 0;
bt->call_target = NULL;
rsp = bt->stkptr;
+ if (!rsp) {
+ error(INFO, "cannot determine starting stack pointer\n");
+ return;
+ }
ms = machdep->machspec;
if (BT_REFERENCE_CHECK(bt))
ofp = pc->nullfp;
@@ -1333,7 +1391,12 @@
STREQ(closest_symbol(bt->instptr), "thread_return") ?
" (schedule)" : "",
bt->instptr);
- }
+ } else if (bt->flags & BT_START) {
+ x86_64_print_stack_entry(bt, ofp, level,
+ 0, bt->instptr);
+ bt->flags &= ~BT_START;
+ level++;
+ }
if ((estack = x86_64_in_exception_stack(bt))) {
@@ -1403,7 +1466,8 @@
bt->stackbuf + (bt->stacktop - bt->stackbase) -
SIZE(pt_regs), bt, ofp);
- fprintf(fp, "--- <exception stack> ---\n");
+ if (!BT_REFERENCE_CHECK(bt))
+ fprintf(fp, "--- <exception stack> ---\n");
/*
* stack = (unsigned long *) estack_end[-2];
@@ -1411,13 +1475,24 @@
up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]);
up -= 2;
rsp = bt->stkptr = *up;
- up -= 5;
+ up -= 3;
bt->instptr = *up;
if (cs & 3)
done = TRUE; /* user-mode exception */
else
done = FALSE; /* kernel-mode exception */
bt->frameptr = 0;
+
+ /*
+ * Print the return values from the estack end.
+ */
+ if (!done) {
+ bt->flags |= BT_START;
+ x86_64_print_stack_entry(bt, ofp, level,
+ 0, bt->instptr);
+ bt->flags &= ~BT_START;
+ level++;
+ }
}
/*
@@ -1466,7 +1541,8 @@
}
}
- fprintf(fp, "--- <IRQ stack> ---\n");
+ if (!BT_REFERENCE_CHECK(bt))
+ fprintf(fp, "--- <IRQ stack> ---\n");
/*
* stack = (unsigned long *) (irqstack_end[-1]);
@@ -1950,6 +2026,7 @@
int panic_task;
int i, panic, stage;
char *sym;
+ struct syment *sp;
ulong *up;
struct bt_info bt_local, *bt;
struct machine_specific *ms;
@@ -1999,12 +2076,27 @@
if (STREQ(sym, "netconsole_netdump") ||
STREQ(sym, "netpoll_start_netdump") ||
STREQ(sym, "start_disk_dump") ||
- STREQ(sym, "disk_dump")) {
+ STREQ(sym, "disk_dump") ||
+ STREQ(sym, "try_crashdump")) {
*rip = *up;
*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
return;
}
+ if ((stage == 2) &&
+ (STREQ(sym, "nmi_watchdog_tick") ||
+ STREQ(sym, "default_do_nmi"))) {
+ sp = x86_64_function_called_by((*up)-5);
+ if (!sp || !STREQ(sp->name, "die_nmi"))
+ continue;
+ *rip = *up;
+ *rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
+ bt_in->flags |= BT_START;
+ *rip = symbol_value("die_nmi");
+ *rsp = (*rsp) - (7*sizeof(ulong));
+ return;
+ }
+
if (STREQ(sym, "panic")) {
*rip = *up;
*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
@@ -2062,7 +2154,19 @@
stage = 1;
goto next_stack;
+ /*
+ * Check the NMI exception stack.
+ */
case 1:
+ bt->stackbase = ms->stkinfo.ebase[bt->tc->processor][NMI_STACK];
+ bt->stacktop = ms->stkinfo.ebase[bt->tc->processor][NMI_STACK] +
+ ms->stkinfo.esize;
+ bt->stackbuf = ms->irqstack;
+ alter_stackbuf(bt);
+ stage = 2;
+ goto next_stack;
+
+ case 2:
break;
}
--- crash/unwind.c.orig 2005-02-09 16:13:26.000000000 -0500
+++ crash/unwind.c 2005-02-07 16:02:44.000000000 -0500
@@ -31,6 +31,10 @@
* unwind_v[123].o object files.
*/
+/*
+ * 2004-09-14 J. Nomura Added OS_INIT handling
+ */
+
/* #include <asm/ptrace.h> can't include this -- it's changing over time! */
#include "defs.h"
@@ -56,6 +60,13 @@
static void dump_unwind_table(struct unw_table *);
static int unw_init_from_blocked_task(struct unw_frame_info *,
struct bt_info *);
+static void unw_init_from_interruption(struct unw_frame_info *,
+ struct bt_info *, ulong, ulong);
+static int unw_switch_from_osinit_v1(struct unw_frame_info *,
+ struct bt_info *);
+static int unw_switch_from_osinit_v2(struct unw_frame_info *,
+ struct bt_info *);
+static unsigned long get_init_stack_ulong(unsigned long addr);
static void unw_init_frame_info(struct unw_frame_info *,
struct bt_info *, ulong);
static int find_save_locs(struct unw_frame_info *);
@@ -406,8 +417,8 @@
else
*nat_addr &= ~nat_mask;
} else {
- if ((GET_STACK_ULONG(nat_addr) & nat_mask) == 0) {
- *val = GET_STACK_ULONG(addr);
+ if ((IA64_GET_STACK_ULONG(nat_addr) & nat_mask) == 0) {
+ *val = IA64_GET_STACK_ULONG(addr);
*nat = 0;
} else {
*val = 0; /* if register is a NaT, *addr may contain kernel data! */
@@ -457,7 +468,7 @@
if (write)
*addr = *val;
else
- *val = GET_STACK_ULONG(addr);
+ *val = IA64_GET_STACK_ULONG(addr);
return 0;
}
@@ -695,7 +706,7 @@
*info->cfm_loc =
(*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
else
- *val = (GET_STACK_ULONG(info->cfm_loc) >> 52) & 0x3f;
+ *val = (IA64_GET_STACK_ULONG(info->cfm_loc) >> 52) & 0x3f;
return 0;
case UNW_AR_FPSR:
@@ -739,7 +750,7 @@
if (write)
*addr = *val;
else
- *val = GET_STACK_ULONG(addr);
+ *val = IA64_GET_STACK_ULONG(addr);
return 0;
}
@@ -764,7 +775,7 @@
if (write)
*addr = *val;
else
- *val = GET_STACK_ULONG(addr);
+ *val = IA64_GET_STACK_ULONG(addr);
return 0;
}
@@ -1644,6 +1655,7 @@
frame = 0;
do {
+restart:
unw_get_ip(info, &ip);
unw_get_sp(info, &sp);
unw_get_bsp(info, &bsp);
@@ -1695,6 +1707,26 @@
STREQ(name, "start_kernel_thread"))
break;
+ /*
+ * "init_handler_platform" indicates that this task was
+ * interrupted by INIT and its stack was switched.
+ */
+ if (STREQ(name, "init_handler_platform")) {
+ unw_switch_from_osinit_v1(info, bt);
+ frame++;
+ goto restart;
+ }
+
+ /*
+ * In some cases, init_handler_platform is inlined into
+ * ia64_init_handler.
+ */
+ if (STREQ(name, "ia64_init_handler")) {
+ unw_switch_from_osinit_v2(info, bt);
+ frame++;
+ goto restart;
+ }
+
frame++;
} while (unw_unwind(info) >= 0);
@@ -1797,6 +1829,16 @@
dump_struct("unw_table", (ulong)table, RADIX(16));
}
+static unsigned long
+get_init_stack_ulong(unsigned long addr)
+{
+ unsigned long tmp;
+
+ readmem(addr, KVADDR, &tmp, sizeof(unsigned long),
+ "get_init_stack_ulong", FAULT_ON_ERROR);
+
+ return tmp;
+}
static int
unw_init_from_blocked_task(struct unw_frame_info *info, struct bt_info *bt)
@@ -1804,13 +1846,129 @@
ulong sw;
sw = SWITCH_STACK_ADDR(bt->task);
- if (!INSTACK(sw, bt))
+ if (!INSTACK(sw, bt) && !ia64_in_init_stack(sw))
return FALSE;
unw_init_frame_info(info, bt, sw);
return TRUE;
}
+/*
+ * unw_init_from_interruption
+ * Initialize frame info from specified pt_regs/switch_stack.
+ *
+ * Similar to unw_init_frame_info() except that:
+ * - do not use readmem to access stack
+ * (because stack may be modified by unw_init_from_saved_regs)
+ * - use ar.ifs and ar.iip instead of ar.pfs and b0, respectively
+ * - use sof(size-of-frame) of ar.ifs to calculate bsp,
+ * instead of sol(size-of-local) of ar.pfs
+ * (because of cover instruction in kernel minstate save macro)
+ */
+static void
+unw_init_from_interruption(struct unw_frame_info *info, struct bt_info *bt, ulong pt, ulong sw)
+{
+// unsigned long rbslimit, rbstop, stklimit, stktop, sof, ar_pfs;
+ unsigned long rbslimit, rbstop, stklimit, stktop, sof;
+ ulong t;
+
+ t = bt->task;
+
+ memset(info, 0, sizeof(*info));
+
+ rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
+ rbstop = IA64_GET_STACK_ULONG(sw + OFFSET(switch_stack_ar_bspstore));
+ if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)
+ rbstop = rbslimit;
+
+ stklimit = (unsigned long) t + IA64_STK_OFFSET;
+ stktop = IA64_GET_STACK_ULONG(pt + offsetof(struct pt_regs, r12));
+ if (stktop <= rbstop)
+ stktop = rbstop;
+
+ info->regstk.limit = rbslimit;
+ info->regstk.top = rbstop;
+ info->memstk.limit = stklimit;
+ info->memstk.top = stktop;
+ info->task = (struct task_struct *)bt;
+ info->sw = (struct switch_stack *)sw;
+ info->sp = info->psp = stktop;
+ info->pr = IA64_GET_STACK_ULONG(sw + OFFSET(switch_stack_pr));
+
+ info->cfm_loc = (unsigned long *) (pt + offsetof(struct pt_regs, cr_ifs));
+ info->unat_loc = (unsigned long *) (pt + offsetof(struct pt_regs, ar_unat));
+ info->pfs_loc = (unsigned long *) (pt + offsetof(struct pt_regs, ar_pfs));
+ /* register stack is covered */
+ sof = IA64_GET_STACK_ULONG(info->cfm_loc) & 0x7f;
+ info->bsp = (unsigned long)
+ ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
+ /* interrupted ip is saved in iip */
+ info->ip = IA64_GET_STACK_ULONG(pt + offsetof(struct pt_regs, cr_iip));
+#if defined(UNWIND_V2) || defined(UNWIND_V3)
+ info->pt = pt;
+#endif
+
+ find_save_locs(info);
+}
+
+/*
+ * unw_switch_from_osinit
+ * switch back to interrupted context
+ *
+ * assumption: init_handler_platform() has 3 arguments,
+ * 2nd arg is pt_regs and 3rd arg is switch_stack.
+ */
+static int
+unw_switch_from_osinit_v1(struct unw_frame_info *info, struct bt_info *bt)
+{
+ unsigned long pt, sw;
+ char is_nat;
+
+ /* pt_regs is the 2nd argument of init_handler_platform */
+ if (unw_get_gr(info, 33, &pt, &is_nat)) {
+ fprintf(fp, "gr 33 get error\n");
+ return FALSE;
+ }
+ /* switch_stack is the 3rd argument of init_handler_platform */
+ if (unw_get_gr(info, 34, &sw, &is_nat)) {
+ fprintf(fp, "gr 33 get error\n");
+ return FALSE;
+ }
+
+ unw_init_from_interruption(info, bt, pt, sw);
+ ia64_exception_frame(pt, bt);
+
+ return TRUE;
+}
+
+static int
+unw_switch_from_osinit_v2(struct unw_frame_info *info, struct bt_info *bt)
+{
+ unsigned long pt, sw;
+ char is_nat;
+
+ /* pt_regs is the 1st argument of ia64_init_handler */
+ if (unw_get_gr(info, 32, &pt, &is_nat)) {
+ fprintf(fp, "gr 32 get error\n");
+
+ return FALSE;
+ }
+ /* switch_stack is the 2nd argument of ia64_init_handler */
+ if (unw_get_gr(info, 33, &sw, &is_nat)) {
+ fprintf(fp, "gr 33 get error\n");
+ return FALSE;
+ }
+
+ /* Fix me! */
+ sw = info->psp + 16;
+ pt = sw + STRUCT_SIZE("switch_stack");
+
+ unw_init_from_interruption(info, bt, pt, sw);
+ ia64_exception_frame(pt, bt);
+
+ return TRUE;
+}
+
static void
unw_init_frame_info (struct unw_frame_info *info, struct bt_info *bt, ulong sw)
{
@@ -1851,12 +2009,12 @@
info->sw = (struct switch_stack *)sw;
info->sp = info->psp = (unsigned long) (sw + SIZE(switch_stack)) - 16;
info->cfm_loc = (ulong *)(sw + OFFSET(switch_stack_ar_pfs));
- ar_pfs = GET_STACK_ULONG(info->cfm_loc);
+ ar_pfs = IA64_GET_STACK_ULONG(info->cfm_loc);
sol = (ar_pfs >> 7) & 0x7f;
info->bsp = (unsigned long)
ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
- info->ip = GET_STACK_ULONG(sw + OFFSET(switch_stack_b0));
- info->pr = GET_STACK_ULONG(sw + OFFSET(switch_stack_pr));
+ info->ip = IA64_GET_STACK_ULONG(sw + OFFSET(switch_stack_b0));
+ info->pr = IA64_GET_STACK_ULONG(sw + OFFSET(switch_stack_pr));
find_save_locs(info);
}
@@ -1973,7 +2131,7 @@
info->ip);
return -1;
}
- ip = info->ip = GET_STACK_ULONG(info->rp_loc);
+ ip = info->ip = IA64_GET_STACK_ULONG(info->rp_loc);
if (ip < GATE_ADDR + PAGE_SIZE) {
/*
* We don't have unwind info for the gate page,
@@ -1997,26 +2155,26 @@
if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
#ifdef UNWIND_V1
if ((pr & (1UL << pNonSys)) != 0)
- num_regs = GET_STACK_ULONG(info->cfm_loc) & 0x7f; /* size of frame */
+ num_regs = IA64_GET_STACK_ULONG(info->cfm_loc) & 0x7f; /* size of frame */
info->pfs_loc =
(unsigned long *) (info->sp + 16 + struct_offset(struct pt_regs, ar_pfs));
#endif
#ifdef UNWIND_V2
info->pt = info->sp + 16;
if ((pr & (1UL << pNonSys)) != 0)
- num_regs = GET_STACK_ULONG(info->cfm_loc) & 0x7f; /* size of frame */
+ num_regs = IA64_GET_STACK_ULONG(info->cfm_loc) & 0x7f; /* size of frame */
info->pfs_loc =
(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
#endif
#ifdef UNWIND_V3
info->pt = info->sp + 16;
if ((pr & (1UL << pNonSys)) != 0)
- num_regs = GET_STACK_ULONG(info->cfm_loc) & 0x7f; /* size of frame */
+ num_regs = IA64_GET_STACK_ULONG(info->cfm_loc) & 0x7f; /* size of frame */
info->pfs_loc =
(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
#endif
} else
- num_regs = (GET_STACK_ULONG(info->cfm_loc) >> 7) & 0x7f; /* size of locals */
+ num_regs = (IA64_GET_STACK_ULONG(info->cfm_loc) >> 7) & 0x7f; /* size of locals */
info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
error(INFO, "unwind: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
@@ -2026,7 +2184,8 @@
/* restore the sp: */
info->sp = info->psp;
- if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
+ if ((info->sp < info->memstk.top || info->sp > info->memstk.limit)
+ && !ia64_in_init_stack(info->sp)) {
error(INFO, "unwind: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
info->sp, info->memstk.top, info->memstk.limit);
return -1;
@@ -2135,7 +2294,7 @@
break;
}
#endif
- s[dst] = GET_STACK_ULONG(s[val]);
+ s[dst] = IA64_GET_STACK_ULONG(s[val]);
break;
}
}
--- crash/defs.h.orig 2005-02-09 16:13:26.000000000 -0500
+++ crash/defs.h 2005-01-28 15:26:12.000000000 -0500
@@ -62,7 +62,7 @@
#include <sys/wait.h>
#include <sys/time.h>
-#define BASELEVEL_REVISION "3.8"
+#define BASELEVEL_REVISION "3.10"
#undef TRUE
#undef FALSE
@@ -446,6 +446,8 @@
long __rq_idx[NR_CPUS];
long __cpu_idx[NR_CPUS];
long __per_cpu_offset[NR_CPUS];
+ long cpu_flags[NR_CPUS];
+#define NMI 0x1
};
/*
@@ -674,6 +676,7 @@
#define FRAMESIZE_DEBUG (0x10000000)
#define MACHDEP_BT_TEXT (0x8000000)
#define DEVMEMRD (0x4000000)
+#define INIT (0x2000000)
#define SYSRQ_TASK(X) ((machdep->flags & SYSRQ) && is_task_active(X))
extern struct machdep_table *machdep;
@@ -2030,6 +2033,9 @@
#define UNKNOWN_PHYS_START ((ulong)(-1))
#define DEFAULT_PHYS_START (KERNEL_TR_PAGE_SIZE * 1)
+#define IA64_GET_STACK_ULONG(OFF) \
+ ((INSTACK(OFF,bt)) ? (GET_STACK_ULONG(OFF)) : get_init_stack_ulong((unsigned long)OFF))
+
#endif /* IA64 */
#ifdef PPC64
@@ -3106,6 +3112,7 @@
#define BT_IRQSTACK (0x200000000ULL)
#define BT_DUMPFILE_SEARCH (0x400000000ULL)
#define BT_EFRAME_SEARCH2 (0x800000000ULL)
+#define BT_START (0x1000000000ULL)
#define BT_REF_HEXVAL (0x1)
#define BT_REF_SYMBOL (0x2)
@@ -3229,6 +3236,8 @@
int isize;
};
+#define NMI_STACK 2 /* ebase[] offset to NMI exception stack */
+
struct machine_specific {
char *pml4;
char *irqstack;
@@ -3318,6 +3327,7 @@
int ia64_IS_VMALLOC_ADDR(ulong);
#define display_idt_table() \
error(FATAL, "-d option TBD on ia64 architecture\n");
+int ia64_in_init_stack(ulong addr);
#define OLD_UNWIND (0x1) /* CONFIG_IA64_NEW_UNWIND not turned on */
#define NEW_UNWIND (0x2) /* CONFIG_IA64_NEW_UNWIND turned on */
@@ -3359,8 +3369,10 @@
void (*unwind)(struct bt_info *);
void (*dump_unwind_stats)(void);
int (*unwind_debug)(ulong);
+ int ia64_init_stack_size;
};
+
/*
* unwind.c
*/
--- crash/Makefile.orig 2005-02-09 16:13:26.000000000 -0500
+++ crash/Makefile 2005-02-09 16:13:25.000000000 -0500
@@ -153,10 +153,10 @@
GDB_6.1_FILES=${GDB}/gdb/Makefile.in ${GDB}/Makefile.in \
${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \
${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \
- ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c
+ ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c ${GDB}/gdb/dwarf2read.c
GDB_6.1_OFILES=${GDB}/gdb/main.o ${GDB}/gdb/symtab.o \
${GDB}/gdb/target.o ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o \
- ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o
+ ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o ${GDB}/gdb/dwarf2read.o
#
# GDB_FLAGS is passed up from the gdb Makefile.
--- crash/crash.8.orig 2005-02-09 16:13:26.000000000 -0500
+++ crash/crash.8 2005-01-31 10:55:43.000000000 -0500
@@ -104,9 +104,8 @@
.TP
.BI \-d \ num
.B Crash
-sets its internal debug level
-.I num
-. The higher the number, the more debugging data will be printed while
+sets its internal debug level.
+The higher the number, the more debugging data will be printed while
.B crash
runs.
.TP